diff --git a/.gitignore b/.gitignore
index d53886c71a17d08e066505b6bd7128a392689007..4b28ea2622a2db20f82b2dba6be20fb8ce4e3183 100644
--- a/.gitignore
+++ b/.gitignore
@@ -16,4 +16,5 @@ backgrounds/*
 *.bmp
 *png
 *.jpg
-*.jpeg
\ No newline at end of file
+*.jpeg
+*.thumb
\ No newline at end of file
diff --git a/global_script.py b/global_script.py
index e48d7e8004c03ddc7ac7c3eede8f602d78452d36..d5bb452c8364ee38e43f9d4a7a83cbdd617f3d87 100644
--- a/global_script.py
+++ b/global_script.py
@@ -5,7 +5,6 @@ import os
 import sys
 import shutil
 from collections import OrderedDict
-from math import prod
 import time
 
 import bpy
@@ -113,9 +112,7 @@ def main():
     C.scene.collection.objects.link(camera_object)
     camera_object.rotation_mode = 'XYZ'
 
-    # camera_object.location = [-.97, -0.24, 0.68]
     camera_object.location = [-.97, -0.1, 0.68]
-    # camera_rotation_0 = utils.r(Vector([72, 8, -75]))
     camera_rotation_0 = utils.r(Vector([73, 8, -82]))
     camera_object.rotation_euler = camera_rotation_0
 
@@ -301,7 +298,6 @@ def main():
                 lbl for bone in bone_lbls for lbl in [bone + k for k in ['_head', '_tail']] if '_IK' not in lbl
             ]
             face_lbls = ['nose', 'eye_l', 'eye_r', 'ear_l', 'ear_r']
-            # full_lbls = [lbl for lbl in bone_lbls if lbl not in face_lbls] + face_lbls
             full_lbls = bone_lbls + face_lbls
             annot_file_2D.write(
                 ';'.join([lbl for bone in full_lbls for lbl in [bone + k for k in ['_x', '_y']]]) + '\n')
diff --git a/mh_models/blender.thumb b/mh_models/blender.thumb
deleted file mode 100644
index 6f3fe1470195c3da011f49ca70844563dea838e6..0000000000000000000000000000000000000000
Binary files a/mh_models/blender.thumb and /dev/null differ
diff --git a/scripts/camera_proj_old.py b/scripts/camera_proj_old.py
deleted file mode 100644
index 9fd0b76f6bcdc6f2989ec4bba0de3cd38e07fe26..0000000000000000000000000000000000000000
--- a/scripts/camera_proj_old.py
+++ /dev/null
@@ -1,118 +0,0 @@
-import bpy
-import bpy_extras
-from mathutils import Matrix
-from mathutils import Vector
-
-
-# ---------------------------------------------------------------
-# 3x4 P matrix from Blender camera
-# ---------------------------------------------------------------
-
-# Build intrinsic camera parameters from Blender camera data
-# From https://blender.stackexchange.com/a/38210/153600
-# See notes on this in
-# blender.stackexchange.com/questions/15102/what-is-blenders-camera-projection-matrix-model
-def get_calibration_matrix_K_from_blender(camd):
-    f_in_mm = camd.lens
-    scene = bpy.context.scene
-    resolution_x_in_px = scene.render.resolution_x
-    resolution_y_in_px = scene.render.resolution_y
-    scale = scene.render.resolution_percentage / 100
-    sensor_width_in_mm = camd.sensor_width
-    sensor_height_in_mm = camd.sensor_height
-    pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y
-    if (camd.sensor_fit == 'VERTICAL'):
-        # the sensor height is fixed (sensor fit is horizontal),
-        # the sensor width is effectively changed with the pixel aspect ratio
-        s_u = resolution_x_in_px * scale / sensor_width_in_mm / pixel_aspect_ratio
-        s_v = resolution_y_in_px * scale / sensor_height_in_mm
-    else:  # 'HORIZONTAL' and 'AUTO'
-        # the sensor width is fixed (sensor fit is horizontal),
-        # the sensor height is effectively changed with the pixel aspect ratio
-        pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y
-        s_u = resolution_x_in_px * scale / sensor_width_in_mm
-        s_v = resolution_y_in_px * scale * pixel_aspect_ratio / sensor_height_in_mm
-
-    # Parameters of intrinsic calibration matrix K
-    alpha_u = f_in_mm * s_u
-    alpha_v = f_in_mm * s_v
-    u_0 = resolution_x_in_px * scale / 2
-    v_0 = resolution_y_in_px * scale / 2
-    skew = 0  # only use rectangular pixels
-
-    K = Matrix(
-        ((alpha_u, skew, u_0),
-         (0, alpha_v, v_0),
-         (0, 0, 1)))
-    return K
-
-
-# Returns camera rotation and translation matrices from Blender.
-#
-# There are 3 coordinate systems involved:
-#    1. The World coordinates: "world"
-#       - right-handed
-#    2. The Blender camera coordinates: "bcam"
-#       - x is horizontal
-#       - y is up
-#       - right-handed: negative z look-at direction
-#    3. The desired computer vision camera coordinates: "cv"
-#       - x is horizontal
-#       - y is down (to align to the actual pixel coordinates
-#         used in digital images)
-#       - right-handed: positive z look-at direction
-def get_3x4_RT_matrix_from_blender(cam):
-    # bcam stands for blender camera
-    R_bcam2cv = Matrix(
-        ((1, 0, 0),
-         (0, -1, 0),
-         (0, 0, -1)))
-
-    # Transpose since the rotation is object rotation,
-    # and we want coordinate rotation
-    # R_world2bcam = cam.rotation_euler.to_matrix().transposed()
-    # T_world2bcam = -1*R_world2bcam * location
-    #
-    # Use matrix_world instead to account for all constraints
-    # location, rotation = cam.matrix_world.decompose()[0:2]
-    location, rotation = cam.matrix_basis.decompose()[0:2]
-    R_world2bcam = rotation.to_matrix().transposed()
-
-    # Convert camera location to translation vector used in coordinate changes
-    # T_world2bcam = -1*R_world2bcam*cam.location
-    # Use location from matrix_world to account for constraints:
-    T_world2bcam = -1 * R_world2bcam @ location
-
-    # Build the coordinate transform matrix from world to computer vision camera
-    # NOTE: Use * instead of @ here for older versions of Blender
-    R_world2cv = R_bcam2cv @ R_world2bcam
-    T_world2cv = R_bcam2cv @ T_world2bcam
-
-    # put into 3x4 matrix
-    RT = Matrix((
-        R_world2cv[0][:] + (T_world2cv[0],),
-        R_world2cv[1][:] + (T_world2cv[1],),
-        R_world2cv[2][:] + (T_world2cv[2],)
-    ))
-    return RT
-
-
-def get_3x4_P_matrix_from_blender(cam):
-    K = get_calibration_matrix_K_from_blender(cam.data)
-    RT = get_3x4_RT_matrix_from_blender(cam)
-    return K @ RT, K, RT
-
-
-# ----------------------------------------------------------
-# Alternate 3D coordinates to 2D pixel coordinate projection code
-# adapted from https://blender.stackexchange.com/questions/882/how-to-find-image-coordinates-of-the-rendered-vertex?lq=1
-# to have the y axes pointing up and origin at the top-left corner
-def project_by_object_utils(cam, point):
-    scene = bpy.context.scene
-    co_2d = bpy_extras.object_utils.world_to_camera_view(scene, cam, point)
-    render_scale = scene.render.resolution_percentage / 100
-    render_size = (
-        int(scene.render.resolution_x * render_scale),
-        int(scene.render.resolution_y * render_scale),
-    )
-    return Vector((co_2d.x * render_size[0], render_size[1] - co_2d.y * render_size[1]))
diff --git a/scripts/human.py b/scripts/human.py
index 821698b4641827ac07be95ae9494fc65a0c3431f..051390136869f913252e445d2b105b5f786fad47 100644
--- a/scripts/human.py
+++ b/scripts/human.py
@@ -1,11 +1,7 @@
-import sys
-from math import cos, sin, pi, acos
-import random
 import json
 import glob
 import os
 
-import numpy as np
 from mathutils import Vector, Matrix
 import colorsys
 
@@ -166,9 +162,7 @@ class Human:
                         setattr(bone, f'ik_min_{axe}', utils.r(const[0]))
                         setattr(bone, f'ik_max_{axe}', utils.r(const[1]))
 
-            # self.model.pose.bones[f'hand_{s}_IK'].location = Vector((-15, 10, 0))
-            # self.model.pose.bones[f'hand_{s}_IK'].location = Vector((0, 5, -10))
-            self.model.pose.bones[f'hand_{s}_IK'].location = Vector((0, 1.2, 0.25))
+            self.model.pose.bones[f'hand_{s}_IK'].location = Vector((0, 1.2, 0.25))
 
         for i in '123':
             self.model.pose.bones[f'spine_0{i}'].lock_ik_x = False
@@ -256,34 +250,9 @@ def switch_constraints(model, enable=False):
 
 
 def set_bounds(model, car=None):
-    # set_floors(model, car)
     set_shrinkwraps(model, car)
 
 
-def set_floors(model, car=None):
-    original_mode = bpy.context.mode
-    utils.select_only(model)
-    bpy.ops.object.mode_set(mode='POSE')
-
-    planes = None
-    if car is not None:
-        planes = [ch for ch in car.children_recursive if ch.name[:5] == 'Plane']
-
-    for s in 'lr':
-        bone = model.pose.bones[f'hand_{s}_IK']
-        [bone.constraints.remove(constr) for constr in bone.constraints if 'Floor' in constr.name]
-        if car is None:
-            continue
-        for obj in planes:
-            constr = bone.constraints.new('FLOOR')
-            constr.target = obj
-            constr.floor_location = 'FLOOR_Z'
-            constr.use_rotation = True
-            constr.offset = 1
-
-    bpy.ops.object.mode_set(mode=original_mode)
-
-
 def set_shrinkwraps(model, car=None):
     original_mode = bpy.context.mode
     utils.select_only(model)
diff --git a/scripts/random_pose.py b/scripts/random_pose.py
index 8d85aa0a43ac4cb068046b40034a6316a0ec288a..4c27112ba74aadb0fe9ece8ecc55153cc70b2a2c 100644
--- a/scripts/random_pose.py
+++ b/scripts/random_pose.py
@@ -1,9 +1,7 @@
-import sys
 from math import cos, sin, pi, acos
 import random
 
 from mathutils import Vector
-import numpy as np
 
 from scripts import utils
 
@@ -74,14 +71,6 @@ def reset_subject(subject):
         bone.rotation_euler = (0, 0, 0)
 
     bpy.ops.object.mode_set(mode='OBJECT')
-    scale = round(subject.scale[0] * (1.6 + 0.3 * random.random()) / (subject.dimensions.y), 3)
-    # subject.scale = [scale] * 3
-
-    # subject.scale = [0.1] * 3
-    # height = subject.dimensions.y
-    # subject.location = [0, -0.16, -0.5 * 1.66]
-    # subject.location = [0, -0.16, -0.5 * height]
-    # subject.rotation_euler = r([65, 0, 0])
     subject.scale = [0.9] * 3
     sit_height = utils.get_head_pose('pelvis', subject).z * 0.88
     subject.location = [0, -0.04, - sit_height]
@@ -160,12 +149,6 @@ def random_pose_ik(subject, auto_ik=False, targets=None, id_targets=None):
 
     pose.use_auto_ik = auto_ik
 
-    targets_test = {
-        'l': Vector((0.3, -0.1, 0.4)),
-        'r': Vector((-0.4, -0.1, 0.2))
-    }
-
-    back_rota_fact = sin(rota('spine_03').y) / sin(r(30))
     for s in ['l', 'r']:
         # Disconnect clavicle
         armature = bpy.data.armatures[subject.name]
@@ -181,7 +164,6 @@ def random_pose_ik(subject, auto_ik=False, targets=None, id_targets=None):
         if targets is None:
             target = Vector()
             shoulder_pose = get_head_pose(f'upperarm_{s}', subject)
-            min_arm_factor = 0.2
 
             back_forward_angle = rota('spine_03').x + rota('spine_01').x - r(30)  # 0 = straight
 
@@ -196,7 +178,6 @@ def random_pose_ik(subject, auto_ik=False, targets=None, id_targets=None):
             theta = acos(costheta)
 
             min_arm_factor = 0.2 + max(sin(back_forward_angle), 0)
-            # print(min_arm_factor, d(phi), d(theta))
             u = random.uniform(min_arm_factor, 1) * arm_length
 
             target.x = u * sin(theta) * cos(phi)
@@ -206,9 +187,6 @@ def random_pose_ik(subject, auto_ik=False, targets=None, id_targets=None):
             target += shoulder_pose
 
             hand_pose(pose, side=s)
-
-            temp_rota = rota(f'upperarm_{s}') + rota(f'lowerarm_{s}')
-            # target = targets_test[s]
         else:
             if id_targets is None:
                 target = random.choice(targets[s])
@@ -226,8 +204,6 @@ def random_pose_ik(subject, auto_ik=False, targets=None, id_targets=None):
             target = target.location
 
         location = get_head_pose(bone.name, subject)
-        # print(location)
-        # print(target)
         bpy.ops.transform.translate(value=target - location)
 
     if targets is not None:
diff --git a/scripts/utils.py b/scripts/utils.py
index d135644013af1e31f23692dd1bad3cde9f8c4560..0c4bcb4eedeb0bec31f1f6dd26f4e84fbd5422f7 100644
--- a/scripts/utils.py
+++ b/scripts/utils.py
@@ -3,7 +3,6 @@ from math import pi
 
 import bpy
 import numpy as np
-from mathutils import Matrix, Vector, Euler
 
 
 # Maths utils