diff --git a/.gitignore b/.gitignore
index 7333790336cfa4496f842b65a3be0bea6c533516..3ce84ab10886353e8c26818a740daca6da81279e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,6 @@
 car_models/*
 output/*
+output*/*
 mh_models/*
 
 backgrounds/*
diff --git a/base.blend b/base.blend
index f75511705b22c7bbc7a69877a0e7ca6b358ee7db..5c3a27ee5eb9f3e07b8355a583f6982780f72106 100644
Binary files a/base.blend and b/base.blend differ
diff --git a/global_script.py b/global_script.py
index 4bf50964e6197b6c29409a505ecd529227b5e28c..7a0e2129d453185b1178c289e730e1170fece170 100644
--- a/global_script.py
+++ b/global_script.py
@@ -5,6 +5,8 @@ import os
 import sys
 import shutil
 from collections import OrderedDict
+from math import prod
+import time
 
 import bpy
 from mathutils import Vector
@@ -102,14 +104,17 @@ camera_data.type = 'PERSP'
 camera_data.lens_unit = 'FOV'
 camera_data.angle = utils.r(68)
 
-C.scene.render.resolution_x = 640
-C.scene.render.resolution_y = 480
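+# Render at a reduced size; the height is derived so the original 640x480 (4:3) aspect ratio is kept.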
+C.scene.render.resolution_x = 256
+C.scene.render.resolution_y = int(480 / 640 * 256)
 
 camera_object = D.objects.new('Camera', camera_data)
 C.scene.collection.objects.link(camera_object)
 
-camera_object.location = [-.97, -0.11, 0.68]
-camera_object.rotation_euler = utils.r([72, 8, -82])
+camera_object.location = [-.97, -0.24, 0.68]
+# camera_object.location = [-.97, -0.11, 0.68]
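+# Base camera rotation; a small random jitter around its X axis is added per scene below.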
+camera_rotation_0 = utils.r(Vector([72, 8, -75]))
+camera_object.rotation_euler = camera_rotation_0
+# camera_object.rotation_euler = utils.r([72, 8, -82])
 
 # set background
 back_folder = abs_path("backgrounds")
@@ -137,7 +142,7 @@ bpy.ops.import_image.to_plane(
 image_holder = C.active_object
 image_holder.name = 'Image_holder'
 image_holder.location = (4, 1.5, 1.3)
-image_holder.rotation_euler.z = utils.r(-100)
+image_holder.rotation_euler.z = utils.r(-95)
 image_holder.active_material.shadow_method = 'NONE'
 
 # add light
@@ -176,12 +181,13 @@ C.scene.sun_pos_properties.use_day_of_year = True
 C.scene.sun_pos_properties.year = 2022
 C.scene.sun_pos_properties.day_of_year = 182
 
-fp = abs_path(r"output")
+fp = abs_path(r"output_temp")
 fp_img = os.path.join(fp, 'images')
 fp_ann_2D = os.path.join(fp, 'annots_2D')
 fp_ann_3D = os.path.join(fp, 'annots_3D')
 
 info_path = os.path.join(fp, 'infos.json')
+cam_path = os.path.join(fp, 'cameras.json')
 scenes_ids = OrderedDict()
 if not CONTINUE or not os.path.isfile(info_path):
     if os.path.isdir(fp):
@@ -194,21 +200,22 @@ if not CONTINUE or not os.path.isfile(info_path):
 
 frame_rate = 25
 nb_scene = 1
-nb_pose = 30
+nb_pose = 1
 human_loader.max_len = min(human_loader.max_len, nb_scene)
 ratio_conf_man = int(nb_scene / len(human_loader.human_paths))
 if CONTINUE:
     with open(info_path) as f_info:
-        scene_ids = json.load(f_info, object_pairs_hook=OrderedDict)['id_max_scenes']
+        scenes_ids = json.load(f_info, object_pairs_hook=OrderedDict)['id_max_scenes']
 
-    human_loader.human_paths = [hp for hp in human_loader.human_paths if hp not in scene_ids]
+    human_loader.human_paths = [hp for hp in human_loader.human_paths if hp not in scenes_ids]
 
 man = None
-for sc in range(nb_scene):
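+# When continuing a previous run, skip the scenes already generated (ratio_conf_man scenes per human model).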
+for sc in range(ratio_conf_man * len(scenes_ids), nb_scene):
     # Random car
     car = car_picker()
     car_targets = {side: [ch for ch in car.children if f'Target_{side.upper()}' in ch.name] for side in 'lr'}
-    nb_targets = sum([len(v) for v in car_targets.values()])
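+    # One constrained pose is generated per (left, right) pair of car targets.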
+    nb_targets = prod(len(v) for v in car_targets.values())
+    print(f'{nb_targets} left/right target combinations')
     # Random person
     if ratio_conf_man < 1:
         if not sc % 10:
@@ -247,7 +254,7 @@ for sc in range(nb_scene):
     image_holder.location.y = 1.5 + random.uniform(-0.3, 0.3)
 
     # Camera movement
-    camera_object.rotation_euler = utils.r([72 + random.randint(-2, 2), 8, -82])
+    camera_object.rotation_euler = camera_rotation_0 + utils.r(Vector([random.randint(-2, 2), 0, 0]))
 
     C.scene.render.filepath = fp
     C.scene.render.image_settings.file_format = 'PNG'
@@ -257,12 +264,16 @@ for sc in range(nb_scene):
 
     file_root_name = f'{list(scenes_ids).index(human_path)}_{scenes_ids[human_path]}'
 
-    with open(os.path.join(fp, 'cameras.json'), 'a+') as f_cam:
-        previous_cameras = f_cam.read()
-        if previous_cameras:
-            previous_cameras = json.loads(previous_cameras)
-        else:
-            previous_cameras = {}
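+    # Reload any previously saved camera matrices so entries accumulate across scenes.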
+    if os.path.isfile(cam_path):
+        with open(cam_path, 'r') as f_cam:
+            previous_cameras = f_cam.read()
+            if previous_cameras:
+                previous_cameras = json.loads(previous_cameras)
+            else:
+                previous_cameras = {}
+    else:
+        previous_cameras = {}
+    with open(cam_path, 'w') as f_cam:
         previous_cameras[file_root_name] = {
             'P': utils.mat_to_list(P),
             'K': utils.mat_to_list(K),
@@ -276,16 +287,24 @@ for sc in range(nb_scene):
     with open(os.path.join(fp_ann_2D, f'annotations_{file_root_name}.csv'), 'w') as annot_file_2D, \
             open(os.path.join(fp_ann_3D, f'annotations_{file_root_name}.csv'), 'w') as annot_file_3D:
         bone_lbls = list(man.pose.bones.keys())
-        annot_file_2D.write(';'.join([lbl for bone in bone_lbls for lbl in [bone + k for k in ['_x', '_y']]]) + '\n')
+        face_lbls = ['nose', 'eye_l', 'eye_r', 'ear_l', 'ear_r']
+        annot_file_2D.write(';'.join([lbl for bone in bone_lbls + face_lbls for lbl in [bone + k for k in ['_x', '_y']]]) + '\n')
         annot_file_3D.write(
-            ';'.join([lbl for bone in bone_lbls for lbl in [bone + k for k in ['_X', '_Y', '_Z']]]) + '\n')
+            ';'.join([lbl for bone in bone_lbls + face_lbls for lbl in [bone + k for k in ['_X', '_Y', '_Z']]]) + '\n')
 
         for po in range(nb_pose):
             C.scene.frame_set(po * frame_rate)
-            use_targets = nb_pose - po <= (nb_targets // 2) ** 2
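+            # Use the car IK targets only for the last nb_targets poses; earlier poses stay fully random.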
+            use_targets = nb_pose - po - 1 < nb_targets
             # use_targets = False
             human.switch_constraints(man, enable=not use_targets)
-            random_pose.random_pose_ik(man, targets=car_targets if use_targets else None)
+            if nb_pose < nb_targets or not use_targets:
+                id_targets = None
+            else:
+                # Decode the remaining-pose index into one (left, right) pair of target indices
+                id_pair = nb_pose - po - 1
+                id_targets = {'l': id_pair % len(car_targets['l']),
+                              'r': id_pair // len(car_targets['l'])}
+                print(f'pose {po}: target pair {id_pair} -> {id_targets}')
+            random_pose.random_pose_ik(man, targets=car_targets if use_targets else None, id_targets=id_targets)
 
             bpy.ops.object.mode_set(mode='OBJECT')
 
@@ -315,17 +334,28 @@ for sc in range(nb_scene):
                 bone_2d = P @ bone_3d
                 bone_2d /= bone_2d[-1]
                 annotations_2D.append(f"{bone_2d[0]:.2f};{bone_2d[1]:.2f}")
+
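+            # Project the approximate face markers with the same camera matrix as the bones.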
+            for bone_3d in human.get_face(man).values():
+                annotations_3D.append(f"{bone_3d[0]:.3f};{bone_3d[1]:.3f};{bone_3d[2]:.3f}")
+
+                bone_2d = P @ bone_3d
+                bone_2d /= bone_2d[-1]
+                annotations_2D.append(f"{bone_2d[0]:.2f};{bone_2d[1]:.2f}")
+
             annot_file_2D.write(';'.join(annotations_2D) + '\n')
             annot_file_3D.write(';'.join(annotations_3D) + '\n')
 
+    with open(info_path, 'w') as f_infos:
+        json.dump({
+            'models': list(scenes_ids),
+            'id_max_scenes': scenes_ids
+        }, f_infos, indent=4)
+
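+    # Pause after every 20 scenes, presumably to throttle machine load between batches.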
+    if sc % 20 == 19:
+        time.sleep(150)
+
 C.scene.frame_end = int(frame_rate * (nb_pose - 0.5))
 utils.select_only(man)
 bpy.ops.object.mode_set(mode='POSE')
 
-with open(info_path, 'w') as f:
-    json.dump({
-        'models': list(scenes_ids),
-        'id_max_scenes': scenes_ids
-    }, f, indent=4)
-
 print('Done', '#' * 25)
diff --git a/scripts/camera_proj.py b/scripts/camera_proj.py
index 9fd0b76f6bcdc6f2989ec4bba0de3cd38e07fe26..3f7b7a604e139dfd62804be6ac9ab101b7934cc8 100644
--- a/scripts/camera_proj.py
+++ b/scripts/camera_proj.py
@@ -1,48 +1,65 @@
 import bpy
-import bpy_extras
-from mathutils import Matrix
-from mathutils import Vector
+from mathutils import Matrix, Vector
 
 
 # ---------------------------------------------------------------
 # 3x4 P matrix from Blender camera
 # ---------------------------------------------------------------
 
+# BKE_camera_sensor_size
+def get_sensor_size(sensor_fit, sensor_x, sensor_y):
+    if sensor_fit == 'VERTICAL':
+        return sensor_y
+    return sensor_x
+
+
+# BKE_camera_sensor_fit
+def get_sensor_fit(sensor_fit, size_x, size_y):
+    if sensor_fit == 'AUTO':
+        if size_x >= size_y:
+            return 'HORIZONTAL'
+        else:
+            return 'VERTICAL'
+    return sensor_fit
+
+
 # Build intrinsic camera parameters from Blender camera data
-# From https://blender.stackexchange.com/a/38210/153600
+#
 # See notes on this in
 # blender.stackexchange.com/questions/15102/what-is-blenders-camera-projection-matrix-model
+# as well as
+# https://blender.stackexchange.com/a/120063/3581
 def get_calibration_matrix_K_from_blender(camd):
-    f_in_mm = camd.lens
+    if camd.type != 'PERSP':
+        raise ValueError('Non-perspective cameras not supported')
     scene = bpy.context.scene
-    resolution_x_in_px = scene.render.resolution_x
-    resolution_y_in_px = scene.render.resolution_y
+    f_in_mm = camd.lens
     scale = scene.render.resolution_percentage / 100
-    sensor_width_in_mm = camd.sensor_width
-    sensor_height_in_mm = camd.sensor_height
-    pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y
-    if (camd.sensor_fit == 'VERTICAL'):
-        # the sensor height is fixed (sensor fit is horizontal),
-        # the sensor width is effectively changed with the pixel aspect ratio
-        s_u = resolution_x_in_px * scale / sensor_width_in_mm / pixel_aspect_ratio
-        s_v = resolution_y_in_px * scale / sensor_height_in_mm
-    else:  # 'HORIZONTAL' and 'AUTO'
-        # the sensor width is fixed (sensor fit is horizontal),
-        # the sensor height is effectively changed with the pixel aspect ratio
-        pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y
-        s_u = resolution_x_in_px * scale / sensor_width_in_mm
-        s_v = resolution_y_in_px * scale * pixel_aspect_ratio / sensor_height_in_mm
+    resolution_x_in_px = scale * scene.render.resolution_x
+    resolution_y_in_px = scale * scene.render.resolution_y
+    sensor_size_in_mm = get_sensor_size(camd.sensor_fit, camd.sensor_width, camd.sensor_height)
+    sensor_fit = get_sensor_fit(
+        camd.sensor_fit,
+        scene.render.pixel_aspect_x * resolution_x_in_px,
+        scene.render.pixel_aspect_y * resolution_y_in_px
+    )
+    pixel_aspect_ratio = scene.render.pixel_aspect_y / scene.render.pixel_aspect_x
+    if sensor_fit == 'HORIZONTAL':
+        view_fac_in_px = resolution_x_in_px
+    else:
+        view_fac_in_px = pixel_aspect_ratio * resolution_y_in_px
+    pixel_size_mm_per_px = sensor_size_in_mm / f_in_mm / view_fac_in_px
+    s_u = 1 / pixel_size_mm_per_px
+    s_v = 1 / pixel_size_mm_per_px / pixel_aspect_ratio
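+    # s_u and s_v are the focal lengths expressed in pixels along x and y.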
 
     # Parameters of intrinsic calibration matrix K
-    alpha_u = f_in_mm * s_u
-    alpha_v = f_in_mm * s_v
-    u_0 = resolution_x_in_px * scale / 2
-    v_0 = resolution_y_in_px * scale / 2
+    u_0 = resolution_x_in_px / 2 - camd.shift_x * view_fac_in_px
+    v_0 = resolution_y_in_px / 2 + camd.shift_y * view_fac_in_px / pixel_aspect_ratio
     skew = 0  # only use rectangular pixels
 
     K = Matrix(
-        ((alpha_u, skew, u_0),
-         (0, alpha_v, v_0),
+        ((s_u, skew, u_0),
+         (0, s_v, v_0),
          (0, 0, 1)))
     return K
 
@@ -71,20 +88,18 @@ def get_3x4_RT_matrix_from_blender(cam):
     # Transpose since the rotation is object rotation,
     # and we want coordinate rotation
     # R_world2bcam = cam.rotation_euler.to_matrix().transposed()
-    # T_world2bcam = -1*R_world2bcam * location
+    # T_world2bcam = -1*R_world2bcam @ location
     #
     # Use matrix_world instead to account for all constraints
-    # location, rotation = cam.matrix_world.decompose()[0:2]
-    location, rotation = cam.matrix_basis.decompose()[0:2]
+    location, rotation = cam.matrix_world.decompose()[0:2]
     R_world2bcam = rotation.to_matrix().transposed()
 
     # Convert camera location to translation vector used in coordinate changes
-    # T_world2bcam = -1*R_world2bcam*cam.location
+    # T_world2bcam = -1*R_world2bcam @ cam.location
     # Use location from matrix_world to account for constraints:
     T_world2bcam = -1 * R_world2bcam @ location
 
     # Build the coordinate transform matrix from world to computer vision camera
-    # NOTE: Use * instead of @ here for older versions of Blender
     R_world2cv = R_bcam2cv @ R_world2bcam
     T_world2cv = R_bcam2cv @ T_world2bcam
 
@@ -104,15 +119,24 @@ def get_3x4_P_matrix_from_blender(cam):
 
 
 # ----------------------------------------------------------
-# Alternate 3D coordinates to 2D pixel coordinate projection code
-# adapted from https://blender.stackexchange.com/questions/882/how-to-find-image-coordinates-of-the-rendered-vertex?lq=1
-# to have the y axes pointing up and origin at the top-left corner
-def project_by_object_utils(cam, point):
-    scene = bpy.context.scene
-    co_2d = bpy_extras.object_utils.world_to_camera_view(scene, cam, point)
-    render_scale = scene.render.resolution_percentage / 100
-    render_size = (
-        int(scene.render.resolution_x * render_scale),
-        int(scene.render.resolution_y * render_scale),
-    )
-    return Vector((co_2d.x * render_size[0], render_size[1] - co_2d.y * render_size[1]))
+if __name__ == "__main__":
+    # Insert your camera name here
+    cam = bpy.data.objects['Camera']
+    P, K, RT = get_3x4_P_matrix_from_blender(cam)
+    print("K")
+    print(K)
+    print("RT")
+    print(RT)
+    print("P")
+    print(P)
+
+    print("==== 3D Cursor projection ====")
+    pc = P @ bpy.context.scene.cursor.location
+    pc /= pc[2]
+    print("Projected cursor location")
+    print(pc)
+
+    # Bonus code: save the 3x4 P matrix into a plain text file
+    # Don't forget to import numpy for this
+    # nP = numpy.matrix(P)
+    # numpy.savetxt("/tmp/P3x4.txt", nP)  # to select precision, use e.g. fmt='%.2f'
diff --git a/scripts/camera_proj_old.py b/scripts/camera_proj_old.py
new file mode 100644
index 0000000000000000000000000000000000000000..9fd0b76f6bcdc6f2989ec4bba0de3cd38e07fe26
--- /dev/null
+++ b/scripts/camera_proj_old.py
@@ -0,0 +1,118 @@
+import bpy
+import bpy_extras
+from mathutils import Matrix
+from mathutils import Vector
+
+
+# ---------------------------------------------------------------
+# 3x4 P matrix from Blender camera
+# ---------------------------------------------------------------
+
+# Build intrinsic camera parameters from Blender camera data
+# From https://blender.stackexchange.com/a/38210/153600
+# See notes on this in
+# blender.stackexchange.com/questions/15102/what-is-blenders-camera-projection-matrix-model
+def get_calibration_matrix_K_from_blender(camd):
+    f_in_mm = camd.lens
+    scene = bpy.context.scene
+    resolution_x_in_px = scene.render.resolution_x
+    resolution_y_in_px = scene.render.resolution_y
+    scale = scene.render.resolution_percentage / 100
+    sensor_width_in_mm = camd.sensor_width
+    sensor_height_in_mm = camd.sensor_height
+    pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y
+    if (camd.sensor_fit == 'VERTICAL'):
+        # the sensor height is fixed (sensor fit is vertical),
+        # the sensor width is effectively changed with the pixel aspect ratio
+        s_u = resolution_x_in_px * scale / sensor_width_in_mm / pixel_aspect_ratio
+        s_v = resolution_y_in_px * scale / sensor_height_in_mm
+    else:  # 'HORIZONTAL' and 'AUTO'
+        # the sensor width is fixed (sensor fit is horizontal),
+        # the sensor height is effectively changed with the pixel aspect ratio
+        pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y
+        s_u = resolution_x_in_px * scale / sensor_width_in_mm
+        s_v = resolution_y_in_px * scale * pixel_aspect_ratio / sensor_height_in_mm
+
+    # Parameters of intrinsic calibration matrix K
+    alpha_u = f_in_mm * s_u
+    alpha_v = f_in_mm * s_v
+    u_0 = resolution_x_in_px * scale / 2
+    v_0 = resolution_y_in_px * scale / 2
+    skew = 0  # only use rectangular pixels
+
+    K = Matrix(
+        ((alpha_u, skew, u_0),
+         (0, alpha_v, v_0),
+         (0, 0, 1)))
+    return K
+
+
+# Returns camera rotation and translation matrices from Blender.
+#
+# There are 3 coordinate systems involved:
+#    1. The World coordinates: "world"
+#       - right-handed
+#    2. The Blender camera coordinates: "bcam"
+#       - x is horizontal
+#       - y is up
+#       - right-handed: negative z look-at direction
+#    3. The desired computer vision camera coordinates: "cv"
+#       - x is horizontal
+#       - y is down (to align to the actual pixel coordinates
+#         used in digital images)
+#       - right-handed: positive z look-at direction
+def get_3x4_RT_matrix_from_blender(cam):
+    # bcam stands for blender camera
+    R_bcam2cv = Matrix(
+        ((1, 0, 0),
+         (0, -1, 0),
+         (0, 0, -1)))
+
+    # Transpose since the rotation is object rotation,
+    # and we want coordinate rotation
+    # R_world2bcam = cam.rotation_euler.to_matrix().transposed()
+    # T_world2bcam = -1*R_world2bcam * location
+    #
+    # Use matrix_world instead to account for all constraints
+    # location, rotation = cam.matrix_world.decompose()[0:2]
+    location, rotation = cam.matrix_basis.decompose()[0:2]
+    R_world2bcam = rotation.to_matrix().transposed()
+
+    # Convert camera location to translation vector used in coordinate changes
+    # T_world2bcam = -1*R_world2bcam*cam.location
+    # Use location from matrix_world to account for constraints:
+    T_world2bcam = -1 * R_world2bcam @ location
+
+    # Build the coordinate transform matrix from world to computer vision camera
+    # NOTE: Use * instead of @ here for older versions of Blender
+    R_world2cv = R_bcam2cv @ R_world2bcam
+    T_world2cv = R_bcam2cv @ T_world2bcam
+
+    # put into 3x4 matrix
+    RT = Matrix((
+        R_world2cv[0][:] + (T_world2cv[0],),
+        R_world2cv[1][:] + (T_world2cv[1],),
+        R_world2cv[2][:] + (T_world2cv[2],)
+    ))
+    return RT
+
+
+def get_3x4_P_matrix_from_blender(cam):
+    K = get_calibration_matrix_K_from_blender(cam.data)
+    RT = get_3x4_RT_matrix_from_blender(cam)
+    return K @ RT, K, RT
+
+
+# ----------------------------------------------------------
+# Alternate 3D coordinates to 2D pixel coordinate projection code
+# adapted from https://blender.stackexchange.com/questions/882/how-to-find-image-coordinates-of-the-rendered-vertex?lq=1
+# to have the y axes pointing up and origin at the top-left corner
+def project_by_object_utils(cam, point):
+    scene = bpy.context.scene
+    co_2d = bpy_extras.object_utils.world_to_camera_view(scene, cam, point)
+    render_scale = scene.render.resolution_percentage / 100
+    render_size = (
+        int(scene.render.resolution_x * render_scale),
+        int(scene.render.resolution_y * render_scale),
+    )
+    return Vector((co_2d.x * render_size[0], render_size[1] - co_2d.y * render_size[1]))
diff --git a/scripts/human.py b/scripts/human.py
index 676aeabdb3295241c84ea2418f7c5756c16150bd..b94903ee02e17a43f433ed4742d293e415399c53 100644
--- a/scripts/human.py
+++ b/scripts/human.py
@@ -6,7 +6,8 @@ import glob
 import os
 
 import numpy as np
-from mathutils import Vector
+from mathutils import Vector, Matrix
+import colorsys
 
 from scripts import utils
 
@@ -20,7 +21,7 @@ D = bpy.data
 # Load clothes list
 with open(r"mh_models\mh_mass_produce.json") as f:
     data = json.load(f)
-    hairs = data['hair']['allNames']
+    hairs = [name.replace(' ', '_') for name in data['hair']['allNames']]
     tops = []
     bottoms = []
     suits = []
@@ -40,10 +41,12 @@ class Human:
 
         self.init_textures()
         self.init_bones()
-        self.top = self.init_color_ramp(tops + suits)
-        self.bot = self.init_color_ramp(bottoms + suits)
+        self.top = self.init_color_clothes(tops + suits)
+        self.bot = self.init_color_clothes(bottoms + suits)
 
-    def init_color_ramp(self, clothes):
+        self.hairs = self.init_color_hairs(hairs)
+
+    def init_color_clothes(self, clothes):
         mat = None
         for obj in self.model.children:
             for clo in clothes:
@@ -58,6 +61,7 @@ class Human:
                     node_ramp = nodes.new("ShaderNodeValToRGB")
                     node_ramp.color_ramp.elements.remove(node_ramp.color_ramp.elements[0])
                     node_ramp.color_ramp.elements[0].position = 1.
+                    node_ramp.color_ramp.color_mode = 'HSV'
 
                     input_links = nodes['Principled BSDF'].inputs[0].links
                     if len(input_links):
@@ -73,12 +77,45 @@ class Human:
                     else:
                         mat.node_tree.links.new(node_ramp.outputs[0], nodes['Principled BSDF'].inputs[0])
 
-                    node_ramp.color_ramp.elements[0].color = [random.random() for _ in range(3)] + [1]
-                    node_ramp.color_ramp.elements[0].color = [random.random() for _ in range(3)] + [1]
+                    node_ramp.color_ramp.elements[0].color = random_HSV()
                     break
 
         return mat
 
+    def init_color_hairs(self, hairs):
+        mat = None
+        for obj in self.model.children:
+            for hair in hairs:
+                if hair.lower() in obj.name.lower():
+                    if not len(obj.material_slots):
+                        break
+
+                    utils.select_only(obj)
+                    mat = obj.material_slots[0].material
+                    mat.use_nodes = True
+                    obj.active_material = mat
+                    nodes = mat.node_tree.nodes
+
+                    input_links = nodes['Principled BSDF'].inputs[0].links
+                    if len(input_links):
+                        img_node = nodes['Principled BSDF'].inputs[0].links[0].from_node
+
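+                        # Desaturate the original hair texture, then add a random tint on top of it.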
+                        node_hsv = nodes.new("ShaderNodeHueSaturation")
+                        node_hsv.inputs[1].default_value = 0.
+
+                        node_mix = nodes.new("ShaderNodeMixRGB")
+                        node_mix.blend_type = 'ADD'
+                        node_mix.inputs[0].default_value = 1.
+                        node_mix.inputs[2].default_value = random_color_hair()
+
+                        mat.node_tree.links.new(img_node.outputs[0], node_hsv.inputs[4])
+                        mat.node_tree.links.new(node_hsv.outputs[0], node_mix.inputs[1])
+                        mat.node_tree.links.new(node_mix.outputs[0], nodes['Principled BSDF'].inputs[0])
+
+                    break
+        return mat
+
     def init_bones(self):
         armature = bpy.data.armatures[self.model.name]
         bpy.ops.object.mode_set(mode='EDIT')
@@ -127,6 +164,9 @@ class Human:
             self.model.pose.bones[f'hand_{s}_IK'].location = Vector((0, 5, -10))
 
         for i in '123':
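+            # Constrain spine IK: X bend gets a lower limit (20 deg on spine_03, 0 on the others) while Z twist stays locked.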
+            self.model.pose.bones[f'spine_0{i}'].lock_ik_x = False
+            self.model.pose.bones[f'spine_0{i}'].use_ik_limit_x = True
+            self.model.pose.bones[f'spine_0{i}'].ik_min_x = utils.r(20) if i == '3' else 0
             self.model.pose.bones[f'spine_0{i}'].lock_ik_z = True
 
         bpy.ops.object.mode_set(mode='OBJECT')
@@ -147,23 +187,60 @@ class Human:
                 continue
 
     def __call__(self, car=None):
-        if self.top is not None:
-            node = self.top.node_tree.nodes["ColorRamp"]
-            node.color_ramp.elements[0].color = [random.random() for _ in range(3)] + [1, ]
-        else:
-            print('Top ', self.model.name)
+        for cloth, name in ((self.top, 'Top'), (self.bot, 'Bot')):
+            if cloth is not None:
+                cloth.node_tree.nodes["ColorRamp"].color_ramp.elements[0].color = random_HSV()
+                cloth.node_tree.nodes["Mix"].inputs[0].default_value = random.uniform(0.2, 0.6)
+            else:
+                print(name, self.model.name)
 
-        if self.bot is not None:
-            node = self.bot.node_tree.nodes["ColorRamp"]
-            node.color_ramp.elements[0].color = [random.random() for _ in range(3)] + [1, ]
+        if "Mix" in self.hairs.node_tree.nodes:
+            self.hairs.node_tree.nodes["Mix"].inputs[2].default_value = random_color_hair()
         else:
-            print('Bot ', self.model.name)
+            self.hairs.node_tree.nodes["Principled BSDF"].inputs[0].default_value = random_color_hair()
 
         set_bounds(self.model, car)
 
         return self.model
 
 
+def get_face(model):
+    # Compute approximate coordinates of face markers
+    previous_mode = bpy.context.mode
+    bpy.ops.object.mode_set(mode='OBJECT')
+    matrix_world = model.matrix_world.copy()
+    head = model.pose.bones["head"]
+
+    locations = {
+        'nose': (0, -0.2, 12.1),
+        'eye_l': (3.1, 3.5, 9),
+        'eye_r': (-3.1, 3.5, 9),
+        'ear_l': (7.6, 1.8, 0.5),
+        'ear_r': (-7.6, 1.8, 0.5),
+    }
+
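+    # The marker offsets above are expressed in head-bone local space and scaled down by 10 below.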
+    locations = {k: Vector(v) / 10 for k, v in locations.items()}
+    face_3D = {k: matrix_world @ head.matrix @ v for k, v in locations.items()}
+
+    bpy.ops.object.mode_set(mode=previous_mode)
+    return face_3D
+
+
+def random_HSV():
+    color_hsv = [random.random() for _ in range(3)]
+    color_hsv[1] *= 0.8  # cap saturation so the clothes colors stay plausible
+    rgb_color = colorsys.hsv_to_rgb(*color_hsv)
+
+    return list(rgb_color) + [1, ]
+
+
+def random_color_hair():
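+    # Hue ~0.06-0.12 spans natural brown-to-blond tones; cubing the value biases toward darker hair.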
+    color_hsv = [random.uniform(0.06, 0.12), 0.7, random.random() ** 3 * 0.8]
+    rgb_color = colorsys.hsv_to_rgb(*color_hsv)
+
+    return list(rgb_color) + [1, ]
+
+
 def switch_constraints(model, enable=False):
     for s in 'lr':
         hand_ik = model.pose.bones[f'hand_{s}_IK']
diff --git a/scripts/random_pose.py b/scripts/random_pose.py
index 950befee9ecf380af44258aa00f7e9022fd5d647..8c3987506e82d0ecd324032e708d1cbbb77021fb 100644
--- a/scripts/random_pose.py
+++ b/scripts/random_pose.py
@@ -105,7 +105,7 @@ def hand_pose(pose, side, grasp=None):
             pose.bones[f'{finger}_{i + 1:02}_{side}'].rotation_euler.x = angles[i] * solo_ratio
 
 
-def random_pose_ik(subject, auto_ik=False, targets=None):
+def random_pose_ik(subject, auto_ik=False, targets=None, id_targets=None):
     """
         1- reset and fix legs
         2- randomize back
@@ -210,7 +210,14 @@ def random_pose_ik(subject, auto_ik=False, targets=None):
             temp_rota = rota(f'upperarm_{s}') + rota(f'lowerarm_{s}')
             # target = targets_test[s]
         else:
-            target = random.choice(targets[s])
+            if id_targets is None:
+                target = random.choice(targets[s])
+            else:
+                try:
+                    target = targets[s][id_targets[s]]
+                except IndexError:
+                    print(f'Invalid target index for side {s!r}: {id_targets} (available: {len(targets[s])})')
+                    raise
             pose.bones[f'hand_{s}_IK'].rotation_euler = Vector((0, 0, 0))
             pose.bones[f'hand_{s}_IK'].rotation_euler = (
                 ((matrix_world @ pose.bones[f'hand_{s}_IK'].matrix).inverted() @ target.matrix_world).to_euler())