diff --git a/global_script.py b/global_script.py
index 84c1ebe94adc8e0c5cb201dd76e5bf8c1fa48c22..5dddcef7aecf7594fbff16f76f51dfd24bab5935 100644
--- a/global_script.py
+++ b/global_script.py
@@ -18,14 +18,14 @@ sys.path.append(WORKING_DIR)
 
 from scripts import random_pose
 from scripts import utils
-from scripts import camera_proj
+from scripts import camera_util
 from scripts import human
 
 import importlib
 
 importlib.reload(random_pose)
 importlib.reload(utils)
-importlib.reload(camera_proj)
+importlib.reload(camera_util)
 importlib.reload(human)
 
 from scripts.human import HumanLoader
@@ -75,11 +75,11 @@ def main():
     cars = []
     for src_path, name in {
-        r"car_models\suv_car\car.blend": 'SUV',
-        r"car_models\red_car\red.blend": 'Red',
-        r"car_models\pickup_car\pickup.blend": 'PickUp',
-        r"car_models\family_car\family_car.blend": 'Family',
-        r"car_models\coupe_car\coupe_car.blend": 'Coupe',
+        r"car_models\suv_car\car_open.blend": 'SUV',
+        r"car_models\red_car\red_open.blend": 'Red',
+        r"car_models\pickup_car\pickup_open.blend": 'PickUp',
+        r"car_models\family_car\family_car_open.blend": 'Family',
+        r"car_models\coupe_car\coupe_car_open.blend": 'Coupe',
         r"car_models\truck\truck_open.blend": 'Truck',
     }.items():
 
@@ -112,9 +112,9 @@ def main():
     C.scene.collection.objects.link(camera_object)
     camera_object.rotation_mode = 'XYZ'
-    camera_object.location = [-.97, -0.1, 0.68]
-    camera_rotation_0 = utils.r(Vector([73, 8, -82]))
-    camera_object.rotation_euler = camera_rotation_0
+    cam_poser = camera_util.CamPoser([-.97, -0.1, 0.68], utils.r(Vector([73, 8, -82])))
+    camera_object.location = cam_poser.def_loc
+    camera_object.rotation_euler = cam_poser.def_rot
 
     # set background
     back_folder = abs_path("backgrounds")
@@ -141,7 +141,8 @@ def main():
     image_holder = C.active_object
     image_holder.name = 'Image_holder'
-    image_holder.location = (4, 1.5, 1.3)
+    img_holder_location = (4, 1.5, 1.3)
+    image_holder.location = img_holder_location
     image_holder.rotation_euler.z = utils.r(-95)
     image_holder.active_material.shadow_method = 'NONE'
 
@@ -186,6 +187,7 @@ def main():
     fp = abs_path(r"output_temp")
     fp_img = os.path.join(fp, 'images')
     fp_ann_2D = os.path.join(fp, 'annots_2D')
+    fp_vis_2D = os.path.join(fp, 'annots_vis_2D')
     fp_ann_3D = os.path.join(fp, 'annots_3D')
     info_path = os.path.join(fp, 'infos.json')
 
@@ -198,6 +200,7 @@ def main():
 
     os.mkdir(fp_img)
     os.mkdir(fp_ann_2D)
+    os.mkdir(fp_vis_2D)
     os.mkdir(fp_ann_3D)
 
     frame_rate = 25
@@ -233,7 +236,8 @@ def main():
                man = human_loader.next(car=car)
            else:
                # human.set_bounds(man, car)
-               man = man(car=car)
+               # man = man(car=car)
+               man.refresh(car=car)
 
            man_model = man.model
@@ -263,13 +267,15 @@ def main():
            image_holder.location.y = 1.5 + random.uniform(-0.3, 0.3)
 
            # Camera movement
-           camera_object.rotation_euler = camera_rotation_0 + utils.r(Vector([random.randint(-2, 2), 0, 0]))
+           camera_object.location, camera_object.rotation_euler = cam_poser.random()
+           # camera_data.angle = utils.r(random.uniform(50, 80))
+           camera_data.angle = utils.r(random.uniform(40, 70))
 
            C.scene.render.filepath = fp
            C.scene.render.image_settings.file_format = 'PNG'
            C.scene.camera = camera_object
-           P, K, RT = camera_proj.get_3x4_P_matrix_from_blender(camera_object)
+           P, K, RT = camera_util.get_3x4_P_matrix_from_blender(camera_object)
 
            file_root_name = f'{list(scenes_ids).index(human_path)}_{scenes_ids[human_path]}'
@@ -293,6 +299,7 @@ def main():
            man_model.animation_data_clear()
            # Exemple: 150k / 200 / 2 = 1500 poses
            with open(os.path.join(fp_ann_2D, f'annotations_{file_root_name}.csv'), 'w') as annot_file_2D, \
+                   open(os.path.join(fp_vis_2D, f'vis_{file_root_name}.csv'), 'w') as vis_file_2D, \
                    open(os.path.join(fp_ann_3D, f'annotations_{file_root_name}.csv'), 'w') as annot_file_3D:
                bone_lbls = list(man_model.pose.bones.keys())
                bone_lbls = [
@@ -305,19 +312,25 @@ def main():
                annot_file_3D.write(
                    ';'.join(
                        [lbl for bone in full_lbls for lbl in [bone + k for k in ['_X', '_Y', '_Z']]]) + '\n')
+               vis_file_2D.write(';'.join([lbl for lbl in full_lbls]) + '\n')
 
                for po in range(nb_pose):
                    C.scene.frame_set(po * frame_rate)
 
-                   use_targets = nb_pose - po - 1 < nb_targets
+                   # Max 30% of driving images
+                   use_targets = nb_pose - po - 1 < (min(int(0.3 * nb_pose), nb_targets))
                    # use_targets = False
                    human.switch_constraints(man_model, enable=not use_targets)
                    if nb_pose < nb_targets or not use_targets:
                        id_targets = None
                    else:
-                       id_targets = {'l': (nb_pose - po - 1) % len(car_targets['l']),
-                                     'r': (nb_pose - po - 1) // len(car_targets['l'])}
-                   random_pose.random_pose_ik(man_model, targets=car_targets if use_targets else None, id_targets=id_targets)
+                       if int(0.3 * nb_pose) < nb_targets:
+                           id_targets = {k: random.randint(0, len(car_targets[k]) - 1) for k in 'lr'}
+                       else:
+                           id_targets = {'l': (nb_pose - po - 1) % len(car_targets['l']),
+                                         'r': (nb_pose - po - 1) // len(car_targets['l'])}
+                   random_pose.random_pose_ik(man_model, targets=car_targets if use_targets else None,
+                                              id_targets=id_targets)
 
                    bpy.ops.object.mode_set(mode='OBJECT')
@@ -340,6 +353,7 @@ def main():
 
                    annotations_2D = []
                    annotations_3D = []
+                   vis_2D = []
                    for lbl in bone_lbls:
                        if '_tail' in lbl:
                            bone_3d = utils.get_tail_pose(lbl[:-5], man_model)
@@ -350,16 +364,20 @@ def main():
                        bone_2d = P @ bone_3d
                        bone_2d /= bone_2d[-1]
                        annotations_2D.append(f"{bone_2d[0]:.2f};{bone_2d[1]:.2f}")
+                       vis_2D.append(1)
 
+                   vis_face = human.get_vis_face(man_model, camera_object)
                    for lbl, bone_3d in human.get_face(man_model).items():
                        annotations_3D.append(f"{bone_3d[0]:.3f};{bone_3d[1]:.3f};{bone_3d[2]:.3f}")
                        bone_2d = P @ bone_3d
                        bone_2d /= bone_2d[-1]
                        annotations_2D.append(f"{bone_2d[0]:.2f};{bone_2d[1]:.2f}")
+                       vis_2D.append(1 if vis_face[lbl] is None else 0)
 
                    annot_file_2D.write(';'.join(annotations_2D) + '\n')
                    annot_file_3D.write(';'.join(annotations_3D) + '\n')
+                   vis_file_2D.write(';'.join([str(v) for v in vis_2D]) + '\n')
 
    with open(info_path, 'w') as f_infos:
        json.dump({
diff --git a/scripts/camera_proj.py b/scripts/camera_util.py
similarity index 85%
rename from scripts/camera_proj.py
rename to scripts/camera_util.py
index 8e8192457cd2bd1a8da5e6f0f0693f394c76b1ee..fb13e4663f8630e0e2d2c2b804447e2961a14bb3 100644
--- a/scripts/camera_proj.py
+++ b/scripts/camera_util.py
@@ -1,6 +1,36 @@
 import bpy
+import random
+from math import cos, sin, atan, pi
 from mathutils import Matrix, Vector
+
+from scripts import utils
+
+import importlib
+
+importlib.reload(utils)
+from scripts.utils import *
+
+
+class CamPoser:
+    def __init__(self, loc, rot):
+        self.def_loc = loc
+        self.def_rot = rot
+
+    def random(self):
+        r = 0.98
+        # alpha = utils.r(random.random() * 35)
+        alpha = utils.r(random.random() * 45 - 20)
+        loc = [-r * cos(alpha), -r * sin(alpha), random.randint(55, 80) / 100]
+
+        rot = self.def_rot
+        rot[0] = atan(r / (loc[2] - 0.35)) + utils.r(random.randint(-15, 10))
+        # rot[0] = atan(r / (loc[2] - 0.35)) + utils.r(random.randint(-15, 10))
+        rot[1] = utils.r(random.randint(-5, 25))
+        # rot[2] = -pi / 2 + alpha
+        rot[2] = -pi / 2 + alpha * random.uniform(0.75, 1)
+
+        return loc, rot
+
 
 
 # ---------------------------------------------------------------
 # 3x4 P matrix from Blender camera
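Note (not part of the patch): as a sanity check on the ranges used by CamPoser.random() above, the sampling can be reproduced outside Blender. The sketch below assumes that utils.r() converts degrees to radians, which calls such as utils.r(Vector([73, 8, -82])) suggest but which is not shown in this diff; sample_cam_pose is purely illustrative.

# Standalone sketch of the CamPoser.random() sampling, assuming utils.r() behaves like math.radians().
import random
from math import atan, cos, pi, radians, sin


def sample_cam_pose(r=0.98):
    # Horizontal angle around the seat, roughly -20 to +25 degrees.
    alpha = radians(random.random() * 45 - 20)
    # Camera stays on a circle of radius r; height drawn between 0.55 m and 0.80 m.
    loc = [-r * cos(alpha), -r * sin(alpha), random.randint(55, 80) / 100]
    rot = [
        atan(r / (loc[2] - 0.35)) + radians(random.randint(-15, 10)),  # X rotation, near the default 73 deg
        radians(random.randint(-5, 25)),                               # Y rotation, small tilt
        -pi / 2 + alpha * random.uniform(0.75, 1),                     # Z rotation follows alpha
    ]
    return loc, rot


if __name__ == "__main__":
    for _ in range(3):
        print(sample_cam_pose())
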
diff --git a/scripts/human.py b/scripts/human.py
index 051390136869f913252e445d2b105b5f786fad47..7cb75036d33b1afcf35237a3df53d182c8a3ff60 100644
--- a/scripts/human.py
+++ b/scripts/human.py
@@ -195,7 +195,7 @@ class Human:
        for cloth, name in ((self.top, 'Top'), (self.bot, 'Bot')):
            if cloth is not None:
                cloth.node_tree.nodes["ColorRamp"].color_ramp.elements[0].color = random_HSV()
-               cloth.node_tree.nodes["Mix"].inputs[0].default_value = random.uniform(0.2, 0.6)
+               cloth.node_tree.nodes["Mix"].inputs[0].default_value = random.uniform(0.25, 0.75)
            else:
                print(name, self.model.name)
 
@@ -203,7 +203,7 @@ class Human:
            self.hairs.node_tree.nodes["Mix"].inputs[2].default_value = random_color_hair()
        else:
            self.hairs.node_tree.nodes["Principled BSDF"].inputs[0].default_value = random_color_hair()
-       print(car)
+       set_bounds(self.model, car)
 
 
@@ -225,9 +225,42 @@ def get_face(model):
     return face_3D
 
 
+def get_vis_face(model, cam):
+    face_model = None
+
+    ress = {}
+    for obj in model.children:
+        if 'bodyMesh' in obj.name:
+            face_model = obj
+            break
+    assert face_model is not None
+    mw = face_model.matrix_world
+    mwi = mw.inverted()
+
+    ray_begin = mwi @ cam.location
+    for name in ['nose', 'eye_l', 'eye_r', 'ear_l', 'ear_r']:
+        head = utils.get_head_pose(name, model)
+        tail = utils.get_tail_pose(name, model)
+        if 'ear' in name:
+            point = tail + 0.25 * (tail - head)
+        elif 'eye' in name:
+            point = head + 0.5 * (tail - head)
+        elif 'nose' in name:
+            point = tail + 0.05 * (tail - head)
+
+        print(name, point)
+        ray_end = mwi @ point
+        ray_direction = ray_end - ray_begin
+        ray_dist = ray_direction.length
+        ray_direction.normalize()
+        res = face_model.ray_cast(ray_begin, ray_direction, distance=ray_dist)
+
+        ress[name] = mw @ res[1] if res[0] else None
+    return ress
+
 def random_HSV():
     color_hsv = [random.random() for _ in range(3)]
-    color_hsv[1] *= 0.8  # threshold saturation
+    # color_hsv[1] *= 0.8  # threshold saturation
     rgb_color = colorsys.hsv_to_rgb(*color_hsv)
     return list(rgb_color) + [1, ]
 
diff --git a/scripts/random_pose.py b/scripts/random_pose.py
index 4c27112ba74aadb0fe9ece8ecc55153cc70b2a2c..e2c8cbc2c742e05568095e6b254148e8d8860959 100644
--- a/scripts/random_pose.py
+++ b/scripts/random_pose.py
@@ -195,7 +195,7 @@ def random_pose_ik(subject, auto_ik=False, targets=None, id_targets=None):
            target = targets[s][id_targets[s]]
        except IndexError as err:
            print(targets[s], id_targets, s)
-           raise (err)
+           raise err
        pose.bones[f'hand_{s}_IK'].rotation_euler = Vector((0, 0, 0))
        pose.bones[f'hand_{s}_IK'].rotation_euler = (
            ((matrix_world @ pose.bones[f'hand_{s}_IK'].matrix).inverted() @ target.matrix_world).to_euler())
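The annots_vis_2D files written by global_script.py are plain semicolon-separated CSVs: a header row with the keypoint labels (full_lbls), then one row of 0/1 flags per rendered pose, where 1 appears to mean the keypoint was not blocked in the get_vis_face ray cast (body-bone columns are always written as 1). A minimal reading sketch, with a placeholder path and not part of the patch, could look like this:

# Illustrative reader for the new visibility CSVs; the delimiter and 0/1 values follow
# the writes in global_script.py, and the path below is a placeholder.
import csv


def load_vis(path):
    with open(path, newline='') as f:
        rows = list(csv.reader(f, delimiter=';'))
    labels, data = rows[0], rows[1:]
    # One dict per rendered pose: label -> True when the flag is 1.
    return [dict(zip(labels, (v == '1' for v in row))) for row in data]


# Example (placeholder file name):
# poses = load_vis('output_temp/annots_vis_2D/vis_0_SceneName.csv')
# print(len(poses), 'poses loaded')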