Skip to content
Snippets Groups Projects
Commit 1baa861a authored by Romain Guesdon's avatar Romain Guesdon
Browse files

Clean repo

parent 512a6577
No related branches found
No related tags found
No related merge requests found
......@@ -16,4 +16,5 @@ backgrounds/*
*.bmp
*png
*.jpg
*.jpeg
\ No newline at end of file
*.jpeg
*.thumb
\ No newline at end of file
......@@ -5,7 +5,6 @@ import os
import sys
import shutil
from collections import OrderedDict
from math import prod
import time
import bpy
......@@ -113,9 +112,7 @@ def main():
C.scene.collection.objects.link(camera_object)
camera_object.rotation_mode = 'XYZ'
# camera_object.location = [-.97, -0.24, 0.68]
camera_object.location = [-.97, -0.1, 0.68]
# camera_rotation_0 = utils.r(Vector([72, 8, -75]))
camera_rotation_0 = utils.r(Vector([73, 8, -82]))
camera_object.rotation_euler = camera_rotation_0
......@@ -301,7 +298,6 @@ def main():
lbl for bone in bone_lbls for lbl in [bone + k for k in ['_head', '_tail']] if '_IK' not in lbl
]
face_lbls = ['nose', 'eye_l', 'eye_r', 'ear_l', 'ear_r']
# full_lbls = [lbl for lbl in bone_lbls if lbl not in face_lbls] + face_lbls
full_lbls = bone_lbls + face_lbls
annot_file_2D.write(
';'.join([lbl for bone in full_lbls for lbl in [bone + k for k in ['_x', '_y']]]) + '\n')
......
File deleted
import bpy
import bpy_extras
from mathutils import Matrix
from mathutils import Vector
# ---------------------------------------------------------------
# 3x4 P matrix from Blender camera
# ---------------------------------------------------------------
# Build intrinsic camera parameters from Blender camera data
# From https://blender.stackexchange.com/a/38210/153600
# See notes on this in
# blender.stackexchange.com/questions/15102/what-is-blenders-camera-projection-matrix-model
def get_calibration_matrix_K_from_blender(camd):
    """Build the 3x3 intrinsic camera matrix K from Blender camera data.

    Args:
        camd: Blender camera data-block (e.g. ``camera_object.data``).

    Returns:
        mathutils.Matrix: 3x3 intrinsic matrix with the focal lengths in
        pixels on the diagonal, the principal point at the image centre,
        and zero skew (rectangular pixels).
    """
    f_in_mm = camd.lens
    scene = bpy.context.scene
    resolution_x_in_px = scene.render.resolution_x
    resolution_y_in_px = scene.render.resolution_y
    scale = scene.render.resolution_percentage / 100
    sensor_width_in_mm = camd.sensor_width
    sensor_height_in_mm = camd.sensor_height
    pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y
    if camd.sensor_fit == 'VERTICAL':
        # The sensor height is fixed (sensor fit is vertical);
        # the sensor width is effectively changed with the pixel aspect ratio.
        s_u = resolution_x_in_px * scale / sensor_width_in_mm / pixel_aspect_ratio
        s_v = resolution_y_in_px * scale / sensor_height_in_mm
    else:  # 'HORIZONTAL' and 'AUTO'
        # The sensor width is fixed (sensor fit is horizontal);
        # the sensor height is effectively changed with the pixel aspect ratio.
        s_u = resolution_x_in_px * scale / sensor_width_in_mm
        s_v = resolution_y_in_px * scale * pixel_aspect_ratio / sensor_height_in_mm
    # Parameters of the intrinsic calibration matrix K.
    alpha_u = f_in_mm * s_u  # focal length in horizontal pixel units
    alpha_v = f_in_mm * s_v  # focal length in vertical pixel units
    u_0 = resolution_x_in_px * scale / 2  # principal point x (image centre)
    v_0 = resolution_y_in_px * scale / 2  # principal point y (image centre)
    skew = 0  # only use rectangular pixels
    K = Matrix(
        ((alpha_u, skew, u_0),
         (0, alpha_v, v_0),
         (0, 0, 1)))
    return K
# Returns camera rotation and translation matrices from Blender.
#
# There are 3 coordinate systems involved:
# 1. The World coordinates: "world"
# - right-handed
# 2. The Blender camera coordinates: "bcam"
# - x is horizontal
# - y is up
# - right-handed: negative z look-at direction
# 3. The desired computer vision camera coordinates: "cv"
# - x is horizontal
# - y is down (to align to the actual pixel coordinates
# used in digital images)
# - right-handed: positive z look-at direction
def get_3x4_RT_matrix_from_blender(cam):
    """Return the 3x4 extrinsic matrix [R|t] mapping world coordinates to
    computer-vision camera coordinates for the given Blender camera object.
    """
    # Change of basis from the Blender camera frame (x right, y up,
    # -z look-at) to the computer-vision camera frame (x right, y down,
    # +z look-at).
    R_bcam2cv = Matrix(
        ((1, 0, 0),
         (0, -1, 0),
         (0, 0, -1)))

    # NOTE(review): matrix_basis ignores constraints and parenting; the
    # original comments suggest matrix_world for that — confirm intent.
    location, rotation = cam.matrix_basis.decompose()[0:2]

    # The quaternion describes the object's rotation (camera -> world);
    # transpose to obtain the coordinate rotation world -> blender camera.
    rot_w2bcam = rotation.to_matrix().transposed()
    trans_w2bcam = -1 * rot_w2bcam @ location

    # Compose with the basis change.
    # NOTE: Use * instead of @ here for older versions of Blender.
    rot_w2cv = R_bcam2cv @ rot_w2bcam
    trans_w2cv = R_bcam2cv @ trans_w2bcam

    # Stack rotation and translation into a single 3x4 matrix.
    rows = [tuple(rot_w2cv[i]) + (trans_w2cv[i],) for i in range(3)]
    return Matrix(rows)
def get_3x4_P_matrix_from_blender(cam):
    """Return (P, K, RT): the full 3x4 projection matrix for the camera
    object along with its intrinsic (K) and extrinsic (RT) factors.
    """
    intrinsics = get_calibration_matrix_K_from_blender(cam.data)
    extrinsics = get_3x4_RT_matrix_from_blender(cam)
    projection = intrinsics @ extrinsics
    return projection, intrinsics, extrinsics
# ----------------------------------------------------------
# Alternate 3D coordinates to 2D pixel coordinate projection code
# adapted from https://blender.stackexchange.com/questions/882/how-to-find-image-coordinates-of-the-rendered-vertex?lq=1
# to have the y axes pointing up and origin at the top-left corner
def project_by_object_utils(cam, point):
    """Project a 3D world-space point onto the rendered image.

    Returns a 2D Vector in pixel coordinates with the origin at the
    top-left corner (y axis pointing down), matching image conventions.
    """
    scene = bpy.context.scene
    ndc = bpy_extras.object_utils.world_to_camera_view(scene, cam, point)
    factor = scene.render.resolution_percentage / 100
    width = int(scene.render.resolution_x * factor)
    height = int(scene.render.resolution_y * factor)
    # world_to_camera_view measures y up from the bottom edge; flip it so
    # that y grows downward from the top-left corner.
    return Vector((ndc.x * width, height - ndc.y * height))
import sys
from math import cos, sin, pi, acos
import random
import json
import glob
import os
import numpy as np
from mathutils import Vector, Matrix
import colorsys
......@@ -166,9 +162,7 @@ class Human:
setattr(bone, f'ik_min_{axe}', utils.r(const[0]))
setattr(bone, f'ik_max_{axe}', utils.r(const[1]))
# self.model.pose.bones[f'hand_{s}_IK'].location = Vector((-15, 10, 0))
# self.model.pose.bones[f'hand_{s}_IK'].location = Vector((0, 5, -10))
self.model.pose.bones[f'hand_{s}_IK'].location = Vector((0, 1.2, 0.25))
self.model.pose.bones[f'hand_{s}_IK'].location = Vector((0, 1.2, 0.25))
for i in '123':
self.model.pose.bones[f'spine_0{i}'].lock_ik_x = False
......@@ -256,34 +250,9 @@ def switch_constraints(model, enable=False):
def set_bounds(model, car=None):
    """Attach bound constraints keeping the model's hands on the car.

    Floor constraints are currently disabled in favour of shrinkwraps.
    """
    # set_floors(model, car)
    set_shrinkwraps(model, car)
def set_floors(model, car=None):
    """(Re)create FLOOR constraints on the model's hand IK bones.

    Existing Floor constraints are removed from both hand IK bones; when a
    car is given, one FLOOR constraint is added per 'Plane*' child of the
    car so the hands cannot pass through those planes.

    Args:
        model: armature object whose pose bones are constrained.
        car: optional car object whose 'Plane*' children act as floors.
    """
    original_mode = bpy.context.mode
    utils.select_only(model)
    bpy.ops.object.mode_set(mode='POSE')
    planes = None
    if car is not None:
        planes = [ch for ch in car.children_recursive if ch.name[:5] == 'Plane']
    for s in 'lr':
        bone = model.pose.bones[f'hand_{s}_IK']
        # Snapshot the collection first: removing entries while iterating
        # the live bpy constraint collection skips elements (the original
        # also abused a list comprehension purely for its side effect).
        for constr in list(bone.constraints):
            if 'Floor' in constr.name:
                bone.constraints.remove(constr)
        if car is None:
            continue
        for obj in planes:
            constr = bone.constraints.new('FLOOR')
            constr.target = obj
            constr.floor_location = 'FLOOR_Z'
            constr.use_rotation = True
            constr.offset = 1
    bpy.ops.object.mode_set(mode=original_mode)
def set_shrinkwraps(model, car=None):
original_mode = bpy.context.mode
utils.select_only(model)
......
import sys
from math import cos, sin, pi, acos
import random
from mathutils import Vector
import numpy as np
from scripts import utils
......@@ -74,14 +71,6 @@ def reset_subject(subject):
bone.rotation_euler = (0, 0, 0)
bpy.ops.object.mode_set(mode='OBJECT')
scale = round(subject.scale[0] * (1.6 + 0.3 * random.random()) / (subject.dimensions.y), 3)
# subject.scale = [scale] * 3
# subject.scale = [0.1] * 3
# height = subject.dimensions.y
# subject.location = [0, -0.16, -0.5 * 1.66]
# subject.location = [0, -0.16, -0.5 * height]
# subject.rotation_euler = r([65, 0, 0])
subject.scale = [0.9] * 3
sit_height = utils.get_head_pose('pelvis', subject).z * 0.88
subject.location = [0, -0.04, - sit_height]
......@@ -160,12 +149,6 @@ def random_pose_ik(subject, auto_ik=False, targets=None, id_targets=None):
pose.use_auto_ik = auto_ik
targets_test = {
'l': Vector((0.3, -0.1, 0.4)),
'r': Vector((-0.4, -0.1, 0.2))
}
back_rota_fact = sin(rota('spine_03').y) / sin(r(30))
for s in ['l', 'r']:
# Disconnect clavicle
armature = bpy.data.armatures[subject.name]
......@@ -181,7 +164,6 @@ def random_pose_ik(subject, auto_ik=False, targets=None, id_targets=None):
if targets is None:
target = Vector()
shoulder_pose = get_head_pose(f'upperarm_{s}', subject)
min_arm_factor = 0.2
back_forward_angle = rota('spine_03').x + rota('spine_01').x - r(30) # 0 = straight
......@@ -196,7 +178,6 @@ def random_pose_ik(subject, auto_ik=False, targets=None, id_targets=None):
theta = acos(costheta)
min_arm_factor = 0.2 + max(sin(back_forward_angle), 0)
# print(min_arm_factor, d(phi), d(theta))
u = random.uniform(min_arm_factor, 1) * arm_length
target.x = u * sin(theta) * cos(phi)
......@@ -206,9 +187,6 @@ def random_pose_ik(subject, auto_ik=False, targets=None, id_targets=None):
target += shoulder_pose
hand_pose(pose, side=s)
temp_rota = rota(f'upperarm_{s}') + rota(f'lowerarm_{s}')
# target = targets_test[s]
else:
if id_targets is None:
target = random.choice(targets[s])
......@@ -226,8 +204,6 @@ def random_pose_ik(subject, auto_ik=False, targets=None, id_targets=None):
target = target.location
location = get_head_pose(bone.name, subject)
# print(location)
# print(target)
bpy.ops.transform.translate(value=target - location)
if targets is not None:
......
......@@ -3,7 +3,6 @@ from math import pi
import bpy
import numpy as np
from mathutils import Matrix, Vector, Euler
# Maths utils
......
Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment