diff --git a/compute_features.py b/compute_features.py
index e7afe776ef8f11a831e0dd4e765adbf5181d431d..babf84be459ba8f06bbd26e6f90e0cdbb11fbeac 100644
--- a/compute_features.py
+++ b/compute_features.py
@@ -29,9 +29,6 @@ def process_compute(data_name, camera, camera_resized, new_size, Nb_camera, Worl
         for destination_folder_loop in destination_folders_list[scenario_loop] : # [f"Generated_{scenario}_Testing", f"Generated_{scenario}_Evaluating", f"Generated_{scenario}_Training"] :
             list_count_categories[scenario_loop][destination_folder_loop] = {}
 
-    print("destination_folders : ", destination_folders_list)
-    print("list_count_categories : ", list_count_categories)
-
     destination_folders = {}
 
     for i in range(World_begin, World_begin + Nb_world): # worlds
@@ -70,8 +67,6 @@ def process_compute(data_name, camera, camera_resized, new_size, Nb_camera, Worl
             ### 3D Poses ###
             with open(f'{data_name}/Pose/{p}.json', 'r') as f:
                 data_3D_pose = json.load(f)
-                #print(data)
-                #print("len(data)", len(data_3D_pose))
 
             ### 2D BBox ###
             with open(f"{data_name}/Bbox_2d/{p}.json", 'r') as f:
@@ -98,10 +93,8 @@ def process_compute(data_name, camera, camera_resized, new_size, Nb_camera, Worl
                 meta = {}
 
                 # if not destination_folders[scenario_loop] in list_count_categories[scenario_loop].keys():
-                    #list_count_categories[categories] = {categories}
                 list_count_categories[scenario_loop][destination_folders[scenario_loop]] = {}
                 if not categories in list_count_categories[scenario_loop][destination_folders[scenario_loop]].keys():
-                    #list_count_categories[categories] = {categories}
                     list_count_categories[scenario_loop][destination_folders[scenario_loop]][categories] = {}
                 if f"{Nb_instance}_instances" in list_count_categories[scenario_loop][destination_folders[scenario_loop]][categories].keys() :
                     list_count_categories[scenario_loop][destination_folders[scenario_loop]][categories][f"{Nb_instance}_instances"] += 1
@@ -142,7 +135,6 @@ def process_compute(data_name, camera, camera_resized, new_size, Nb_camera, Worl
                         T_exp = transformation @ xyz
                         T_exp = np.array(T_exp)
                         num_arr = np.c_[R_exp, T_exp[0]]
-                        #for scenario_loop in scenarios:
                         np.save(f'{data_name}/Generated/{categories}/Pose_transformed/{p}.npy', num_arr) # save
                     else:
                         continue
@@ -222,19 +214,10 @@ def process_compute(data_name, camera, camera_resized, new_size, Nb_camera, Worl
 
 
             for scenario_loop in scenarios:
-                print("scenarios : " , scenarios)
-                print("scenario_loop : " , scenario_loop)
-                print ("destination_folders : ", destination_folders )
                 for destination_folder_loop in destination_folders_list[scenario_loop] : # [f"Generated_{scenario}_Testing", f"Generated_{scenario}_Evaluating", f"Generated_{scenario}_Training"] :
-                    print("destination_folders[scenario_loop]", destination_folders_list[scenario_loop])
-                    print("destination_folder_loop", destination_folder_loop)
                     with open(f'{data_name}/{destination_folder_loop}/Count_{p}.json', mode='w') as f:
                         f.write(json.dumps(list_count_categories[scenario_loop][destination_folder_loop], indent=4))
-                    print(f'{data_name}/{destination_folders_list[scenario_loop]}/Count_{p}.json')
-                    print(list_count_categories[scenario_loop][destination_folder_loop])
-
-
             with open(f'{data_name}/Count_{p}.json', mode='w') as f:
                 f.write(json.dumps(list_count_categories, indent=4))
-                print(list_count_categories)
+                #print(list_count_categories)
 
diff --git a/fps_alg.py b/fps_alg.py
index fa2f93f64d93135cbf2cd3086aa82850a6401336..802a7ea4e01f5b052f20d7248443b470d300a4b8 100644
--- a/fps_alg.py
+++ b/fps_alg.py
@@ -132,8 +132,8 @@ def process(pcd_box, pcd2, R_exp, tVec, camera, img):
     keypoint_2d = cv2.projectPoints(pcd2_in_numpy, R_exp, tVec, camera, np.zeros(shape=[5, 1], dtype='float64'))
     keypoint_2d2 = cv2.projectPoints(pcd2_in_numpy2, R_exp, tVec, camera, np.zeros(shape=[5, 1], dtype='float64'))
 
-    for n in range(len(pcd2_in_numpy)):
-        print(pcd2_in_numpy[n], '==>', keypoint_2d[0][n])
+    # for n in range(len(pcd2_in_numpy)):
+    #     print(pcd2_in_numpy[n], '==>', keypoint_2d[0][n])
 
     points = []
     for n in range(len(pcd2_in_numpy)):
@@ -203,5 +203,5 @@ def generate_fps(data_name, camera, Nb_camera, Nb_world, list_categories, occ_ta
                     out[0][ind + 1] = point[0][1] / img.shape[0]
                     ind += 2
                 np.savetxt(f'{data_name}/Generated/FPS/{categories}/{p}.txt', out)
-                print("stop")
+
 
diff --git a/pose.py b/pose.py
index 6f8e477f506260722826c2b3d63b9c6fcc6379bc..6f29610c95e3042d8db920b78cb3b239ed3d200c 100644
--- a/pose.py
+++ b/pose.py
@@ -38,9 +38,6 @@ def compute_categories_id(data_name, world):
         # id_instances.append(i['id'])
         # print(i['id'])
 
-    print("catergories_instance_array_cat_to_id : ", catergories_instance_array_cat_to_id)
-    print("catergories_instance_array_id_to_cat : ", catergories_instance_array_id_to_cat)
-
     # Closing file
     f.close()
 
@@ -63,8 +60,6 @@ def compute_id_good_occ(data_name, count, catergories_instance_array_id_to_cat,
         if i['occlusion_value'] > 0.5 :
             catergories_occ_array[catergories_instance_array_id_to_cat[i['id']]].append(i['id'])
 
-    print(catergories_occ_array)
-
     # Closing file
     f2.close()
 
@@ -99,9 +94,6 @@ def transform_pose(data_name, Nb_camera, Nb_world, list_categories, occ_target):
 
             with open(f'{data_name}/Pose/{p}.json', 'r') as f:
                 data = json.load(f)
-            #print(data)
-            print("len(data)", len(data))
-
             for k in range(len(data)):
 
                 for categories in list_categories:
diff --git a/test_fps.py b/test_fps.py
index 9ed329ec5eb4d8f62ca35cd405e9be0283f43148..0a012a158e560f70568a4930443f957d0bcf1ac0 100644
--- a/test_fps.py
+++ b/test_fps.py
@@ -214,7 +214,7 @@ new = np.matrix([[0.0000000, -1.0000000, 0.0000000],
 
 t_org = pose[0:3, 3]
 tVec = new @ t_org
-print(tVec)
+#print(tVec)
 
 img = image.imread('/media/mahmoud/F/GUIMOD/data/1/grabber_1/color/image/0_0.png')
 camera = [[1386.4138492513919, 0.0, 960.5], [0.0, 1386.4138492513919, 540.5], [0.0, 0.0, 1.0]]
diff --git a/test_resize.py b/test_resize.py
index c014c5975f1ef22a590f3e70f8476481789afea2..84443b9312f564a0f3255e5d5debf643d5db0617 100644
--- a/test_resize.py
+++ b/test_resize.py
@@ -111,8 +111,8 @@ def process2(pcd, R_exp, tVec, camera, img, vis= True):
 
     pcd_fps_numpy = np.asarray(pcd)
     keypoint_2d = cv2.projectPoints(pcd_fps_numpy, R_exp, tVec, camera, np.zeros(shape=[8, 1], dtype='float64'))
-    for n in range(len(pcd_fps_numpy)):
-        print(pcd_fps_numpy[n], '==>', keypoint_2d[0][n])
+    # for n in range(len(pcd_fps_numpy)):
+    #     print(pcd_fps_numpy[n], '==>', keypoint_2d[0][n])
 
     if vis:
         out = np.zeros((img.shape[0], img.shape[1], 16))
@@ -139,8 +139,8 @@ def process(pcd_box, pcd2, R_exp, tVec, camera, img):
     keypoint_2d = cv2.projectPoints(pcd2_in_numpy, R_exp, tVec, camera, np.zeros(shape=[5, 1], dtype='float64'))
     keypoint_2d2 = cv2.projectPoints(pcd2_in_numpy2, R_exp, tVec, camera, np.zeros(shape=[5, 1], dtype='float64'))
 
-    for n in range(len(pcd2_in_numpy)):
-        print(pcd2_in_numpy[n], '==>', keypoint_2d[0][n])
+    # for n in range(len(pcd2_in_numpy)):
+    #     print(pcd2_in_numpy[n], '==>', keypoint_2d[0][n])
 
     points = []
     for n in range(len(pcd2_in_numpy)):
diff --git a/utils.py b/utils.py
index 2e09900181b6c4ba0515ce9fb69945c4b8af7759..b8f4b334e003e027307742ec8f7f69a73a306112 100644
--- a/utils.py
+++ b/utils.py
@@ -36,9 +36,6 @@ def compute_categories_id(data_name, world):
         # id_instances.append(i['id'])
         # print(i['id'])
 
-    print("catergories_instance_array_cat_to_id : ", catergories_instance_array_cat_to_id)
-    print("catergories_instance_array_id_to_cat : ", catergories_instance_array_id_to_cat)
-
     # Closing file
     f.close()
 
@@ -57,15 +54,10 @@ def compute_id_good_occ(data_name, count, catergories_instance_array_id_to_cat,
         #print(cat)
         catergories_occ_array[cat] = []
 
-    print(data2)
-
     for i in data2:
         if i['occlusion_value'] >= Occ_wanted :
-            print(i['id'])
             catergories_occ_array[catergories_instance_array_id_to_cat[i['id']]].append(i['id'])
 
-    print(catergories_occ_array)
-
     # Closing file
     f2.close()
 