Commit 76b7e7f6 authored by Guillaume Duret

add depth processing

parent 6b02ec31
@@ -10,7 +10,7 @@ from pose import convert2
from matplotlib import image
from fps_alg import process2
import os
+from PIL import Image


def process_compute(data_name, camera, camera_resized, new_size, Nb_camera, World_begin, Nb_world, list_categories, occ_target, vis):
    transformation = np.matrix([[0.0000000, -1.0000000, 0.0000000],
@@ -88,7 +88,7 @@ def process_compute(data_name, camera, camera_resized, new_size, Nb_camera, Worl
        if categories in categories_occ_array.keys():
            Nb_instance = len(categories_array[categories])
            Nb_instance_occ = len(categories_occ_array[categories])

        for scenario_loop in scenarios:
            meta = {}
@@ -155,6 +155,21 @@ def process_compute(data_name, camera, camera_resized, new_size, Nb_camera, Worl
            id = categories_occ_array[categories][0]
            img = cv2.imread(f"{data_name}/Instance_Segmentation/{p}.png", cv2.IMREAD_UNCHANGED) # plt.imread(path)

+            depth = Image.open(f"{data_name}/Depth/{p}.tiff")
+            print(f"{data_name}/Depth/{p}.tiff")
+            for scenario_loop in scenarios:
+                if not destination_folders[scenario_loop] == "dont_save" :
+                    depth_array = np.asarray(depth.getdata()).reshape(depth.size[1], depth.size[0])
+                    depth1 = cv2.resize(np.array(depth), new_size)
+                    depth2 = depth1 * 1000
+                    depth3 = depth2.astype(np.uint32)
+                    resized = Image.fromarray(depth3)
+                    depth_high = Image.fromarray(depth_array)
+                    #img2 = np.asarray(res.getdata()).reshape(res.size[1], res.size[0])
+                    print(f"{data_name}/{destination_folders[scenario_loop]}/{categories}/Depth_resized/{p}.png")
+                    #depth_high.save(f"{data_name}/{destination_folders[scenario_loop]}/{categories}/Depth_Gen/{p}.png")
+                    resized.save(f"{data_name}/{destination_folders[scenario_loop]}/{categories}/Depth_resized/{p}.png")
+
            instance_img = instance(img, id)

            for scenario_loop in scenarios:
...
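The new hunk above adds the per-frame depth handling: it opens the depth TIFF (which appears to store metres as floating-point values), resizes it to new_size with OpenCV, scales metres to millimetres, casts to an integer type and saves the result as a PNG under Depth_resized; the full-resolution Depth_Gen save stays commented out. Below is a minimal standalone sketch of that conversion, not part of the commit; the names resize_depth_to_png, tiff_path and png_path are hypothetical, and int32 is used instead of uint32 so Pillow's array-to-image mapping is unambiguous.

import cv2
import numpy as np
from PIL import Image

def resize_depth_to_png(tiff_path, png_path, new_size):
    # Depth map assumed to hold metres as floating-point values.
    depth_m = np.array(Image.open(tiff_path), dtype=np.float32)   # shape (H, W)
    resized_m = cv2.resize(depth_m, new_size)                     # new_size = (width, height)
    depth_mm = (resized_m * 1000.0).astype(np.int32)              # metres -> millimetres
    # Pillow maps int32 arrays to mode "I" and writes the PNG as 16-bit greyscale,
    # so millimetre depths up to ~65 m are preserved.
    Image.fromarray(depth_mm).save(png_path)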
@@ -8,16 +8,17 @@ import open3d as o3d
from scipy.spatial import distance
import argparse


def generate_folders(name, list_categories, scenario):
    is_exist = os.path.exists(name)
    if not is_exist:
        os.mkdir(name)
-    folders = ["RGB", "RGB_Gen", "RGB_resized", "Meta_Gen", "Depth", "Mask", "Meta", "Pose", "Bbox_2d", "Bbox_2d_loose", "Bbox_3d", "Bbox_3d_Gen", "Instance_Segmentation", "Semantic_Segmentation", "Instance_Mask", "Labels", "Instance_Mask_resized", "Occlusion", "Models", "Pose_transformed", "Bbox", "FPS", "FPS_resized"]
+    folders = ["RGB", "RGB_Gen", "RGB_resized", "Meta_Gen", "Depth", "Depth_Gen", "Depth_resized", "Mask", "Meta", "Pose", "Bbox_2d", "Bbox_2d_loose", "Bbox_3d", "Bbox_3d_Gen", "Instance_Segmentation", "Semantic_Segmentation", "Instance_Mask", "Labels", "Instance_Mask_resized", "Occlusion", "Models", "Pose_transformed", "Bbox", "FPS", "FPS_resized"]
    for f in folders:
        is_exist = os.path.exists(f"{name}/{f}")
        if not is_exist:
-            if f not in ["RGB_Gen", "RGB_resized", "Instance_Mask", "Labels", "Instance_Mask_resized", "Meta_Gen", "Models", "Pose_transformed", "Bbox", "Bbox_3d_Gen", "FPS" , "FPS_resized"]:
+            if f not in ["RGB_Gen", "RGB_resized", "Depth", "Depth_Gen", "Depth_resized", "Instance_Mask", "Labels", "Instance_Mask_resized", "Meta_Gen", "Models", "Pose_transformed", "Bbox", "Bbox_3d_Gen", "FPS" , "FPS_resized"]:
-                os.mkdir(f"{name}/{f}")
+                os.mkdir(f"{name}/{f}") # general data not dependent of category
            else:
                for cat in list_categories:
                    is_exist2 = os.path.exists(f"{name}/Generated/{cat}")
@@ -60,8 +61,10 @@ if __name__ == '__main__':
    parser.add_argument('--Nb_worlds', type=int, required=True)
    parser.add_argument('--World_begin', type=int, required=True)
    parser.add_argument('--dataset_id', type=str, required=True)
-    parser.add_argument('--rearrange', type=bool, required=True)
-    parser.add_argument('--compute', type=bool, required=True)
+    #parser.add_argument('--rearrange', dest='rearrange', default=False, action='store_true')
+    #parser.add_argument('--compute', dest='compute', default=False, action='store_true')
+    parser.add_argument('--rearrange', type=str, required=True)
+    parser.add_argument('--compute', type=str, required=True)

    # Parse the argument
    args = parser.parse_args()
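A note on why the boolean flags were swapped for strings: argparse's type=bool simply calls bool() on the raw command-line string, so any non-empty value (including "False" or "0") parses as True, which makes --rearrange False behave like --rearrange True. The commit works around this by comparing the strings against 'yes' further down; the commented-out lines show the other common fix, a store_true switch. A small illustrative sketch of that pattern (not what this commit ships):

import argparse

parser = argparse.ArgumentParser()
# Optional switches: absent -> False, present -> True, no value needed.
parser.add_argument('--rearrange', action='store_true')
parser.add_argument('--compute', action='store_true')

args = parser.parse_args(['--compute'])    # e.g. run only the compute step
print(args.rearrange, args.compute)        # prints: False True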
@@ -110,8 +113,10 @@ if __name__ == '__main__':
    new_camera = trans @ camera
    #np.savetxt(f'{dataset_name}/Generated/camera_{choice}.txt', camera)

+    print("rearrange", args.rearrange)
+    print("compute", args.compute)
-    if args.rearrange :
+    if args.rearrange == 'yes':
        reform_data(dataset_src, dataset_name, dataset_type, Nb_camera, args.World_begin, args.Nb_worlds)

    objs = {"banana1": [ 0.02949700132012367249, 0.1511049866676330566, 0.06059300713241100311 ],
@@ -142,6 +147,6 @@ if __name__ == '__main__':
        bbox = get_3D_bbox(ext)
        np.savetxt(f'{dataset_name}/Generated/{categories}/{categories}_bbox_3d.txt', bbox) # save

-    if args.compute :
+    if args.compute == 'yes' :
        process_compute(dataset_name, camera, new_camera, new_size, Nb_camera, args.World_begin, args.Nb_worlds, list_categories, occ_target, False)