diff --git a/bbox_3d.py b/bbox_3d.py
index 7f0241b9cda2fb86e15eebb807005ae0a26a1525..d0e06048c5d8c190ed7e6f92759406282f3552ea 100644
--- a/bbox_3d.py
+++ b/bbox_3d.py
@@ -1,7 +1,6 @@
 import numpy as np
 
-
-def get_bbox(ext):
+def get_3D_bbox(ext):
     pcd_bbox = np.array([
         [ext[0], ext[1], ext[2]],
         [- ext[0], ext[1], ext[2]],
@@ -12,17 +11,15 @@ def get_bbox(ext):
         [ext[0], - ext[1], - ext[2]],
         [- ext[0], - ext[1], - ext[2]]
     ])
-
     return pcd_bbox
 
-
 def generate_3d_bbox(data_name):
     objs = {"Banana": [0.029497003182768822, 0.15110498666763306, 0.060593008995056152],
             "Orange": [0.073495000600814819, 0.075856998562812805, 0.074581995606422424],
             "Pear": [0.066010989248752594, 0.12873399630188942, 0.06739199161529541]}
     for key, val in objs.items():
         ext = [x / 2 for x in val]
-        bbox = get_bbox(ext)
+        bbox = get_3D_bbox(ext)
         np.savetxt(f'{data_name}/Generated/Bbox/{key}_bbox_3d.txt', bbox)
         # print(ext)
 
diff --git a/compute_features.py b/compute_features.py
index c048b49970da3da9354a9ee88fedf3ac30468e87..e153c5a90e9652bf70f98f10196bb7d3b3b637fb 100644
--- a/compute_features.py
+++ b/compute_features.py
@@ -1,7 +1,4 @@
 
-
-
-import math
 import numpy as np
 import json
 from utils import compute_categories_id, compute_id_good_occ
@@ -15,29 +12,24 @@ from fps_alg import process2
 import os
 
 
-def updateJsonFile():
-    jsonFile = open("replayScript.json", "r") # Open the JSON file for reading
-    data = json.load(jsonFile) # Read the JSON into the buffer
-    jsonFile.close() # Close the JSON file
-
-    ## Working with buffered content
-    tmp = data["location"] 
-    data["location"] = path
-    data["mode"] = "replay"
-
-    ## Save our changes to JSON file
-    jsonFile = open("replayScript.json", "w+")
-    jsonFile.write(json.dumps(data))
-    jsonFile.close()
-
-
-def process_compute(data_name, camera, Nb_camera, World_begin, Nb_world, list_categories, occ_target, vis):
+def process_compute(data_name, camera, camera_resized, new_size, Nb_camera, World_begin, Nb_world, list_categories, occ_target, vis):
     transformation = np.matrix([[0.0000000, -1.0000000, 0.0000000],
                                 [0.0000000, 0.0000000, -1.0000000],
                                 [1.0000000, 0.0000000, 0.0000000]])
     
     list_count_categories = {}
+
+
     for i in range(World_begin, World_begin + Nb_world): # worlds
+
+        scenario = "Worlds"
+
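+        # route this world's outputs to the Training / Evaluating / Testing split based on its index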
+        if i > 4 :
+            destination_folder = f"Generated_{scenario}_Testing"
+        elif i > 3 :
+            destination_folder = f"Generated_{scenario}_Evaluating"
+        else :
+            destination_folder = f"Generated_{scenario}_Training"
         
         catergories_instance_array_id_to_cat, catergories_instance_array_cat_to_id, catergories_label_to_id = compute_categories_id(data_name, i)
         
@@ -56,56 +48,56 @@ def process_compute(data_name, camera, Nb_camera, World_begin, Nb_world, list_ca
             with open(f"{data_name}/Bbox_2d/{p}.json", 'r') as f:
                 data_Bbox_2d = json.load(f)
 
+            with open(f"{data_name}/Bbox_3d/{p}.json", 'r') as f:
+                data_Bbox_3d = json.load(f)
 
             if len(data_Bbox_2d) != len(data_3D_pose) :
                 raise TypeError("size of datas are differents !!")
             
-            if os.path.isfile(f'{data_name}/Generated/Count_{p-1}.json'):
-                with open(f'{data_name}/Generated/Count_{p-1}.json') as f:
+            if os.path.isfile(f'{data_name}/{destination_folder}/Count_{p-1}.json'):
+                with open(f'{data_name}/{destination_folder}/Count_{p-1}.json') as f:
                     list_count_categories = json.load(f)
 
-
             for categories in list_categories:
-
                 if categories in catergories_occ_array.keys():
-
-                    if len(catergories_occ_array[categories]) == 1 :
-
-                        meta = {}
-
-                        if categories in list_count_categories.keys():
-                            list_count_categories[categories] += 1
-                        else:
-                            list_count_categories[categories] = 1
-
-                        meta['id_generated'] = list_count_categories[categories] 
-                        meta['id_original'] = p 
-                        meta['id_category'] = catergories_label_to_id[categories] 
-                        meta['id_instance'] = catergories_occ_array[categories][0] 
-                        meta['id_dataset'] = 1
-                        meta['world'] = i
-                        meta['camera'] = f"grabber_{j}"
-                        meta['occlusion'] = occ_target
-                        meta['Nb_instance_category'] = 1
-
-
-                        if not os.path.isfile(f'{data_name}/Generated/Meta_Gen/{categories}/{categories}.json'):
-                            with open(f'{data_name}/Generated/Meta_Gen/{categories}/{categories}.json', mode='w') as f:
-                                feeds = {}
-                                feeds[meta['id_generated']]=meta
-                                f.write(json.dumps(feeds, indent=2))
-                        else:
-                            with open(f'{data_name}/Generated/Meta_Gen/{categories}/{categories}.json') as feedsjson:
-                                feeds = json.load(feedsjson)
+                    Nb_instance = len(catergories_occ_array[categories])
+
+                    meta = {}
+                    if categories not in list_count_categories.keys():
+                        list_count_categories[categories] = {}
+
+                    # JSON object keys are strings, so count per str(Nb_instance) to stay
+                    # consistent when Count_{p-1}.json is reloaded above
+                    if str(Nb_instance) in list_count_categories[categories]:
+                        list_count_categories[categories][str(Nb_instance)] += 1
+                    else:
+                        list_count_categories[categories][str(Nb_instance)] = 1
+
+                    meta['id_generated'] = list_count_categories[categories][str(Nb_instance)]
+                    meta['id_original'] = p
+                    meta['id_category'] = catergories_label_to_id[categories] 
+                    meta['id_instance'] = catergories_occ_array[categories]
+                    meta['id_dataset'] = 1
+                    meta['world'] = i
+                    meta['camera'] = f"grabber_{j}"
+                    meta['occlusion'] = occ_target
+                    meta['Nb_instance_category'] = Nb_instance
+
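+                    # append this sample's metadata to the per-category Meta_Gen JSON, creating it on first use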
+                    if not os.path.isfile(f'{data_name}/{destination_folder}/{categories}/Meta_Gen/{categories}.json'):
+                        with open(f'{data_name}/{destination_folder}/{categories}/Meta_Gen/{categories}.json', mode='w') as f:
+                            feeds = {}
                             feeds[meta['id_generated']]=meta
-                            with open(f'{data_name}/Generated/Meta_Gen/{categories}/{categories}.json', mode='w') as f:
-                                f.write(json.dumps(feeds, indent=4))
-
-                        # with open(f'{data_name}/Generated/Meta_Gen/{categories}/{categories}.json', "a") as meta_file:
-                        #     json.dump(meta, meta_file, indent=4)
+                            f.write(json.dumps(feeds, indent=2))
+                    else:
+                        with open(f'{data_name}/{destination_folder}/{categories}/Meta_Gen/{categories}.json') as feedsjson:
+                            feeds = json.load(feedsjson)
+                            feeds[meta['id_generated']]=meta
+                        with open(f'{data_name}/{destination_folder}/{categories}/Meta_Gen/{categories}.json', mode='w') as f:
+                            f.write(json.dumps(feeds, indent=4))    
 
+                    if (Nb_instance == 1):
                         for k in range(len(data_3D_pose)):
-
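+                            # export the pose, 2D bbox and 3D bbox size of the single matching instance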
                             if data_3D_pose[k]['id'] == catergories_occ_array[categories][0]:
                                 rpy = data_3D_pose[k]['pose']['rpy']
                                 rot = convert2(rpy)
@@ -116,13 +108,19 @@ def process_compute(data_name, camera, Nb_camera, World_begin, Nb_world, list_ca
                                 T_exp = transformation @ xyz
                                 T_exp = np.array(T_exp)
                                 num_arr = np.c_[R_exp, T_exp[0]]
-                                np.save(f'{data_name}/Generated/Pose_transformed/{categories}/{p}.npy', num_arr)  # save
+                                np.save(f'{data_name}/{destination_folder}/{categories}/Pose_transformed/{p}.npy', num_arr)  # save
                             else:
                                 continue
 
                             if data_Bbox_2d[k]['id'] == catergories_occ_array[categories][0]:
                                 bbox = bbox_2d(data_Bbox_2d[k])
-                                np.savetxt(f'{data_name}/Generated/Bbox/{categories}/{p}.txt', np.array(bbox).reshape((1, 4)))  # save
+                                np.savetxt(f'{data_name}/{destination_folder}/{categories}/Bbox/{p}.txt', np.array(bbox).reshape((1, 4)))  # save
+                            else:
+                                continue
+
+                            if data_Bbox_3d[k]['id'] == catergories_occ_array[categories][0]:
+                                bbox3d_size = data_Bbox_3d[k]['bbox']['size']
+                                np.savetxt(f'{data_name}/{destination_folder}/{categories}/Bbox_3d_Gen/{p}.txt', bbox3d_size)  # save
                             else:
                                 continue
 
@@ -130,38 +128,46 @@ def process_compute(data_name, camera, Nb_camera, World_begin, Nb_world, list_ca
                         img = cv2.imread(f"{data_name}/Instance_Segmentation/{p}.png", cv2.IMREAD_UNCHANGED) # plt.imread(path)
 
                         instance_img = instance(img, id)
-                        cv2.imwrite(f"{data_name}/Generated/Instance_Mask/{categories}/{p}.png", 255*instance_img)
-
+                        cv2.imwrite(f"{data_name}/{destination_folder}/{categories}/Instance_Mask/{p}.png", 255*instance_img)
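+                        # also write 640x480 copies of the mask and RGB for the resized pipeline (new_size comes from main.py)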
+                        instance_img_resized = cv2.resize(instance_img, new_size)
+                        cv2.imwrite(f"{data_name}/{destination_folder}/{categories}/Instance_Mask_resized/{p}.png", 255*instance_img_resized)
 
-                        img = image.imread(f"{data_name}/RGB/{p}.png")
+                        img = cv2.imread(f"{data_name}/RGB/{p}.png")
+                        cv2.imwrite(f"{data_name}/{destination_folder}/{categories}/RGB_Gen/{p}.png", img)
+                        img_resized = cv2.resize(img, new_size)
+                        cv2.imwrite(f"{data_name}/{destination_folder}/{categories}/RGB_resized/{p}.png", img_resized)
 
                         np.set_printoptions(precision=15)
-                        pose = np.load(f'{data_name}/Generated/Pose_transformed/{categories}/{p}.npy')
-                        #print(pose)
+                        pose = np.load(f'{data_name}/{destination_folder}/{categories}/Pose_transformed/{p}.npy')
                         R_exp = pose[0:3, 0:3]
                         tVec = pose[0:3, 3]
 
-                        #print(tVec)
-                        # camera = np.matrix([[1386.4138492513919, 0.0, 960.5],
-                        #                     [0.0, 1386.4138492513919, 540.5],
-                        #                     [0.0, 0.0, 1.0]])
-
-                        
-                        fps_points = np.loadtxt(f'{data_name}/Generated/FPS/{categories}_fps_3d.txt')
-                        # process(pcd_bbox, pcd, R_exp, tVec, camera, img)
+                        fps_points = np.loadtxt(f'{data_name}/Generated/{categories}/{categories}_fps_3d.txt')
+                        center = fps_points.mean(0)
+                        fps_points = np.append(fps_points, [center], axis=0)
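+                        # fps_points now holds the 8 FPS keypoints plus the model centroid appended as a 9th point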
                         points = process2(fps_points, R_exp, tVec, camera, img, vis)
-                        out = np.zeros((1, 401))
-
-                        out[0] = catergories_occ_array[categories] #obj_id #len have to be 1 !!
+                        out = [int(catergories_occ_array[categories][0])]  # single instance here, so index 0 is the object id
                         ind = 1
                         for point in points:
-                            out[0][ind] = point[0][0] / img.shape[1]
-                            out[0][ind + 1] = point[0][1] / img.shape[0]
+                            x = point[0][0] / img.shape[1]
+                            y = point[0][1] / img.shape[0]
+                            out.append(x)
+                            out.append(y)
                             ind += 2
-                        np.savetxt(f'{data_name}/Generated/FPS/{categories}/{p}.txt', out)
-                        #print("stop")
-
-    with open(f'{data_name}/Generated/Count_{p}.json', mode='w') as f:
+                        np.savetxt(f'{data_name}/{destination_folder}/{categories}/FPS/{p}.txt',  np.array(out).reshape(1, len(out)))
+
+                        points_resized = process2(fps_points, R_exp, tVec, camera_resized, img_resized, vis)
+                        out_resized = [int(catergories_occ_array[categories][0])]  # single instance here, so index 0 is the object id
+                        ind_resized = 1
+                        for point_resized in points_resized:
+                            x_resized = point_resized[0][0] / img_resized.shape[1]
+                            y_resized = point_resized[0][1] / img_resized.shape[0]
+                            out_resized.append(x_resized)
+                            out_resized.append(y_resized)
+                            ind_resized += 2
+                        np.savetxt(f'{data_name}/{destination_folder}/{categories}/FPS_resized/{p}.txt',  np.array(out_resized).reshape(1, len(out_resized)))
+        
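+    # persist the running per-category counters; the next frame reloads them as Count_{p-1}.json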
+    with open(f'{data_name}/{destination_folder}/Count_{p}.json', mode='w') as f:
         f.write(json.dumps(list_count_categories, indent=4))
     print(list_count_categories)
 
diff --git a/fps_alg.py b/fps_alg.py
index 3d5f14c1a5ddbc77e10babbd3bec7ab16ad67c83..fa2f93f64d93135cbf2cd3086aa82850a6401336 100644
--- a/fps_alg.py
+++ b/fps_alg.py
@@ -108,13 +108,13 @@ def process2(pcd, R_exp, tVec, camera, img, vis= True):
     #     print(pcd_fps_numpy[n], '==>', keypoint_2d[0][n])
 
     if vis:
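+        # images are now loaded with cv2 (BGR), so convert to RGB before plotting with matplotlib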
+        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
         out = np.zeros((img.shape[0], img.shape[1], 16))
         fig, ax = plt.subplots()
         ax.imshow(img)
         for n in range(len(pcd_fps_numpy)):
             point = keypoint_2d[0][n]
             ax.plot(point[0][0], point[0][1], marker='.', color="red")
-
         plt.imshow(img)
         plt.show()
     return keypoint_2d[0]
diff --git a/main.py b/main.py
index cced98fe248e6fa9029e7ce80058911fe00e9bb2..d031724b04fce617f5cb677ce263fc5974ac19aa 100644
--- a/main.py
+++ b/main.py
@@ -1,33 +1,50 @@
 import os
 import numpy as np
-import json
 from prepare_data import reform_data
-#from pose import transform_pose
-#from bbox_2d import generate_2d_bbox
-#from instance_mask import generate_instance_mask
 from fps_alg import apply_fps
-from bbox_3d import generate_3d_bbox
+from bbox_3d import get_3D_bbox
 from compute_features import process_compute
-import shutil
 import open3d as o3d
-# Import the library
+from scipy.spatial import distance
 import argparse
 
-def generate_folders(name, list_categories):
+def generate_folders(name, list_categories, scenario):
     is_exist = os.path.exists(name)
     if not is_exist:
         os.mkdir(name)
-    folders = ["RGB", "RGB_Gen", "Meta_Gen", "Depth", "Mask", "Meta", "Pose", "Bbox_2d", "Bbox_2d_loose", "Instance_Segmentation", "Semantic_Segmentation", "Instance_Mask", "Occlusion", "Models", "Pose_transformed", "Bbox", "FPS"]
+    folders = ["RGB", "RGB_Gen", "RGB_resized", "Meta_Gen", "Depth", "Mask", "Meta", "Pose", "Bbox_2d", "Bbox_2d_loose", "Bbox_3d", "Bbox_3d_Gen",  "Instance_Segmentation", "Semantic_Segmentation", "Instance_Mask", "Instance_Mask_resized", "Occlusion", "Models", "Pose_transformed", "Bbox", "FPS", "FPS_resized"]
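+    # folders in the per-category list below are created as {cat}/{f} under each
+    # Generated_{scenario}_* split (plus {name}/Generated/{cat}); the rest go directly under {name}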
     for f in folders:
         is_exist = os.path.exists(f"{name}/{f}")
         if not is_exist:
-            if f not in ["RGB_Gen", "Instance_Mask", "Meta_Gen", "Models", "Pose_transformed", "Bbox", "FPS"]:
+            if f not in ["RGB_Gen", "RGB_resized",  "Instance_Mask", "Instance_Mask_resized", "Meta_Gen", "Models", "Pose_transformed", "Bbox", "Bbox_3d_Gen", "FPS" , "FPS_resized"]:
                 os.mkdir(f"{name}/{f}")
             else:
                 for cat in list_categories:
-                    is_exist2 = os.path.exists(f"{name}/Generated/{f}/{cat}")
+                    is_exist2 = os.path.exists(f"{name}/Generated/{cat}")
                     if not is_exist2:
-                        os.makedirs(f"{name}/Generated/{f}/{cat}")
+                        os.makedirs(f"{name}/Generated/{cat}")
+                    is_exist2 = os.path.exists(f"{name}/Generated_{scenario}_Training/{cat}/{f}")
+                    if not is_exist2:
+                        os.makedirs(f"{name}/Generated_{scenario}_Training/{cat}/{f}")
+                    is_exist2 = os.path.exists(f"{name}/Generated_{scenario}_Evaluating/{cat}/{f}")
+                    if not is_exist2:
+                        os.makedirs(f"{name}/Generated_{scenario}_Evaluating/{cat}/{f}")
+                    is_exist2 = os.path.exists(f"{name}/Generated_{scenario}_Testing/{cat}/{f}")
+                    if not is_exist2:
+                        os.makedirs(f"{name}/Generated_{scenario}_Testing/{cat}/{f}")
+
+
+
+def calc_pts_diameter2(pts):
+    """Calculates the diameter of a set of 3D points (i.e. the maximum distance
+    between any two points in the set). Faster but requires more memory than
+    calc_pts_diameter.
+    :param pts: nx3 ndarray with 3D points.
+    :return: The calculated diameter.
+    """
+    dists = distance.cdist(pts, pts, 'euclidean')
+    diameter = np.max(dists)
+    return diameter
 
 if __name__ == '__main__':    
     # Create the parser
@@ -38,20 +55,23 @@ if __name__ == '__main__':
     # Parse the argument
     args = parser.parse_args()
 
+    scenario = "Worlds"
+
     ### parameters ###
     Categories = [] # to read
     Nb_instance = 1
     occ_target = 0.5
+
     dataset_src = "/gpfsscratch/rech/uli/ubn15wo/dataset/s2rg/Fruits_all_medium/data/"
     #dataset_src = "/media/mahmoud/E/Fruits_easy/data"
+    #dataset_src = "/media/gduret/DATA/dataset/s2rg/Fruits_all_medium/data"
+
     choice = "low" # depth of rgb resolution datas
     data_options = {"high": "ground_truth_rgb",
                     "low": "ground_truth_depth"}
     dataset_type = data_options[choice]
     dataset_name = f"/gpfsscratch/rech/uli/ubn15wo/dataset/s2rg/Fruits_all_medium/GUIMOD_{choice}"
     list_categories = ["banana1", "kiwi1", "pear2", "strawberry1", "apricot", "orange2", "peach1", "lemon2", "apple2" ]
-    # frame = "1_600000000"
-    #frame = "1_926000000"
     Nb_camera = 15
     #Nb_world = 10000
 
@@ -71,23 +91,60 @@ if __name__ == '__main__':
         #print(fps_points)
         np.savetxt(f'{dataset_name}/Generated/FPS/{categories}_fps_3d.txt', fps_points)
 
+    generate_folders(dataset_name, list_categories, scenario)
 
     if choice == 'high':
         camera = np.matrix([[1386.4138492513919, 0.0, 960.5],
                             [0.0, 1386.4138492513919, 540.5],
                             [0.0, 0.0, 1.0]])
-    else:
+        # (640/1920 = 1 / 3), (480/1080 = 4 / 9)
+        trans = np.matrix([[1 / 3, 0.0, 0.0],
+                        [0.0, (4 / 9), 0.0],
+                        [0.0, 0.0, 1.0]])
+    elif choice == 'low':
         camera = np.matrix([[1086.5054444841007, 0.0, 640.5],
                             [0.0, 1086.5054444841007, 360.5],
                             [0.0, 0.0, 1.0]])
+        # (640/1280 = 1 / 2), (480/720 = 2 / 3)
+        trans = np.matrix([[0.5, 0.0, 0.0],
+                        [0.0, (2 / 3), 0.0],
+                        [0.0, 0.0, 1.0]])
+
+    new_size = (640, 480)
+
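+    # rescale the intrinsics to the 640x480 resized images: fx, cx by the width ratio and fy, cy by the height ratio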
+    new_camera = trans @ camera
 
-    np.savetxt(f'{dataset_name}/Generated/camera_{choice}.txt', camera)
+    #np.savetxt(f'{dataset_name}/Generated/camera_{choice}.txt', camera)
 
     reform_data(dataset_src, dataset_name, dataset_type, Nb_camera, args.World_begin, args.Nb_worlds)
 
-    process_compute(dataset_name, camera, Nb_camera, args.World_begin, args.Nb_worlds, list_categories, occ_target, False)
-    #transform_pose(dataset_name, Nb_camera, Nb_world, list_categories, occ_target)
-    #generate_2d_bbox(dataset_name, Nb_camera, Nb_world, list_categories, occ_target)
-    #generate_instance_mask(dataset_name, Nb_camera, Nb_world, list_categories, occ_target)
-    #generate_fps(dataset_name, camera, Nb_camera, Nb_world, list_categories, occ_target, True)
-    #generate_3d_bbox(dataset_name)
+    list_categories = ["banana1", "kiwi1", "pear2", "strawberry1", "apricot", "orange2", "peach1", "lemon2", "apple2" ]
+    objs = {"banana1": [ 0.02949700132012367249, 0.1511049866676330566, 0.06059300713241100311 ],
+            "kiwi1": [ 0.04908600077033042908, 0.07206099480390548706, 0.04909799993038177490 ],
+            "pear2": [ 0.06601099669933319092, 0.1287339925765991211, 0.06739201396703720093 ],
+            "strawberry1": [0.01698100194334983826, 0.02203200198709964752, 0.01685700193047523499],
+            "apricot": [0.04213499650359153748, 0.05482299625873565674, 0.04333199933171272278],
+            "orange2": [ 0.07349500805139541626, 0.07585700601339340210, 0.07458199560642242432 ],
+            "peach1": [ 0.07397901266813278198, 0.07111301273107528687, 0.07657301425933837891 ],
+            "lemon2": [0.04686100035905838013, 0.04684200137853622437, 0.07244800776243209839],
+            "apple2": [0.05203099921345710754, 0.04766000062227249146, 0.05089000239968299866]}
+
+    for categories in list_categories:
+        point_cloud = f"Models/{categories}/{categories.lower()}.ply"
+        pcd = o3d.io.read_point_cloud(point_cloud)
+
+        fps_points = apply_fps(pcd, 8)
+
+        np.savetxt(f'{dataset_name}/Generated/{categories}/{categories}_fps_3d.txt', fps_points)
+
+        point_cloud_in_numpy = np.asarray(pcd.points)
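+        # object diameter scaled by 100 (metres to centimetres, assuming the .ply models are in metres)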
+        dim = calc_pts_diameter2(point_cloud_in_numpy) * 100
+        np.savetxt(f'{dataset_name}/Generated/{categories}/{categories}_diameter.txt', np.array([dim]))
+
+        size_bb = objs[categories]
+        ext = [x / 2 for x in size_bb]
+        bbox = get_3D_bbox(ext)
+        np.savetxt(f'{dataset_name}/Generated/{categories}/{categories}_bbox_3d.txt', bbox)  # save
+
+    process_compute(dataset_name, camera, new_camera, new_size, Nb_camera, args.World_begin, args.Nb_worlds, list_categories, occ_target, False)
+
diff --git a/prepare_data.py b/prepare_data.py
index 1641a783bd38f9426a17350c5f62cd2c6f1fca67..8830d80b2bb81de78255b57eb7ea04cdd77ea24c 100644
--- a/prepare_data.py
+++ b/prepare_data.py
@@ -48,6 +48,11 @@ def reform_data(src, data_name, data_option, Nb_camera, World_begin, Nb_world):
             dst_bbox = f"{data_name}/Bbox_2d/{count}.json"
             shutil.copy(src_bbox, dst_bbox)
 
+            files_3D_bbox = os.listdir(f"{src}/{i}/grabber_{j}/{data_option}/3d_detection/")
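+            # copy the per-frame 3D detection annotation into Bbox_3d, mirroring the 2D bbox copy above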
+            src_3D_bbox = f"{src}/{i}/grabber_{j}/{data_option}/3d_detection/{files_3D_bbox[0]}"
+            dst_3D_bbox = f"{data_name}/Bbox_3d/{count}.json"
+            shutil.copy(src_3D_bbox, dst_3D_bbox)
+
             files_bbox_loose = os.listdir(f"{src}/{i}/grabber_{j}/{data_option}/2d_detection_loose/")
             src_bbox_loose = f"{src}/{i}/grabber_{j}/{data_option}/2d_detection_loose/{files_bbox_loose[0]}"
             dst_bbox_loose = f"{data_name}/Bbox_2d_loose/{count}.json"
diff --git a/resize.py b/resize.py
index 70cc608640bcb86bab7e2941ba3b7e98659a96c6..6174f8cb1398132192bb6f6d79f86cb5d92bf838 100644
--- a/resize.py
+++ b/resize.py
@@ -1,12 +1,26 @@
 from matplotlib import image
 import matplotlib.pyplot as plt
+from utils import compute_categories_id, compute_id_good_occ
 import cv2
 import numpy as np
-
+import argparse
 
 if __name__ == '__main__':
 
+
+
+    # Create the parser
+    parser = argparse.ArgumentParser()
+    # Add an argument
+    parser.add_argument('--Nb_worlds', type=int, required=True)
+    parser.add_argument('--World_begin', type=int, required=True)
+    # Parse the argument
+    args = parser.parse_args()
+
+
     choice = "low"
+    Nb_instance = 1
+    occ_target = 0.5
 
     if choice == 'high':
         camera = np.matrix([[1386.4138492513919, 0.0, 960.5],
@@ -28,27 +42,49 @@ if __name__ == '__main__':
 
     dataset_name = f"GUIMOD_{choice}"
     new_size = (640, 480)
+    Nb_camera = 15
+    list_categories = ["banana1", "kiwi1", "pear2", "strawberry1", "apricot", "orange2", "peach1", "lemon2", "apple2" ]
+
+    for i in range(args.World_begin, args.World_begin + args.Nb_worlds): # worlds
+        
+        catergories_instance_array_id_to_cat, catergories_instance_array_cat_to_id, catergories_label_to_id = compute_categories_id(dataset_name, i)
+        
+        for j in range(1, Nb_camera+1): # cameras
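+            # p is the global frame index (Nb_camera frames per world, worlds and cameras are 1-based)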
+            p = ((i-1)*Nb_camera) + j
+
+            catergories_occ_array = compute_id_good_occ(dataset_name, p, catergories_instance_array_id_to_cat, catergories_instance_array_cat_to_id, occ_target)
+
+            #depth = cv2.resize(cv2.imread(f"{dataset_name}/Depth/{i}.tiff"), new_size)
+
+            for categories in list_categories:
+                if categories in catergories_occ_array.keys():
+                    if len(catergories_occ_array[categories]) == 1 :
+
+                        print(f"{dataset_name}/Generated/{categories}/RGB_Gen/{p}.png")
+                        rgb = cv2.resize(cv2.imread(f"{dataset_name}/Generated/{categories}/RGB_Gen/{p}.png"), new_size)
+                        cv2.imwrite(f"{dataset_name}/Generated/{categories}/RGB_resized/{p}.png", rgb)
 
-    for i in range(4995):
-        rgb = cv2.resize(cv2.imread(f"{dataset_name}/RGB/{i}.png"), new_size)
-        cv2.imwrite(f"rgb/{i}.png", rgb)
+                        #mask = cv2.resize(cv2.imread(f"{dataset_name}//Instance_Mask/{i}.png"), new_size)
+                        #cv2.imwrite(f"mask/{i}.png", mask*255)
 
-        mask = cv2.resize(cv2.imread(f"{dataset_name}/Mask/{i}.png"), new_size)
-        cv2.imwrite(f"mask/{i}.png", mask*255)
+                        print(f"{dataset_name}/Generated/{categories}/Instance_Mask/{p}.png")
+                        print("new_size",new_size)
+                        cat_mask = cv2.resize(cv2.imread(f"{dataset_name}/Generated/{categories}/Instance_Mask/{p}.png"), new_size)
 
-        depth = cv2.resize(cv2.imread(f"{dataset_name}/Depth/{i}.tiff"), new_size)
+                        print(f"{dataset_name}/Generated/{categories}/Instance_Mask_resized/{p}.png")
+                        cv2.imwrite(f"{dataset_name}/Generated/{categories}/Instance_Mask_resized/{p}.png", cat_mask)
 
-        banana_mask = cv2.resize(cv2.imread(f"{dataset_name}/Instance_Mask/banana1/{i}.png"), new_size)
-        cv2.imwrite(f"banana1_mask/{i}.png", banana_mask*255)
+                        # banana_mask = cv2.resize(cv2.imread(f"{dataset_name}/Instance_Mask/banana1/{i}.png"), new_size)
+                        # cv2.imwrite(f"banana1_mask/{i}.png", banana_mask*255)
 
-        orange_mask = cv2.resize(cv2.imread(f"{dataset_name}/Instance_Mask/orange2/{i}.png"), new_size)
-        cv2.imwrite(f"orange2_mask/{i}.png", orange_mask*255)
+                        # orange_mask = cv2.resize(cv2.imread(f"{dataset_name}/Instance_Mask/orange2/{i}.png"), new_size)
+                        # cv2.imwrite(f"orange2_mask/{i}.png", orange_mask*255)
 
-        Pear_mask = cv2.resize(cv2.imread(f"{dataset_name}/Instance_Mask/pear2/{i}.png"), new_size)
-        cv2.imwrite(f"pear2_mask/{i}.png", Pear_mask*255)
+                        # Pear_mask = cv2.resize(cv2.imread(f"{dataset_name}/Instance_Mask/pear2/{i}.png"), new_size)
+                        # cv2.imwrite(f"pear2_mask/{i}.png", Pear_mask*255)
 
 
-        print("Done")
+            print("Done")
 
         # Check 2d bbox to resize it
 
diff --git a/test_resize.py b/test_resize.py
index 58177e3b28c9f070922cbad2000c4f8b2bf8d505..c014c5975f1ef22a590f3e70f8476481789afea2 100644
--- a/test_resize.py
+++ b/test_resize.py
@@ -166,7 +166,6 @@ def generate_fps(data_name, camera, vis=False, resize=False):
     # Read the point cloud
 
     for obj in ["Banana"]:
-        sfefcij
         obj_id = 0
         point_cloud = f'{data_name}/Models/{obj}/{obj.lower()}.ply'
         pcd = o3d.io.read_point_cloud(point_cloud)
diff --git a/utils.py b/utils.py
index 96e49e33cdf33cce9058ef1e5029b4f1e09d8cad..2e09900181b6c4ba0515ce9fb69945c4b8af7759 100644
--- a/utils.py
+++ b/utils.py
@@ -57,8 +57,11 @@ def compute_id_good_occ(data_name, count, catergories_instance_array_id_to_cat,
         #print(cat)
         catergories_occ_array[cat] = []
 
+    print(data2)
+
     for i in data2:
-        if i['occlusion_value'] > 0.5 :
+        if i['occlusion_value'] >= Occ_wanted :
+            print(i['id'])
             catergories_occ_array[catergories_instance_array_id_to_cat[i['id']]].append(i['id'])
 
     print(catergories_occ_array)