diff --git a/compute_features.py b/compute_features.py
index 938487c3343f77eb96ceaf63eed0b46c2ee4cbec..e6fe8221e8330837ff8a04fc6e33d081a9111ae6 100644
--- a/compute_features.py
+++ b/compute_features.py
@@ -12,7 +12,7 @@ from fps_alg import process2
 import os
 from PIL import Image
 
-def process_compute(data_path, full_path, camera, camera_resized, new_size, Nb_camera, World_begin, Nb_world, list_categories, occ_target, vis):
+def process_compute(data_path, full_path, camera, camera_resized, new_size, Nb_camera, World_begin, Nb_world, list_categories, occ_target_min, occ_target_max=1, vis=False):
     transformation = np.matrix([[0.0000000, -1.0000000, 0.0000000],
                                 [0.0000000, 0.0000000, -1.0000000],
                                 [1.0000000, 0.0000000, 0.0000000]])
@@ -68,7 +68,7 @@ def process_compute(data_path, full_path, camera, camera_resized, new_size, Nb_c
         else :
             destination_folders["Mix_all"] = f"Generated_Mix_all_dont_save"
 
-        categories_array_filtered, categories_array_filtered_occ, categories_array_all, categories_array_all_occ = compute_id_good_occ(data_path, p, categories_instance_array_id_to_cat, categories_instance_array_cat_to_id, occ_target)
+        categories_array_filtered, categories_array_filtered_occ, categories_array_all, categories_array_all_occ = compute_id_good_occ(data_path, p, categories_instance_array_id_to_cat, categories_instance_array_cat_to_id, occ_target_min, occ_target_max)
 
         ### 3D Poses ###
         with open(f'{data_path}/Pose/{p}.json', 'r') as f:
@@ -146,7 +146,8 @@ def process_compute(data_path, full_path, camera, camera_resized, new_size, Nb_c
                 meta['Nb_instance'] = Nb_instance_all
                 meta['all_id_instance'] = categories_array_all[categories]
                 meta['all_id_instance_occlusion'] = categories_array_all_occ[categories]
-                meta['target_occlusion'] = occ_target
+                meta['target_occlusion_min'] = occ_target_min
+                meta['target_occlusion_max'] = occ_target_max
                 meta['Nb_instance_filtered'] = Nb_instance_occ
                 meta['id_instance_good'] = categories_array_filtered[categories]
                 meta['id_instance_good_occlusion'] = categories_array_filtered_occ[categories]
diff --git a/main.py b/main.py
index 08fee91fc017db54f8fd9c65f48f5ac914600faf..df6a3836284fff92cd85d1ac6bd7528f0e5fef39 100644
--- a/main.py
+++ b/main.py
@@ -66,7 +66,8 @@ if __name__ == '__main__':
     parser.add_argument('--Nb_worlds', type=int, required=True)
     parser.add_argument('--World_begin', type=int, required=True)
     parser.add_argument('--dataset_id', type=str, default='', required=True)
-    parser.add_argument('--occlusion_target', type=float, default='', required=True)
+    parser.add_argument('--occlusion_target_min', type=float, default='', required=True)
+    parser.add_argument('--occlusion_target_max', type=float, default='', required=True)
     #parser.add_argument('--rearrange', dest='rearrange', default=False, action='store_true')
     #parser.add_argument('--compute', dest='compute', default=False, action='store_true')
     parser.add_argument('--rearrange', type=str, default='no', required=True)
@@ -79,7 +80,8 @@ if __name__ == '__main__':
     ### parameters ###
     Categories = [] # to read
     Nb_instance = 1
-    occ_target = args.occlusion_target
+    occ_target_min = args.occlusion_target_min
+    occ_target_max = args.occlusion_target_max
     dataset_src = f"/gpfsscratch/rech/uli/ubn15wo/DATA/data{args.dataset_id}"
     #dataset_src = "/media/gduret/DATA/dataset/s2rg/Fruits_all_medium/data"
 
@@ -90,7 +92,7 @@ if __name__ == '__main__':
     dataset_type = data_options[choice]
     dataset_path = f"/gpfsscratch/rech/uli/ubn15wo/FruitBin{args.dataset_id}" #GUIMOD_New_{choice}_{args.dataset_id}"
     #dataset_path = f"/home/gduret/Documents/FruitBin{args.dataset_id}/"
-    dataset_name = f"FruitBin_{choice}_{Nb_instance}_{occ_target}"
+    dataset_name = f"FruitBin_{choice}_{Nb_instance}_{occ_target_min}_{occ_target_max}"
     #dataset_name = f"/gpfsscratch/rech/uli/ubn15wo/dataset_new{args.dataset_id}/s2rg/Fruits_all_medium/GUIMOD_{choice}"
     list_categories = ["banana1", "kiwi1", "pear2", "apricot", "orange2", "peach1", "lemon2", "apple2"]
     Nb_camera = 15
@@ -155,5 +157,5 @@ if __name__ == '__main__':
             np.savetxt(f'{dataset_path}/{dataset_name}/Generated/{categories}/{categories}_bbox_3d.txt', bbox) # save
 
     if args.compute == 'yes' :
-        process_compute(dataset_path, dataset_path+'/'+dataset_name, camera, new_camera, new_size, Nb_camera, args.World_begin, args.Nb_worlds, list_categories, occ_target, False)
+        process_compute(dataset_path, dataset_path+'/'+dataset_name, camera, new_camera, new_size, Nb_camera, args.World_begin, args.Nb_worlds, list_categories, occ_target_min, occ_target_max, False)
 
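Note on the widened process_compute signature: occ_target_max defaults to 1 and vis to False, so a call that passes only a lower bound keeps the old single-cutoff behaviour. A minimal sketch of a call under the new interface (the numeric values are illustrative assumptions, not from the source):

    # Hypothetical call: keep instances whose occlusion lies in (0.05, 0.7].
    # Omitting occ_target_max falls back to the old single-cutoff behaviour (max = 1).
    process_compute(dataset_path, f'{dataset_path}/{dataset_name}',
                    camera, new_camera, new_size, Nb_camera,
                    1, 50,  # World_begin, Nb_world
                    list_categories,
                    occ_target_min=0.05, occ_target_max=0.7)
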
f"/home/gduret/Documents/FruitBin{args.dataset_id}/" - dataset_name = f"FruitBin_{choice}_{Nb_instance}_{occ_target}" + dataset_name = f"FruitBin_{choice}_{Nb_instance}_{occ_target_min}_{occ_target_max}" #dataset_name = f"/gpfsscratch/rech/uli/ubn15wo/dataset_new{args.dataset_id}/s2rg/Fruits_all_medium/GUIMOD_{choice}" list_categories = ["banana1", "kiwi1", "pear2", "apricot", "orange2", "peach1", "lemon2", "apple2"] Nb_camera = 15 @@ -155,5 +157,5 @@ if __name__ == '__main__': np.savetxt(f'{dataset_path}/{dataset_name}/Generated/{categories}/{categories}_bbox_3d.txt', bbox) # save if args.compute == 'yes' : - process_compute(dataset_path, dataset_path+'/'+dataset_name, camera, new_camera, new_size, Nb_camera, args.World_begin, args.Nb_worlds, list_categories, occ_target, False) + process_compute(dataset_path, dataset_path+'/'+dataset_name, camera, new_camera, new_size, Nb_camera, args.World_begin, args.Nb_worlds, list_categories, occ_target_min, occ_target_max, False) diff --git a/slurm/process_dataset.slurm b/slurm/process_dataset.slurm index 65a93b9d7c889b59781ee381bb74c47ad040a1cf..9927c7b5f357fa4eb2201ef2cd53a2fa7fde7084 100644 --- a/slurm/process_dataset.slurm +++ b/slurm/process_dataset.slurm @@ -1,17 +1,17 @@ #!/bin/bash -#SBATCH --job-name=Software # nom du job +#SBATCH --job-name=Process # nom du job #SBATCH -A uli@cpu #SBATCH --ntasks=1 # Nombre total de processus MPI #SBATCH --ntasks-per-node=1 # Nombre de processus MPI par noeud #SBATCH --cpus-per-task=2 #SBATCH --partition=cpu_p1 -#SBATCH --qos=qos_cpu-t4 +#SBATCH --qos=qos_cpu-t3 # /!\ Attention, la ligne suivante est trompeuse mais dans le vocabulaire # de Slurm "multithread" fait bien référence à l'hyperthreading. #SBATCH --hint=nomultithread # 1 processus MPI par coeur physique (pas d'hyperthreading) -#SBATCH --time=35:00:00 # Temps d’exécution maximum demande (HH:MM:SS) -#SBATCH --output=Output/postprocess%j.out # Nom du fichier de sortie -#SBATCH --error=Output/postprocess%j.out # Nom du fichier d'erreur (ici commun avec la sortie) +#SBATCH --time=20:00:00 # Temps d’exécution maximum demande (HH:MM:SS) +#SBATCH --output=Output/qpostprocess%j.out # Nom du fichier de sortie +#SBATCH --error=Output/qpostprocess%j.out # Nom du fichier d'erreur (ici commun avec la sortie) # on se place dans le répertoire de soumission cd ${SLURM_SUBMIT_DIR} @@ -31,10 +31,12 @@ set -x id_begin=$1 Nb=$2 id_dataset=$3 -rearrange=$4 -compute=$5 +occlusion_min=$4 +occlusion_max=$5 +rearrange=$6 +compute=$7 conda activate stvNet -time(python main.py --World_begin="$id_begin" --Nb_world="$Nb" --dataset_id="$id_dataset" --rearrange=$rearrange --compute=$compute) +time(python main.py --World_begin="$id_begin" --Nb_world="$Nb" --dataset_id="$id_dataset" --occlusion_target_min=$occlusion_min --occlusion_target_max=$occlusion_max --rearrange=$rearrange --compute=$compute) diff --git a/utils.py b/utils.py index 42265d33ca68db8c7f9987c670b8169888730a5d..1fd63c7ce5822c40516bb16e3e058ae484279b0d 100644 --- a/utils.py +++ b/utils.py @@ -9,6 +9,7 @@ def compute_categories_id(data_name, world): # returns JSON object as # a dictionary + print(f'{data_name}/Meta/{world}.json') data = json.load(f) # Iterating through the json @@ -35,7 +36,7 @@ def compute_categories_id(data_name, world): -def compute_id_good_occ(data_name, count, categories_instance_array_id_to_cat, categories_instance_array_cat_to_id, Occ_wanted): +def compute_id_good_occ(data_name, count, categories_instance_array_id_to_cat, categories_instance_array_cat_to_id, occ_target_min, occ_target_max): 
diff --git a/utils.py b/utils.py
index 42265d33ca68db8c7f9987c670b8169888730a5d..1fd63c7ce5822c40516bb16e3e058ae484279b0d 100644
--- a/utils.py
+++ b/utils.py
@@ -9,6 +9,7 @@ def compute_categories_id(data_name, world):
 
     # returns JSON object as
     # a dictionary
+    print(f'{data_name}/Meta/{world}.json')
     data = json.load(f)
 
     # Iterating through the json
@@ -35,7 +36,7 @@ def compute_categories_id(data_name, world):
 
 
 
-def compute_id_good_occ(data_name, count, categories_instance_array_id_to_cat, categories_instance_array_cat_to_id, Occ_wanted):
+def compute_id_good_occ(data_name, count, categories_instance_array_id_to_cat, categories_instance_array_cat_to_id, occ_target_min, occ_target_max):
 
     f2 = open(f'{data_name}/Occlusion/{count}.json')
 
@@ -53,7 +54,7 @@ def compute_id_good_occ(data_name, count, categories_instance_array_id_to_cat, c
         categories_array_all_occ[cat] = []
 
     for i in data2:
-        if i['occlusion_value'] >= Occ_wanted :
+        if i['occlusion_value'] > occ_target_min and i['occlusion_value'] <= occ_target_max :
             categories_array_filtered[categories_instance_array_id_to_cat[i['id']]].append(i['id'])
             categories_array_filtered_occ[categories_instance_array_id_to_cat[i['id']]].append(i['occlusion_value'])
             if i['occlusion_value'] >= 0.05 :
@@ -69,4 +70,4 @@ def compute_id_good_occ(data_name, count, categories_instance_array_id_to_cat, c
 
     #print(categories_array_all_occ)
 
-    return categories_array_filtered, categories_array_filtered_occ, categories_array_all, categories_array_all_occ
\ No newline at end of file
+    return categories_array_filtered, categories_array_filtered_occ, categories_array_all, categories_array_all_occ
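The selection test changes from a single lower cutoff (>= Occ_wanted) to the half-open interval (occ_target_min, occ_target_max], so datasets generated with consecutive occlusion bands, e.g. (0.0, 0.3] and (0.3, 0.7], assign each instance to exactly one band. A minimal sketch of the equivalent predicate, assuming the same field and parameter names:

    # Hypothetical helper mirroring the updated test in compute_id_good_occ:
    # a value of exactly 0.3 falls in the (0.0, 0.3] band, not in (0.3, 0.7].
    def occlusion_in_band(occlusion_value, occ_target_min, occ_target_max):
        return occ_target_min < occlusion_value <= occ_target_max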