Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • gduret/DenseFusion
1 result
Show changes
Commits on Source (6)
from ultralytics import YOLO
import cv2
import matplotlib.pyplot as plt
import argparse
import os

# Script: run a trained YOLO segmentation model over every image in
# <path_evaluation>/<class_object>/RGB_resized and save the first predicted
# instance mask as a grayscale image in <path_result>.

# Create the parser
parser = argparse.ArgumentParser(
    description="Predict segmentation masks with a trained YOLO model.")
# Add the arguments
parser.add_argument('--path_evaluation', type=str, required=True,
                    help="Root of the evaluation dataset.")
parser.add_argument('--path_result', type=str, required=True,
                    help="Directory where predicted masks are written.")
parser.add_argument('--class_object', type=str, required=True,
                    help="Object class sub-directory to evaluate.")
parser.add_argument('--path_model_yolo', type=str, required=True,
                    help="Path to the trained YOLO weights file.")
# Parse the arguments
args = parser.parse_args()

# Load the trained model.
model = YOLO(args.path_model_yolo)

path_evaluation_data = os.path.join(args.path_evaluation, args.class_object,
                                    "RGB_resized")
for file_name in os.listdir(path_evaluation_data):
    image_path = os.path.join(path_evaluation_data, file_name)
    try:
        results = model.predict(source=image_path, conf=0.5, save=True)
        # Move the first (and only, bs=1) result to host memory.
        result = results[0].to('cpu').numpy()
        # Masks object for segmentation outputs; None when nothing was
        # detected above the confidence threshold.
        masks = result.masks
        if masks is None:
            print("no prediction")
            continue
        # First instance mask (H, W) — saved as a grayscale image.
        mask_res = masks.data[0]
        plt.imsave(os.path.join(args.path_result, file_name), mask_res,
                   cmap='gray')
        print("images saved : ", file_name)
    except Exception as exc:
        # Best-effort per-image processing: keep going, but report why the
        # image failed instead of swallowing the error silently.
        print("no prediction", f"({file_name}: {exc})")
source diff could not be displayed: it is too large. Options to address this: view the blob.
source diff could not be displayed: it is too large. Options to address this: view the blob.
......@@ -24,12 +24,11 @@ import matplotlib.pyplot as plt
class PoseDataset(data.Dataset):
def __init__(self, mode, num, add_noise, root, noise_trans, refine):
# ["banana1", "kiwi1", "pear2", "strawberry1", "apricot", "orange2", "peach1", "lemon2", "apple2" ]
#self.objlist = [1] #TODO
# apple, ....
#self.objlist = [1, 2, 3, 4, 5, 6, 7, 8]
self.objlist = [1]
# apple2 0 , apricot 1 , banana1 2 , kiwi1 3 , lemon2 4 , orange2 5 , peach1 6 , pear2 7
self.objlist = [1, 2, 3, 4, 5, 6, 7, 8]
#self.objlist = [1, 3, 6, 7, 8]
self.mode = mode
self.list_rgb = []
......@@ -94,8 +93,8 @@ class PoseDataset(data.Dataset):
self.border_list = [-1, 40, 80, 120, 160, 200, 240, 280, 320, 360, 400, 440, 480, 520, 560, 600, 640, 680]
self.num_pt_mesh_large = 500
self.num_pt_mesh_small = 500
self.symmetry_obj_idx = [1] # TODO
#self.symmetry_obj_idx = [0 ,1, 3, 4, 5, 6]
self.symmetry_obj_idx = [0, 1, 3, 4, 5, 6] # TODO
#self.symmetry_obj_idx = [0, 2, 3]
def __getitem__(self, index):
......@@ -130,8 +129,8 @@ class PoseDataset(data.Dataset):
mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, 0))
if self.mode == 'eval':
#mask_label = ma.getmaskarray(ma.masked_equal(label, np.array(255)))
mask_label = ma.getmaskarray(ma.masked_equal(label, np.array([255, 255, 255])))[:, :, 0]
mask_label = ma.getmaskarray(ma.masked_equal(label, np.array(255)))
#mask_label = ma.getmaskarray(ma.masked_equal(label, np.array([255, 255, 255])))[:, :, 0]
else:
#mask_label = ma.getmaskarray(ma.masked_equal(label, np.array([255, 255, 255])))[:, :, 0]
mask_label = ma.getmaskarray(ma.masked_equal(label, np.array(255)))
......@@ -193,6 +192,10 @@ class PoseDataset(data.Dataset):
model_points = self.pt[obj] / 1000.0
dellist = [j for j in range(0, len(model_points))]
#print("del :", len(dellist))
#print("len : ", len(model_points)-self.num_pt_mesh_small)
#print("model point : ", len(model_points))
#print("num_pt_mesh : ", self.num_pt_mesh_small)
dellist = random.sample(dellist, len(model_points) - self.num_pt_mesh_small)
model_points = np.delete(model_points, dellist, axis=0)
......
1: {diameter: 52.94260215554199, min_x: -26.221, min_y: -23.598, min_z: -25.405, size_x: 52.2, size_y: 47.816, size_z: 51.055},
2: {diameter: 55.64182762994041, min_x: -21.122, min_y: -28.445, min_z: -22.181, size_x: 42.491, size_y: 55.287, size_z: 43.699},
3: {diameter: 152.6314929798569, min_x: -14.428, min_y: -75.92699, min_z: -23.056, size_x: 29.562, size_y: 151.43699, size_z: 60.727000000000004},
4: {diameter: 72.65268354988685, min_x: -24.562, min_y: -36.235, min_z: -24.574, size_x: 49.108000000000004, size_y: 72.094, size_z: 49.120000000000005},
5: {diameter: 72.58068124508065, min_x: -23.86, min_y: -23.435, min_z: -35.375, size_x: 46.884, size_y: 46.866, size_z: 72.48400000000001},
6: {diameter: 77.41278909198401, min_x: -36.512,min_y: -36.759, min_z: -37.344, size_x: 73.693, size_y: 76.06, size_z: 74.78200000000001},
7: {diameter: 76.63639853229013, min_x: -37.55, min_y: -35.435, min_z: -36.938, size_x: 74.026, size_y: 71.15899999999999, size_z: 76.622},
1: {diameter: 52.94260215554199, min_x: -26.221, min_y: -23.598, min_z: -25.405, size_x: 52.2, size_y: 47.816, size_z: 51.055}
2: {diameter: 55.64182762994041, min_x: -21.122, min_y: -28.445, min_z: -22.181, size_x: 42.491, size_y: 55.287, size_z: 43.699}
3: {diameter: 152.6314929798569, min_x: -14.428, min_y: -75.92699, min_z: -23.056, size_x: 29.562, size_y: 151.43699, size_z: 60.727000000000004}
4: {diameter: 72.65268354988685, min_x: -24.562, min_y: -36.235, min_z: -24.574, size_x: 49.108000000000004, size_y: 72.094, size_z: 49.120000000000005}
5: {diameter: 72.58068124508065, min_x: -23.86, min_y: -23.435, min_z: -35.375, size_x: 46.884, size_y: 46.866, size_z: 72.48400000000001}
6: {diameter: 77.41278909198401, min_x: -36.512,min_y: -36.759, min_z: -37.344, size_x: 73.693, size_y: 76.06, size_z: 74.78200000000001}
7: {diameter: 76.63639853229013, min_x: -37.55, min_y: -35.435, min_z: -36.938, size_x: 74.026, size_y: 71.15899999999999, size_z: 76.622}
8: {diameter: 129.85637252518683, min_x: -32.724, min_y: -52.947, min_z: -33.292, size_x: 66.189, size_y: 129.07999999999998, size_z: 67.57300000000001}
......@@ -7,5 +7,4 @@ export PYTHONUNBUFFERED="True"
export CUDA_VISIBLE_DEVICES=0
python3 ./tools/eval_linemod.py --dataset_root ./datasets/linemod/Linemod_preprocessed\
--model trained_checkpoints/linemod/pose_model_9_0.01310166542980859.pth\
--refine_model trained_checkpoints/linemod/pose_refine_model_493_0.006761023565178073.pth
\ No newline at end of file
--model trained_models/linemod8/pose_model_4_0.012983739659874712.pth --refine_model trained_models/linemod8/pose_refine_model_9_0.01186443073513208.pth
#!/bin/bash
# SLURM batch script: submits the DenseFusion LineMOD training run on one GPU.
#SBATCH --job-name=DF # nom du job
#SBATCH -A uli@gpu
#SBATCH --ntasks=1 # Nombre de processus MPI
#SBATCH --cpus-per-task=10 # nombre de threads OpenMP
#SBATCH --hint=nomultithread # pas d'hyperthreading
#SBATCH --qos=qos_gpu-t4
#SBATCH --gres=gpu:1
#SBATCH --time=100:00:00 # Temps d’exécution max
#SBATCH --output=Output/Pvnet%j.out # fichier de sortie
#SBATCH --error=Output/Pvnet%j.out # fichier d'erreur
# Change to the directory the job was submitted from.
cd ${SLURM_SUBMIT_DIR}
module purge
# Load the required modules.
module load python/3.6.15
# Echo each command before running it (debug trace in the job log).
set -x
# NOTE(review): cat_target is read from the first argument and echoed, but is
# never passed to train_linemod.sh — confirm whether it should be forwarded.
cat_target=$1
echo $cat_target
# NOTE(review): `conda activate` in a non-interactive batch shell usually
# requires `source <conda>/etc/profile.d/conda.sh` first — verify this works
# on the target cluster.
conda activate NewDF
bash ./experiments/scripts/train_linemod.sh
......@@ -132,8 +132,10 @@ parser.add_argument('--model', type=str, default='', help='resume PoseNet model'
parser.add_argument('--refine_model', type=str, default='', help='resume PoseRefineNet model')
opt = parser.parse_args()
# num_objects = 5
num_objects = 8
objlist = [1, 2, 3, 4, 5, 6, 7, 8]
# objlist = [1, 3, 6, 7, 8]
objlist = [1, 2, 3, 4 ,5 ,6, 7, 8]
num_points = 500
iteration = 4
bs = 1
......@@ -143,7 +145,7 @@ cam_fx = 543.2527222420504 # TODO
cam_fy = 724.3369629894005 # TODO
# ["banana1", "kiwi1", "pear2", "strawberry1", "apricot", "orange2", "peach1", "lemon2", "apple2" ]
map_id_obj = {
"""map_id_obj = {
1: 'banana1',
2: 'kiwi1',
3: 'pear2',
......@@ -152,7 +154,14 @@ map_id_obj = {
6: 'peach1',
7: 'lemon2',
8: 'apple2',
}
}"""
#map_id_obj = {1: 'apple2', 3: 'banana1'}
map_id_obj = {1: 'apple2', 2: 'apricot', 3: 'banana1', 4: 'kiwi1', 5:'lemon2', 6: 'orange2', 7: 'peach1', 8: 'pear2'}
# apple2, apricot, banana1, kiwi1, lemon2, orange2, peach1, pear2
#self.objlist = [1, 2, 3, 4, 5, 6, 7, 8]
#self.objlist = [1, 3, 6, 7, 8]
K = np.array([[cam_fx, 0, cam_cx], [0, cam_fy, cam_cy], [0, 0, 1]])
dataset_config_dir = 'datasets/linemod/dataset_config'
output_result_dir = 'experiments/eval_result/linemod'
......@@ -176,8 +185,9 @@ criterion = Loss(num_points_mesh, sym_list)
criterion_refine = Loss_refine(num_points_mesh, sym_list)
diameter = []
print('{0}/models_info.yml'.format(dataset_config_dir))
meta_file = open('{0}/models_info.yml'.format(dataset_config_dir), 'r')
meta = yaml.load(meta_file)
meta = yaml.load(meta_file, yaml.Loader)
for obj in objlist:
diameter.append(meta[obj]['diameter'] / 1000.0 * 0.1)
print(diameter)
......
......@@ -30,7 +30,7 @@ from lib.utils import setup_logger
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default = 'ycb', help='ycb or linemod')
parser.add_argument('--dataset_root', type=str, default = '', help='dataset root dir (''YCB_Video_Dataset'' or ''Linemod_preprocessed'')')
parser.add_argument('--batch_size', type=int, default = 8, help='batch size')
parser.add_argument('--batch_size', type=int, default = 32, help='batch size')
parser.add_argument('--workers', type=int, default = 10, help='number of data loading workers')
parser.add_argument('--lr', default=0.0001, help='learning rate')
parser.add_argument('--lr_rate', default=0.3, help='learning rate decay rate')
......@@ -40,7 +40,7 @@ parser.add_argument('--decay_margin', default=0.016, help='margin to decay lr &
parser.add_argument('--refine_margin', default=0.013, help='margin to start the training of iterative refinement')
parser.add_argument('--noise_trans', default=0.03, help='range of the random noise of translation added to the training data')
parser.add_argument('--iteration', type=int, default = 2, help='number of refinement iterations')
parser.add_argument('--nepoch', type=int, default=500, help='max number of epochs to train')
parser.add_argument('--nepoch', type=int, default=60, help='max number of epochs to train')
parser.add_argument('--resume_posenet', type=str, default = '', help='resume PoseNet model')
parser.add_argument('--resume_refinenet', type=str, default = '', help='resume PoseRefineNet model')
parser.add_argument('--start_epoch', type=int, default = 1, help='which epoch to start')
......@@ -59,12 +59,12 @@ def main():
opt.log_dir = 'experiments/logs/ycb' #folder to save logs
opt.repeat_epoch = 1 #number of repeat times for one epoch training
elif opt.dataset == 'linemod':
#opt.num_objects = 8 #TODO
opt.num_objects = 1 #TODO
opt.num_objects = 8 #TODO
#opt.num_objects = 5 #TODO
opt.num_points = 500
opt.outf = 'trained_models/linemod'
opt.log_dir = 'experiments/logs/linemod'
opt.repeat_epoch = 20
opt.outf = 'trained_models/linemod8'
opt.log_dir = 'experiments/logs/linemod8'
opt.repeat_epoch = 5
else:
print('Unknown dataset')
return
......