diff --git a/tools/eval_linemod.py b/tools/eval_linemod.py
index 3d8d802f66c8b87aa83600309a503df3c8223654..c47d48d22b3cf2670c9130e8da6426ab66db54e9 100644
--- a/tools/eval_linemod.py
+++ b/tools/eval_linemod.py
@@ -21,25 +21,146 @@ from lib.loss import Loss
 from lib.loss_refiner import Loss_refine
 from lib.transformations import euler_matrix, quaternion_matrix, quaternion_from_matrix
 from lib.knn.__init__ import KNearestNeighbor
+from PIL import Image
+import cv2
+import math
+
+
+def draw_axis(img, rotation_vec, t, K, scale=0.1, dist=None):
+    """
+    Draw a 6dof axis (XYZ -> RGB) in the given rotation and translation
+    :param img - rgb numpy array
+    :rotation_vec - euler rotations, numpy array of length 3,
+                    use cv2.Rodrigues(R)[0] to convert from rotation matrix
+    :t - 3d translation vector, in meters (dtype must be float)
+    :K - intrinsic calibration matrix , 3x3
+    :scale - factor to control the axis lengths
+    :dist - optional distortion coefficients, numpy array of length 4. If None distortion is ignored.
+    """
+    dist = np.zeros(4, dtype=float) if dist is None else dist
+    points = scale * np.float32([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]]).reshape(-1, 3)
+    axis_points, _ = cv2.projectPoints(points, rotation_vec, t, K, dist)
+    axis_points = np.asarray(axis_points, dtype='int')
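+    # index 3 is the projected origin; draw a line from it to each axis tip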
+    img = cv2.line(img, tuple(axis_points[3].ravel()), tuple(axis_points[0].ravel()), (255, 0, 0), 3)
+    img = cv2.line(img, tuple(axis_points[3].ravel()), tuple(axis_points[1].ravel()), (0, 255, 0), 3)
+    img = cv2.line(img, tuple(axis_points[3].ravel()), tuple(axis_points[2].ravel()), (0, 0, 255), 3)
+    return img
+
+
+def draw_cube(img, rotation_vec, t, K, size, scale=0.1, dist=None):
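+    """
+    Draw a 3D bounding cube at the given pose.
+
+    Parameters mirror draw_axis; `size` is the half-extent along each axis,
+    so the rendered cube has side length 2 * scale * size.
+    """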
+    # `size` sets the cube extent along each axis (uniform here)
+
+    dist = np.zeros(4, dtype=float) if dist is None else dist
+    x, y, z = size, size, size
+    points = scale * np.float32([[-x, -y, z], [-x, y, z], [x, y, z], [x, -y, z],
+                                 [-x, -y, -z], [-x, y, -z], [x, y, -z], [x, -y, -z]])
+    axis_points, _ = cv2.projectPoints(points, rotation_vec, t, K, dist)
+    axis_points = np.asarray(axis_points, dtype='int')
+
+    # draw the first face (z = +size) in green
+    img = cv2.drawContours(img, [axis_points[:4]], -1, (0, 255, 0), 3)
+
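+    # draw the connecting edges (pillars) in blue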
+    for i, j in zip(range(4), range(4, 8)):
+        img = cv2.line(img, tuple(axis_points[i].ravel()), tuple(axis_points[j].ravel()), (255, 0, 0), 3)
+
+    # draw the opposite face (z = -size) in red
+    img = cv2.drawContours(img, [axis_points[4:]], -1, (0, 0, 255), 3)
+    return img
+
+
+def generate_pose(rot, trans):
+    """Build a 4x4 homogeneous pose matrix from a quaternion and a translation."""
+    pose = quaternion_to_rotation_matrix(rot)
+    pose[:3, 3] = trans
+    return pose
+
+
+def get_R_t(mat):
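+    """Split a 4x4 homogeneous transform into its 3x3 rotation and translation."""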
+    return mat[:3, :3], mat[:3, 3]
+
+
+def quaternion_to_rotation_matrix(quat):
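+    """
+    Convert a quaternion in (w, x, y, z) order into a 4x4 homogeneous rotation
+    matrix; returns the identity for a near-zero quaternion.
+    """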
+    q = quat.copy()
+    n = np.dot(q, q)
+    if n < np.finfo(q.dtype).eps:
+        return np.identity(4)
+    q = q * np.sqrt(2.0 / n)
+    q = np.outer(q, q)
+    rot_matrix = np.array(
+        [[1.0 - q[2, 2] - q[3, 3], q[1, 2] + q[3, 0], q[1, 3] - q[2, 0], 0.0],
+         [q[1, 2] - q[3, 0], 1.0 - q[1, 1] - q[3, 3], q[2, 3] + q[1, 0], 0.0],
+         [q[1, 3] + q[2, 0], q[2, 3] - q[1, 0], 1.0 - q[1, 1] - q[2, 2], 0.0],
+         [0.0, 0.0, 0.0, 1.0]],
+        dtype=q.dtype)
+    return rot_matrix
+
+
+def euler_from_quaternion(x, y, z, w):
+    """
+        Convert a quaternion into euler angles (roll, pitch, yaw)
+        roll is rotation around x in radians (counterclockwise)
+        pitch is rotation around y in radians (counterclockwise)
+        yaw is rotation around z in radians (counterclockwise)
+        """
+    t0 = +2.0 * (w * x + y * z)
+    t1 = +1.0 - 2.0 * (x * x + y * y)
+    roll_x = math.atan2(t0, t1)
+
+    t2 = +2.0 * (w * y - z * x)
+    t2 = +1.0 if t2 > +1.0 else t2
+    t2 = -1.0 if t2 < -1.0 else t2
+    pitch_y = math.asin(t2)
+
+    t3 = +2.0 * (w * z + x * y)
+    t4 = +1.0 - 2.0 * (y * y + z * z)
+    yaw_z = math.atan2(t3, t4)
+
+    return roll_x, pitch_y, yaw_z  # in radians
+
+
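+# 180-degree rotation about the X axis (negates Y and Z); useful for switching
+# between camera coordinate conventions that differ by this flip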
+flip_rot_matrix = np.array(
+    [[1, 0, 0, 0],
+     [0, -1, 0, 0],
+     [0, 0, -1, 0],
+     [0, 0, 0, 1]],
+    dtype=np.float32)
 
 parser = argparse.ArgumentParser()
-parser.add_argument('--dataset_root', type=str, default = '', help='dataset root dir')
-parser.add_argument('--model', type=str, default = '',  help='resume PoseNet model')
-parser.add_argument('--refine_model', type=str, default = '',  help='resume PoseRefineNet model')
+parser.add_argument('--dataset_root', type=str, default='', help='dataset root dir')
+parser.add_argument('--model', type=str, default='', help='resume PoseNet model')
+parser.add_argument('--refine_model', type=str, default='', help='resume PoseRefineNet model')
 opt = parser.parse_args()
 
-num_objects = 13
-objlist = [1, 2, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15]
+num_objects = 8
+objlist = [1, 2, 3, 4, 5, 6, 7, 8]
 num_points = 500
 iteration = 4
 bs = 1
+# camera intrinsics (TODO: replace with your calibration values)
+cam_cx = 320.25
+cam_cy = 240.33333333333331
+cam_fx = 543.2527222420504
+cam_fy = 724.3369629894005
+
+# ["banana1", "kiwi1", "pear2", "strawberry1", "apricot", "orange2", "peach1", "lemon2", "apple2" ]
+map_id_obj = {
+    1: 'banana1',
+    2: 'kiwi1',
+    3: 'pear2',
+    4: 'strawberry1',
+    5: 'orange2',
+    6: 'peach1',
+    7: 'lemon2',
+    8: 'apple2',
+}
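+# 3x3 camera intrinsic matrix assembled from the values above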
+K = np.array([[cam_fx, 0, cam_cx], [0, cam_fy, cam_cy], [0, 0, 1]])
 dataset_config_dir = 'datasets/linemod/dataset_config'
 output_result_dir = 'experiments/eval_result/linemod'
 knn = KNearestNeighbor(1)
 
-estimator = PoseNet(num_points = num_points, num_obj = num_objects)
+estimator = PoseNet(num_points=num_points, num_obj=num_objects)
 estimator.cuda()
-refiner = PoseRefineNet(num_points = num_points, num_obj = num_objects)
+refiner = PoseRefineNet(num_points=num_points, num_obj=num_objects)
 refiner.cuda()
 estimator.load_state_dict(torch.load(opt.model))
 refiner.load_state_dict(torch.load(opt.refine_model))
@@ -64,20 +185,19 @@ print(diameter)
 success_count = [0 for i in range(num_objects)]
 num_count = [0 for i in range(num_objects)]
 fw = open('{0}/eval_result_logs.txt'.format(output_result_dir), 'w')
-
 for i, data in enumerate(testdataloader, 0):
     points, choose, img, target, model_points, idx = data
+
     if len(points.size()) == 2:
         print('No.{0} NOT Pass! Lost detection!'.format(i))
         fw.write('No.{0} NOT Pass! Lost detection!\n'.format(i))
         continue
     points, choose, img, target, model_points, idx = Variable(points).cuda(), \
-                                                     Variable(choose).cuda(), \
-                                                     Variable(img).cuda(), \
-                                                     Variable(target).cuda(), \
-                                                     Variable(model_points).cuda(), \
-                                                     Variable(idx).cuda()
-
+        Variable(choose).cuda(), \
+        Variable(img).cuda(), \
+        Variable(target).cuda(), \
+        Variable(model_points).cuda(), \
+        Variable(idx).cuda()
     pred_r, pred_t, pred_c, emb = estimator(img, points, choose, idx)
     pred_r = pred_r / torch.norm(pred_r, dim=2).view(1, num_points, 1)
     pred_c = pred_c.view(bs, num_points)
@@ -89,11 +209,14 @@ for i, data in enumerate(testdataloader, 0):
     my_pred = np.append(my_r, my_t)
 
     for ite in range(0, iteration):
-        T = Variable(torch.from_numpy(my_t.astype(np.float32))).cuda().view(1, 3).repeat(num_points, 1).contiguous().view(1, num_points, 3)
+        T = Variable(torch.from_numpy(my_t.astype(np.float32))).cuda() \
+            .view(1, 3).repeat(num_points, 1).contiguous().view(1, num_points, 3)
         my_mat = quaternion_matrix(my_r)
         R = Variable(torch.from_numpy(my_mat[:3, :3].astype(np.float32))).cuda().view(1, 3, 3)
         my_mat[0:3, 3] = my_t
-        
+
         new_points = torch.bmm((points - T), R).contiguous()
         pred_r, pred_t = refiner(new_points, emb, idx)
         pred_r = pred_r.view(1, 1, -1)
@@ -120,6 +243,14 @@ for i, data in enumerate(testdataloader, 0):
     pred = np.dot(model_points, my_r.T) + my_t
     target = target[0].cpu().detach().numpy()
 
+    # assemble the estimated pose as a 3x4 [R|t] matrix
+    matrix = np.zeros((3, 4))
+    matrix[:3, :3] = my_r
+    matrix[:3, 3] = my_t
+    # np.save(f"results2/result_{i}.npy", matrix)
+    """
+   with open(f"results/result_{temp_id}.txt", "w") as f:
+        f.write(f"{matrix}")
+    """
     if idx[0].item() in sym_list:
         pred = torch.from_numpy(pred.astype(np.float32)).cuda().transpose(1, 0).contiguous()
         target = torch.from_numpy(target.astype(np.float32)).cuda().transpose(1, 0).contiguous()