Guillaume Duret / DenseFusion / Commits / 4726dcae

Commit 4726dcae, authored 2 years ago by Guillaume Duret

changes eval

Parent: 0086b123

Showing 1 changed file: tools/eval_linemod.py (+147 additions, −16 deletions)
...
@@ -21,25 +21,146 @@ from lib.loss import Loss
 from lib.loss_refiner import Loss_refine
 from lib.transformations import euler_matrix, quaternion_matrix, quaternion_from_matrix
 from lib.knn.__init__ import KNearestNeighbor
+from PIL import Image
+import cv2
+import math
+
+def draw_axis(img, rotation_vec, t, K, scale=0.1, dist=None):
+    """
+    Draw a 6dof axis (XYZ -> RGB) in the given rotation and translation
+    :param img - rgb numpy array
+    :rotation_vec - euler rotations, numpy array of length 3,
+                    use cv2.Rodrigues(R)[0] to convert from rotation matrix
+    :t - 3d translation vector, in meters (dtype must be float)
+    :K - intrinsic calibration matrix, 3x3
+    :scale - factor to control the axis lengths
+    :dist - optional distortion coefficients, numpy array of length 4. If None, distortion is ignored.
+    """
+    # img = img.astype(np.float32)
+    dist = np.zeros(4, dtype=float) if dist is None else dist
+    points = scale * np.float32([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]]).reshape(-1, 3)
+    axis_points, _ = cv2.projectPoints(points, rotation_vec, t, K, dist)
+    axis_points = np.asarray(axis_points, dtype='int')
+    img = cv2.line(img, tuple(axis_points[3].ravel()), tuple(axis_points[0].ravel()), (255, 0, 0), 3)
+    img = cv2.line(img, tuple(axis_points[3].ravel()), tuple(axis_points[1].ravel()), (0, 255, 0), 3)
+    img = cv2.line(img, tuple(axis_points[3].ravel()), tuple(axis_points[2].ravel()), (0, 0, 255), 3)
+    return img
+
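A minimal usage sketch for draw_axis (not part of the commit), assuming a pose given as a 3x3 rotation matrix and a translation in metres; all names below are hypothetical, and cv2.Rodrigues converts the matrix into the rotation vector the helper expects:

    import numpy as np
    import cv2
    # Hypothetical inputs: a blank image and an identity pose one metre from the camera.
    img = np.zeros((480, 640, 3), dtype=np.uint8)
    K = np.array([[543.25, 0.0, 320.25], [0.0, 724.34, 240.33], [0.0, 0.0, 1.0]])
    R = np.eye(3)                        # rotation matrix of the pose
    t = np.array([0.0, 0.0, 1.0])        # translation, metres
    rvec = cv2.Rodrigues(R)[0]           # rotation matrix -> Rodrigues vector
    img = draw_axis(img, rvec, t, K, scale=0.1)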
+def draw_cube(img, rotation_vec, t, K, size, scale=0.1, dist=None):
+    # size gives the sizes along each axis (the same value is reused for x, y and z below)
+    # img = img.astype(np.float32)
+    dist = np.zeros(4, dtype=float) if dist is None else dist
+    x, y, z = size, size, size
+    points = scale * np.float32([[-x, -y, z], [-x, y, z], [x, y, z], [x, -y, z],
+                                 [-x, -y, -z], [-x, y, -z], [x, y, -z], [x, -y, -z]])
+    axis_points, _ = cv2.projectPoints(points, rotation_vec, t, K, dist)
+    axis_points = np.asarray(axis_points, dtype='int')
+    print(axis_points)
+    # draw bottom layer in green color
+    img = cv2.drawContours(img, [axis_points[:4]], -1, (0, 255, 0), 3)
+    # connect the two layers
+    for i, j in zip(range(4), range(4, 8)):
+        img = cv2.line(img, tuple(axis_points[i].ravel()), tuple(axis_points[j].ravel()), (255, 0, 0), 3)
+    # draw top layer in red color
+    img = cv2.drawContours(img, [axis_points[4:]], -1, (0, 0, 255), 3)
+    return img
+
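The same inputs drive draw_cube; since x, y and z are all set from the single size argument, the drawn box is always a cube with side 2 * size * scale metres. Continuing the sketch above with a hypothetical half-extent:

    img = draw_cube(img, rvec, t, K, size=0.5, scale=0.1)   # cube with 0.1 m sides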
+def generate_pose(rot, trans):
+    pose = quaternion_to_rotation_matrix(rot)
+    # translation goes in the last column of the 4x4 pose, matching how get_R_t reads it back
+    pose[:3, 3] = trans
+    return pose
+
+def get_R_t(mat):
+    return mat[:3, :3], mat[:3, 3]
+
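A round-trip sketch for the two helpers above, assuming the (w, x, y, z) quaternion ordering that quaternion_to_rotation_matrix (defined just below) indexes:

    q = np.array([1.0, 0.0, 0.0, 0.0])              # identity rotation, (w, x, y, z)
    pose = generate_pose(q, np.array([0.1, 0.2, 0.3]))
    R, t = get_R_t(pose)                             # R == np.eye(3), t == [0.1, 0.2, 0.3]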
+def quaternion_to_rotation_matrix(quat):
+    q = quat.copy()
+    n = np.dot(q, q)
+    if n < np.finfo(q.dtype).eps:
+        return np.identity(4)
+    q = q * np.sqrt(2.0 / n)
+    q = np.outer(q, q)
+    rot_matrix = np.array(
+        [[1.0 - q[2, 2] - q[3, 3], q[1, 2] + q[3, 0], q[1, 3] - q[2, 0], 0.0],
+         [q[1, 2] - q[3, 0], 1.0 - q[1, 1] - q[3, 3], q[2, 3] + q[1, 0], 0.0],
+         [q[1, 3] + q[2, 0], q[2, 3] - q[1, 0], 1.0 - q[1, 1] - q[2, 2], 0.0],
+         [0.0, 0.0, 0.0, 1.0]],
+        dtype=q.dtype)
+    return rot_matrix
+
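A worked check of the convention (editor's sketch): for a 90-degree rotation about z this function returns the transpose of the usual right-handed Rz(90). The off-diagonal signs appear swapped relative to quaternion_matrix in lib.transformations, so the two helpers are not interchangeable:

    q = np.array([np.cos(np.pi / 4), 0.0, 0.0, np.sin(np.pi / 4)])   # 90 deg about z, (w, x, y, z)
    R4 = quaternion_to_rotation_matrix(q)
    assert np.allclose(R4[:3, :3], [[0, 1, 0], [-1, 0, 0], [0, 0, 1]])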
+def euler_from_quaternion(x, y, z, w):
+    """
+    Convert a quaternion into euler angles (roll, pitch, yaw)
+    roll is rotation around x in radians (counterclockwise)
+    pitch is rotation around y in radians (counterclockwise)
+    yaw is rotation around z in radians (counterclockwise)
+    """
+    t0 = +2.0 * (w * x + y * z)
+    t1 = +1.0 - 2.0 * (x * x + y * y)
+    roll_x = math.atan2(t0, t1)
+    t2 = +2.0 * (w * y - z * x)
+    t2 = +1.0 if t2 > +1.0 else t2
+    t2 = -1.0 if t2 < -1.0 else t2
+    pitch_y = math.asin(t2)
+    t3 = +2.0 * (w * z + x * y)
+    t4 = +1.0 - 2.0 * (y * y + z * z)
+    yaw_z = math.atan2(t3, t4)
+    return roll_x, pitch_y, yaw_z  # in radians
+
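A quick numeric check (editor's sketch), using the (x, y, z, w) argument order this function takes: a 90-degree rotation about z should come back as yaw = pi/2 with zero roll and pitch:

    s = np.sin(np.pi / 4)                            # q = (x, y, z, w) = (0, 0, s, s)
    roll, pitch, yaw = euler_from_quaternion(0.0, 0.0, s, s)
    assert abs(roll) < 1e-9 and abs(pitch) < 1e-9 and abs(yaw - np.pi / 2) < 1e-9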
+# 180 degree rotation about the x axis (flips the y and z axes)
+flip_rot_matrix = np.array([[1, 0, 0, 0],
+                            [0, -1, 0, 0],
+                            [0, 0, -1, 0],
+                            [0, 0, 0, 1]], dtype=np.float32)
+
 parser = argparse.ArgumentParser()
 parser.add_argument('--dataset_root', type=str, default='', help='dataset root dir')
 parser.add_argument('--model', type=str, default='', help='resume PoseNet model')
 parser.add_argument('--refine_model', type=str, default='', help='resume PoseRefineNet model')
 opt = parser.parse_args()
-num_objects = 13
+num_objects = 8
-objlist = [1, 2, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15]
+objlist = [1, 2, 3, 4, 5, 6, 7, 8]
 num_points = 500
 iteration = 4
 bs = 1
+cam_cx = 320.25  # TODO
+cam_cy = 240.33333333333331  # TODO
+cam_fx = 543.2527222420504  # TODO
+cam_fy = 724.3369629894005  # TODO
+
+# ["banana1", "kiwi1", "pear2", "strawberry1", "apricot", "orange2", "peach1", "lemon2", "apple2"]
+map_id_obj = {
+    1: 'banana1',
+    2: 'kiwi1',
+    3: 'pear2',
+    4: 'strawberry1',
+    5: 'orange2',
+    6: 'peach1',
+    7: 'lemon2',
+    8: 'apple2',
+}
+
+K = np.array([[cam_fx, 0, cam_cx],
+              [0, cam_fy, cam_cy],
+              [0, 0, 1]])
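For reference, these intrinsics project a camera-frame point (X, Y, Z) by the pinhole model u = cam_fx * X / Z + cam_cx and v = cam_fy * Y / Z + cam_cy; a sketch with a hypothetical point:

    X, Y, Z = 0.05, -0.02, 1.0                       # hypothetical point, metres
    u = cam_fx * X / Z + cam_cx                      # ~= 347.4
    v = cam_fy * Y / Z + cam_cy                      # ~= 225.8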
 dataset_config_dir = 'datasets/linemod/dataset_config'
 output_result_dir = 'experiments/eval_result/linemod'
 knn = KNearestNeighbor(1)
 estimator = PoseNet(num_points=num_points, num_obj=num_objects)
 estimator.cuda()
 refiner = PoseRefineNet(num_points=num_points, num_obj=num_objects)
 refiner.cuda()
 estimator.load_state_dict(torch.load(opt.model))
 refiner.load_state_dict(torch.load(opt.refine_model))
...
@@ -64,20 +185,19 @@ print(diameter)
 success_count = [0 for i in range(num_objects)]
 num_count = [0 for i in range(num_objects)]
 fw = open('{0}/eval_result_logs.txt'.format(output_result_dir), 'w')
 for i, data in enumerate(testdataloader, 0):
     points, choose, img, target, model_points, idx = data
     if len(points.size()) == 2:
         print('No.{0} NOT Pass! Lost detection!'.format(i))
         fw.write('No.{0} NOT Pass! Lost detection!\n'.format(i))
         continue
     points, choose, img, target, model_points, idx = Variable(points).cuda(), \
                                                      Variable(choose).cuda(), \
                                                      Variable(img).cuda(), \
                                                      Variable(target).cuda(), \
                                                      Variable(model_points).cuda(), \
                                                      Variable(idx).cuda()
     pred_r, pred_t, pred_c, emb = estimator(img, points, choose, idx)
     pred_r = pred_r / torch.norm(pred_r, dim=2).view(1, num_points, 1)
     pred_c = pred_c.view(bs, num_points)
...
@@ -89,11 +209,14 @@ for i, data in enumerate(testdataloader, 0):
     my_pred = np.append(my_r, my_t)
     for ite in range(0, iteration):
         T = Variable(torch.from_numpy(my_t.astype(np.float32))).cuda().view(1, 3).repeat(num_points, 1).contiguous().view(1, num_points, 3)
         my_mat = quaternion_matrix(my_r)
         R = Variable(torch.from_numpy(my_mat[:3, :3].astype(np.float32))).cuda().view(1, 3, 3)
         my_mat[0:3, 3] = my_t
         new_points = torch.bmm((points - T), R).contiguous()
         pred_r, pred_t = refiner(new_points, emb, idx)
         pred_r = pred_r.view(1, 1, -1)
...
@@ -120,6 +243,14 @@ for i, data in enumerate(testdataloader, 0):
     pred = np.dot(model_points, my_r.T) + my_t
     target = target[0].cpu().detach().numpy()
+    matrix = np.zeros((3, 4))
+    matrix[:3, :3] = my_r
+    matrix[:3, 3] = my_t
+    # np.save(f"results2/result_{i}.npy", matrix)
+    """
+    with open(f"results/result_{temp_id}.txt", "w") as f:
+        f.write(f"{matrix}")
+    """
     if idx[0].item() in sym_list:
         pred = torch.from_numpy(pred.astype(np.float32)).cuda().transpose(1, 0).contiguous()
         target = torch.from_numpy(target.astype(np.float32)).cuda().transpose(1, 0).contiguous()
...
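If the commented-out np.save call above is re-enabled, each saved 3x4 [R|t] matrix can be read back and split with the same slicing that get_R_t uses (a sketch; the filename mirrors the commented line):

    m = np.load("results2/result_0.npy")             # hypothetical first frame
    R_saved, t_saved = m[:3, :3], m[:3, 3]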