Commit a4d83119 authored by liuxingyu

rm dr utils

parent 9deff8b8
Showing with 0 additions and 1741 deletions
# differentiable renderer utils
# NOTE: override the kaolin one
from .renderer.base import Renderer as DIBRenderer
from .rasterizer import *
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import print_function
from __future__ import division
import torch
import torch.nn
import torch.autograd
from torch.autograd import Function
from kaolin.graphics.dib_renderer.cuda import rasterizer as cuda_rasterizer
import cv2
import numpy as np
import datetime
@torch.jit.script
def prepare_tfpoints(
tfpoints3d_bxfx9,
tfpoints2d_bxfx6,
multiplier: float,
batch_size: int,
num_faces: int,
expand: float,
):
# avoid numeric error
tfpoints2dmul_bxfx6 = multiplier * tfpoints2d_bxfx6
# bbox
tfpoints2d_bxfx3x2 = tfpoints2dmul_bxfx6.view(batch_size, num_faces, 3, 2)
tfpoints_min = torch.min(tfpoints2d_bxfx3x2, dim=2)[0]
tfpoints_max = torch.max(tfpoints2d_bxfx3x2, dim=2)[0]
tfpointsbbox_bxfx4 = torch.cat((tfpoints_min, tfpoints_max), dim=2)
# bbox2
tfpoints_min = tfpoints_min - expand * multiplier
tfpoints_max = tfpoints_max + expand * multiplier
tfpointsbbox2_bxfx4 = torch.cat((tfpoints_min, tfpoints_max), dim=2)
# depth
_tfpoints3d_bxfx9 = tfpoints3d_bxfx9.permute(2, 0, 1)
tfpointsdep_bxfx1 = (
_tfpoints3d_bxfx9[2, :, :] + _tfpoints3d_bxfx9[5, :, :] + _tfpoints3d_bxfx9[8, :, :]
).unsqueeze(-1) / 3.0
return (
tfpoints2dmul_bxfx6,
tfpointsbbox_bxfx4,
tfpointsbbox2_bxfx4,
tfpointsdep_bxfx1,
)
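# A minimal sketch (hypothetical shapes) of how prepare_tfpoints is called:
# B batches, F faces, with the three vertices of each face flattened along the
# last dimension. The multiplier/expand values below are the rasterizer defaults.
def _example_prepare_tfpoints():
    B, F = 2, 4
    pts3d = torch.rand(B, F, 9)  # per-face xyz of the 3 vertices, flattened
    pts2d = torch.rand(B, F, 6)  # per-face xy of the 3 vertices, flattened
    mul2d, bbox, bbox2, dep = prepare_tfpoints(pts3d, pts2d, 1000.0, B, F, 0.02)
    # mul2d: (B, F, 6) scaled 2d points, bbox/bbox2: (B, F, 4), dep: (B, F, 1)
    return mul2d, bbox, bbox2, dep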
class LinearRasterizer(Function):
@staticmethod
def forward(
ctx,
width,
height,
tfpoints3d_bxfx9,
tfpoints2d_bxfx6,
tfnormalz_bxfx1,
vertex_attr_bxfx3d,
expand=None,
knum=None,
multiplier=None,
delta=None,
debug=False,
):
if expand is None:
expand = 0.02
if knum is None:
knum = 30
if multiplier is None:
multiplier = 1000
if delta is None:
delta = 7000
batch_size = tfpoints3d_bxfx9.shape[0]
num_faces = tfpoints3d_bxfx9.shape[1]
num_vertex_attr = vertex_attr_bxfx3d.shape[2] / 3
assert num_vertex_attr == int(
num_vertex_attr
), "vertex_attr_bxfx3d has {} channels, which is not a multiple of 3".format(vertex_attr_bxfx3d.shape[2])
num_vertex_attr = int(num_vertex_attr)
###################################################
start = datetime.datetime.now()
(tfpoints2dmul_bxfx6, tfpointsbbox_bxfx4, tfpointsbbox2_bxfx4, tfpointsdep_bxfx1,) = prepare_tfpoints(
tfpoints3d_bxfx9,
tfpoints2d_bxfx6,
multiplier,
batch_size,
num_faces,
expand,
)
device = tfpoints2dmul_bxfx6.device
# output
tfimidxs_bxhxwx1 = torch.zeros(batch_size, height, width, 1, dtype=torch.float32, device=device)
# set depth as very far
tfimdeps_bxhxwx1 = torch.full(
(batch_size, height, width, 1),
fill_value=-1000.0,
dtype=torch.float32,
device=device,
)
tfimweis_bxhxwx3 = torch.zeros(batch_size, height, width, 3, dtype=torch.float32, device=device)
tfims_bxhxwxd = torch.zeros(
batch_size,
height,
width,
num_vertex_attr,
dtype=torch.float32,
device=device,
)
tfimprob_bxhxwx1 = torch.zeros(batch_size, height, width, 1, dtype=torch.float32, device=device)
# intermediate variables
tfprobface = torch.zeros(batch_size, height, width, knum, dtype=torch.float32, device=device)
tfprobcase = torch.zeros(batch_size, height, width, knum, dtype=torch.float32, device=device)
tfprobdis = torch.zeros(batch_size, height, width, knum, dtype=torch.float32, device=device)
tfprobdep = torch.zeros(batch_size, height, width, knum, dtype=torch.float32, device=device)
tfprobacc = torch.zeros(batch_size, height, width, knum, dtype=torch.float32, device=device)
# face direction
tfpointsdirect_bxfx1 = tfnormalz_bxfx1.contiguous()
cuda_rasterizer.forward(
tfpoints3d_bxfx9,
tfpoints2dmul_bxfx6,
tfpointsdirect_bxfx1,
tfpointsbbox_bxfx4,
tfpointsbbox2_bxfx4,
tfpointsdep_bxfx1,
vertex_attr_bxfx3d,
tfimidxs_bxhxwx1,
tfimdeps_bxhxwx1,
tfimweis_bxhxwx3,
tfprobface,
tfprobcase,
tfprobdis,
tfprobdep,
tfprobacc,
tfims_bxhxwxd,
tfimprob_bxhxwx1,
multiplier,
delta,
)
end = datetime.datetime.now()
###################################################
if debug:
print(end - start)
ims_bxhxwxd = tfims_bxhxwxd.detach().cpu().numpy()
improbs_bxhxwx1 = tfimprob_bxhxwx1.detach().cpu().numpy()
imidxs_bxhxwx1 = tfimidxs_bxhxwx1.detach().cpu().numpy()
imdeps_bxhxwx1 = tfimdeps_bxhxwx1.detach().cpu().numpy()
imweis_bxhxwx3 = tfimweis_bxhxwx3.detach().cpu().numpy()
print(ims_bxhxwxd.shape)
print(improbs_bxhxwx1.shape)
print(np.max(improbs_bxhxwx1))
cv2.imshow("0", ims_bxhxwxd[-1, :, :, :3])
cv2.imshow("1", improbs_bxhxwx1[-1])
cv2.imshow("2", imweis_bxhxwx3[-1])
cv2.imshow("3", imidxs_bxhxwx1[-1] / num_faces)
cv2.imshow("4", imdeps_bxhxwx1[-1])
cv2.waitKey()
debug_im = torch.zeros(batch_size, height, width, 3, dtype=torch.float32, device=device)
ctx.save_for_backward(
tfims_bxhxwxd,
tfimprob_bxhxwx1,
tfimidxs_bxhxwx1,
tfimweis_bxhxwx3,
tfpoints2dmul_bxfx6,
vertex_attr_bxfx3d,
tfprobface,
tfprobcase,
tfprobdis,
tfprobdep,
tfprobacc,
debug_im,
)
ctx.multiplier = multiplier
ctx.delta = delta
ctx.debug = debug
tfims_bxhxwxd.requires_grad = True
tfimprob_bxhxwx1.requires_grad = True
return tfims_bxhxwxd, tfimprob_bxhxwx1
@staticmethod
def backward(ctx, dldI_bxhxwxd, dldp_bxhxwx1):
(
tfims_bxhxwxd,
tfimprob_bxhxwx1,
tfimidxs_bxhxwx1,
tfimweis_bxhxwx3,
tfpoints2dmul_bxfx6,
tfcolors_bxfx3d,
tfprobface,
tfprobcase,
tfprobdis,
tfprobdep,
tfprobacc,
debug_im,
) = ctx.saved_variables
multiplier = ctx.multiplier
delta = ctx.delta
debug = ctx.debug
# avoid numeric error
# multiplier = 1000
# tfpoints2d_bxfx6 *= multiplier
dldp2 = torch.zeros_like(tfpoints2dmul_bxfx6)
dldp2_prob = torch.zeros_like(tfpoints2dmul_bxfx6)
dldc = torch.zeros_like(tfcolors_bxfx3d)
cuda_rasterizer.backward(
dldI_bxhxwxd.contiguous(),
dldp_bxhxwx1.contiguous(),
tfims_bxhxwxd,
tfimprob_bxhxwx1,
tfimidxs_bxhxwx1,
tfimweis_bxhxwx3,
tfprobface,
tfprobcase,
tfprobdis,
tfprobdep,
tfprobacc,
tfpoints2dmul_bxfx6,
tfcolors_bxfx3d,
dldp2,
dldc,
dldp2_prob,
debug_im,
multiplier,
delta,
)
if debug:
print(dldc[dldc > 0.1])
print(dldc[dldc > 0.1].shape)
print(dldp2[dldp2 > 0.1])
print(dldp2[dldp2 > 0.1].shape)
print(dldp2_prob[dldp2_prob > 0.1])
print(dldp2_prob[dldp2_prob > 0.1].shape)
return (
None,
None,
None,
dldp2 + dldp2_prob,
None,
dldc,
None,
None,
None,
None,
None,
None,
)
linear_rasterizer = LinearRasterizer.apply
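# A minimal sketch of invoking the autograd rasterizer (assumes a CUDA device
# and the compiled kaolin DIB-R CUDA extension). The shapes are hypothetical;
# the vertex attribute tensor must have a channel count divisible by 3.
def _example_linear_rasterizer(height=64, width=64):
    B, F = 1, 128
    pts3d = torch.rand(B, F, 9, device="cuda")           # camera-space vertices, flattened per face
    pts2d = torch.rand(B, F, 6, device="cuda") * 2 - 1   # projected 2d vertices
    normalz = torch.ones(B, F, 1, device="cuda")         # treat all faces as front-facing
    attrs = torch.rand(B, F, 12, device="cuda")          # 4 attributes per vertex
    feat_bxhxwxd, prob_bxhxwx1 = linear_rasterizer(width, height, pts3d, pts2d, normalz, attrs)
    # feat_bxhxwxd: (B, height, width, 4), prob_bxhxwx1: (B, height, width, 1)
    return feat_bxhxwxd, prob_bxhxwx1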
from .base import *
from .phongrender import *
from .shrender import *
from .texrender import *
from .vcrender import *
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import print_function
from __future__ import division
from core.utils.pose_utils import quat2mat_torch
from ..utils import perspectiveprojectionnp, projectiveprojection_real
from .phongrender import PhongRender
from .shrender import SHRender
from .texrender import TexRender as Lambertian
from .vcrender import VCRender
from .vcrender_batch import VCRenderBatch
from .vcrender_multi import VCRenderMulti
from .texrender_multi import TexRenderMulti
from .texrender_batch import TexRenderBatch
import numpy as np
import torch
import torch.nn as nn
# renderers = {'VertexColor': VCRender, 'Lambertian': Lambertian, 'SphericalHarmonics': SHRender, 'Phong': PhongRender}
renderers = {
"VertexColor": VCRender,
"VertexColorMulti": VCRenderMulti,
"VertexColorBatch": VCRenderBatch,
"Lambertian": Lambertian,
"Texture": Lambertian, # alias
"TextureMulti": TexRenderMulti,
"TextureBatch": TexRenderBatch,
"SphericalHarmonics": SHRender,
"Phong": PhongRender,
}
class Renderer(nn.Module):
def __init__(
self,
height,
width,
mode="VertexColor",
camera_center=None,
camera_up=None,
camera_fov_y=None,
):
super(Renderer, self).__init__()
assert mode in renderers, "Passed mode {0} must be in the list of accepted modes: {1}".format(mode, renderers)
self.mode = mode
yz_flip = np.eye(3, dtype=np.float32)
yz_flip[1, 1], yz_flip[2, 2] = -1, -1
self.yz_flip = torch.tensor(yz_flip, device="cuda:0")
self.renderer = renderers[mode](height, width)
if camera_center is None:
self.camera_center = np.array([0, 0, 0], dtype=np.float32)
if camera_up is None:
self.camera_up = np.array([0, 1, 0], dtype=np.float32)
if camera_fov_y is None:
self.camera_fov_y = 49.13434207744484 * np.pi / 180.0
self.camera_params = None
def forward(self, points, *args, **kwargs):
if self.camera_params is None:
print(
"Camera parameters have not been set, default perspective parameters of distance = 1, elevation = 30, azimuth = 0 are being used"
)
self.set_look_at_parameters([0], [30], [1])
if self.mode in [
"VertexColorMulti",
"VertexColorBatch",
"TextureMulti",
"TextureBatch",
]:
assert self.camera_params[0].shape[0] == len(
points
), "multi mode need the same length of camera parameters and points"
else:
assert (
self.camera_params[0].shape[0] == points[0].shape[0]
), "Set camera parameters batch size must equal\
batch size of passed points"
return self.renderer(points, self.camera_params, *args, **kwargs)
def set_look_at_parameters(self, azimuth, elevation, distance):
from kaolin.mathutils.geometry.transformations import (
compute_camera_params,
)
camera_projection_mtx = perspectiveprojectionnp(self.camera_fov_y, 1.0)
camera_projection_mtx = torch.FloatTensor(camera_projection_mtx).cuda()
camera_view_mtx = []
camera_view_shift = []
for a, e, d in zip(azimuth, elevation, distance):
mat, pos = compute_camera_params(a, e, d)
camera_view_mtx.append(mat)
camera_view_shift.append(pos)
camera_view_mtx = torch.stack(camera_view_mtx).cuda()
camera_view_shift = torch.stack(camera_view_shift).cuda()
self.camera_params = [
camera_view_mtx,
camera_view_shift,
camera_projection_mtx,
]
def set_camera_parameters(self, parameters):
self.camera_params = parameters
def set_camera_parameters_from_RT_K(self, Rs, ts, Ks, height, width, near=0.01, far=10.0, rot_type="mat"):
"""
Rs: a list of rotation tensors
ts: a list of translation tensors
Ks: a list of camera intrinsic matrices or a single matrix
----
[cam_view_R, cam_view_pos, cam_proj]
"""
"""
aspect_ratio = width / height
fov_x, fov_y = K_to_fov(K, height, width)
# camera_projection_mtx = perspectiveprojectionnp(self.camera_fov_y,
# ratio=aspect_ratio, near=near, far=far)
camera_projection_mtx = perspectiveprojectionnp(fov_y,
ratio=aspect_ratio, near=near, far=far)
"""
assert rot_type in ["mat", "quat"], rot_type
bs = len(Rs)
single_K = False
if isinstance(Ks, (np.ndarray, torch.Tensor)) and Ks.ndim == 2:
K = Ks
camera_proj_mtx = projectiveprojection_real(K, 0, 0, width, height, near, far)
camera_proj_mtx = torch.as_tensor(camera_proj_mtx).float().cuda() # 4x4
single_K = True
camera_view_mtx = []
camera_view_shift = []
if not single_K:
camera_proj_mtx = []
for i in range(bs):
R = Rs[i]
t = ts[i]
if not isinstance(R, torch.Tensor):
R = torch.tensor(R, dtype=torch.float32, device="cuda:0")
if not isinstance(t, torch.Tensor):
t = torch.tensor(t, dtype=torch.float32, device="cuda:0")
if rot_type == "quat":
R = quat2mat_torch(R.unsqueeze(0))[0]
cam_view_R = torch.matmul(self.yz_flip.to(R), R)
cam_view_t = -(torch.matmul(R.t(), t)) # cam pos
camera_view_mtx.append(cam_view_R)
camera_view_shift.append(cam_view_t)
if not single_K:
K = Ks[i]
cam_proj_mtx = projectiveprojection_real(K, 0, 0, width, height, near, far)
cam_proj_mtx = torch.as_tensor(cam_proj_mtx).float().cuda() # 4x4
camera_proj_mtx.append(cam_proj_mtx)
camera_view_mtx = torch.stack(camera_view_mtx).cuda() # bx3x3
camera_view_shift = torch.stack(camera_view_shift).cuda() # bx3
if not single_K:
camera_proj_mtx = torch.stack(camera_proj_mtx) # bx3x1 or bx4x4
# print("camera view matrix: \n", camera_view_mtx, camera_view_mtx.shape) # bx3x3, camera rot?
# print('camera view shift: \n', camera_view_shift, camera_view_shift.shape) # bx3, camera trans?
# print('camera projection mat: \n', camera_proj_mtx, camera_proj_mtx.shape) # projection matrix, 3x1
self.camera_params = [
camera_view_mtx,
camera_view_shift,
camera_proj_mtx,
]
# self.rot_type = rot_type
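# A minimal usage sketch (hypothetical pose and intrinsics, assumes a CUDA
# device): build a texture-mode Renderer and set its camera from a single
# rotation R, translation t and intrinsic matrix K.
def _example_set_camera_from_RT_K(height=480, width=640):
    renderer = Renderer(height, width, mode="Texture")
    R = np.eye(3, dtype=np.float32)
    t = np.array([0.0, 0.0, 0.5], dtype=np.float32)
    K = np.array([[572.4, 0.0, 320.0], [0.0, 572.4, 240.0], [0.0, 0.0, 1.0]], dtype=np.float32)
    renderer.set_camera_parameters_from_RT_K([R], [t], K, height, width, near=0.01, far=10.0)
    # renderer(points, uv, texture) can now be called with a matching batch size of 1
    return renderer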
def K_to_fov(K, height, width):
fx = K[0, 0]
fy = K[1, 1]
fov_x = 2 * np.arctan2(width, 2 * fx) # radian
fov_y = 2 * np.arctan2(height, 2 * fy)
return fov_x, fov_y
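# For example (hypothetical intrinsics): a 640x480 image with fx = fy = 572.4
# gives a vertical field of view of roughly 45.5 degrees.
def _example_K_to_fov():
    K = np.array([[572.4, 0.0, 320.0], [0.0, 572.4, 240.0], [0.0, 0.0, 1.0]], dtype=np.float32)
    fov_x, fov_y = K_to_fov(K, height=480, width=640)
    return np.rad2deg(fov_x), np.rad2deg(fov_y)  # approx. (58.4, 45.5) degrees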
from .frag_phongtex import *
from .frag_shtex import *
from .frag_tex import *
from .interpolation import *
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import print_function
from __future__ import division
import torch
import torch.nn
from .interpolation import texinterpolation
#####################################################
def fragmentshader(
imnormal1_bxhxwx3,
lightdirect1_bx3,
eyedirect1_bxhxwx3,
material_bx3x3,
shininess_bx1,
imtexcoord_bxhxwx2,
texture_bx3xthxtw,
improb_bxhxwx1,
):
# parallel light
lightdirect1_bx1x1x3 = lightdirect1_bx3.view(-1, 1, 1, 3)
# lambertian
cosTheta_bxhxwx1 = torch.sum(imnormal1_bxhxwx3 * lightdirect1_bx1x1x3, dim=3, keepdim=True)
cosTheta_bxhxwx1 = torch.clamp(cosTheta_bxhxwx1, 0, 1)
# specular
reflect = -lightdirect1_bx1x1x3 + 2 * cosTheta_bxhxwx1 * imnormal1_bxhxwx3
cosAlpha_bxhxwx1 = torch.sum(reflect * eyedirect1_bxhxwx3, dim=3, keepdim=True)
cosAlpha_bxhxwx1 = torch.clamp(cosAlpha_bxhxwx1, 1e-5, 1)  # clamp away from 0 to avoid NaN in pow
cosAlpha_bxhxwx1 = torch.pow(cosAlpha_bxhxwx1, shininess_bx1.view(-1, 1, 1, 1))  # shininess should be larger than 0
# simplified model
# light color is [1, 1, 1]
MatAmbColor_bx1x1x3 = material_bx3x3[:, 0:1, :].view(-1, 1, 1, 3)
MatDifColor_bxhxwx3 = material_bx3x3[:, 1:2, :].view(-1, 1, 1, 3) * cosTheta_bxhxwx1
MatSpeColor_bxhxwx3 = material_bx3x3[:, 2:3, :].view(-1, 1, 1, 3) * cosAlpha_bxhxwx1
# tex color
texcolor_bxhxwx3 = texinterpolation(imtexcoord_bxhxwx2, texture_bx3xthxtw)
# ambient and diffuse rely on object color while specular doesn't
color = (MatAmbColor_bx1x1x3 + MatDifColor_bxhxwx3) * texcolor_bxhxwx3 + MatSpeColor_bxhxwx3
color = color * improb_bxhxwx1
return torch.clamp(color, 0, 1)
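# A minimal sketch (hypothetical tensors) of the Phong shader inputs: per-pixel
# normals and eye directions, a single parallel light, a 3x3 material matrix
# holding the ambient/diffuse/specular rows, and a scalar shininess per batch.
def _example_phong_fragmentshader():
    normals = torch.zeros(1, 8, 8, 3)
    normals[..., 2] = 1.0                      # per-pixel normals facing +z
    light = torch.tensor([[0.0, 0.0, 1.0]])    # parallel light along +z
    eye = torch.zeros(1, 8, 8, 3)
    eye[..., 2] = 1.0                          # eye direction along +z
    material = torch.tensor([[[0.3, 0.3, 0.3], [0.7, 0.7, 0.7], [0.2, 0.2, 0.2]]])
    shininess = torch.tensor([[10.0]])
    uv = torch.rand(1, 8, 8, 2)
    texture = torch.rand(1, 3, 16, 16)
    mask = torch.ones(1, 8, 8, 1)
    return fragmentshader(normals, light, eye, material, shininess, uv, texture, mask)  # (1, 8, 8, 3)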
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import print_function
from __future__ import division
import torch
import torch.nn
from .interpolation import texinterpolation
def fragmentshader(
imnormal1_bxhxwx3,
lightparam_bx9,
imtexcoord_bxhxwx2,
texture_bx3xthxtw,
improb_bxhxwx1,
):
# light effect
x = imnormal1_bxhxwx3[:, :, :, 0:1]
y = imnormal1_bxhxwx3[:, :, :, 1:2]
z = imnormal1_bxhxwx3[:, :, :, 2:3]
# spherical harmonic parameters
band0 = 0.2820948 * torch.ones_like(x)
band10 = -0.3257350 * y
band11 = 0.3257350 * z
band12 = -0.3257350 * x
band20 = 0.2731371 * (x * y)
band21 = -0.2731371 * (y * z)
band22 = 0.1365686 * (z * z) - 0.0788479
band23 = -0.1931371 * (x * z)
band24 = 0.1365686 * (x * x - y * y)
bands = torch.cat(
(
band0,
band10,
band11,
band12,
band20,
band21,
band22,
band23,
band24,
),
dim=3,
)
coef = torch.sum(bands * lightparam_bx9.view(-1, 1, 1, 9), dim=3, keepdim=True)
# tex color
texcolor_bxhxwx3 = texinterpolation(imtexcoord_bxhxwx2, texture_bx3xthxtw)
# merge
color = coef * texcolor_bxhxwx3 * improb_bxhxwx1
return torch.clamp(color, 0, 1)
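# A minimal sketch (hypothetical tensors) of the spherical-harmonics shader:
# with normals facing +z and only the band-0 (ambient) coefficient set to
# 1 / 0.2820948, the lighting term evaluates to 1 and the output is simply the
# masked texture color.
def _example_sh_fragmentshader():
    normals = torch.zeros(1, 8, 8, 3)
    normals[..., 2] = 1.0                        # all normals point along +z
    lightparam = torch.zeros(1, 9)
    lightparam[:, 0] = 1.0 / 0.2820948           # ambient-only SH lighting
    uv = torch.rand(1, 8, 8, 2)
    texture = torch.rand(1, 3, 16, 16)
    mask = torch.ones(1, 8, 8, 1)
    return fragmentshader(normals, lightparam, uv, texture, mask)  # (1, 8, 8, 3)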
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import print_function
from __future__ import division
import torch
import torch.nn
from .interpolation import texinterpolation
################################################
def fragmentshader(imtexcoord_bxhxwx2, texture_bx3xthxtw, improb_bxhxwx1, filtering="nearest"):
# interpolation
texcolor_bxhxwx3 = texinterpolation(imtexcoord_bxhxwx2, texture_bx3xthxtw, filtering=filtering)
# mask
color = texcolor_bxhxwx3 * improb_bxhxwx1
return torch.clamp(color, 0, 1)
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import print_function
from __future__ import division
import torch
import torch.nn
################################################
def texinterpolation(imtexcoord_bxhxwx2, texture_bx3xthxtw, filtering="nearest"):
"""Note that opengl tex coord is different from pytorch coord ogl coord
ranges from 0 to 1, y axis is from bottom to top and it supports circular
mode(-0.1 is the same as 0.9) pytorch coord ranges from -1 to 1, y axis is
from top to bottom and does not support circular.
filtering is the same as the mode parameter for
torch.nn.functional.grid_sample.
"""
# convert coord mode from ogl to pytorch
imtexcoord_bxhxwx2 = torch.remainder(imtexcoord_bxhxwx2, 1.0)
imtexcoord_bxhxwx2 = imtexcoord_bxhxwx2 * 2 - 1 # [0, 1] to [-1, 1]
imtexcoord_bxhxwx2[:, :, :, 1] = -1.0 * imtexcoord_bxhxwx2[:, :, :, 1] # reverse y
# sample
texcolor = torch.nn.functional.grid_sample(texture_bx3xthxtw, imtexcoord_bxhxwx2, mode=filtering)
texcolor = texcolor.permute(0, 2, 3, 1)
return texcolor
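# A minimal sketch (hypothetical sizes): sample a 3-channel texture at
# per-pixel UV coordinates given in the OpenGL convention ([0, 1], y up).
def _example_texinterpolation():
    texture = torch.rand(1, 3, 4, 4)   # b x 3 x th x tw
    uv = torch.rand(1, 8, 8, 2)        # b x h x w x 2, OpenGL-style UVs
    color = texinterpolation(uv, texture, filtering="bilinear")
    return color                       # (1, 8, 8, 3)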
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import print_function
from __future__ import division
from ..rasterizer import linear_rasterizer
from ..utils import datanormalize
from .fragment_shaders.frag_phongtex import fragmentshader
from .vertex_shaders.perpsective import perspective_projection
import torch
import torch.nn as nn
##################################################################
class PhongRender(nn.Module):
def __init__(self, height, width):
super(PhongRender, self).__init__()
self.height = height
self.width = width
# render with point normal or not
self.smooth = False
def set_smooth(self, pfmtx):
self.smooth = True
self.pfmtx = torch.from_numpy(pfmtx).view(1, pfmtx.shape[0], pfmtx.shape[1]).cuda()
def forward(
self,
points,
cameras,
uv_bxpx2,
texture_bx3xthxtw,
lightdirect_bx3,
material_bx3x3,
shininess_bx1,
ft_fx3=None,
):
"""
points: [points_bxpx3, faces_fx3]
cameras: camera parameters
[camera_rot_bx3x3, camera_pos_bx3, camera_proj_3x1]
"""
assert lightdirect_bx3 is not None, "When using the Phong model, light parameters must be passed"
assert material_bx3x3 is not None, "When using the Phong model, material parameters must be passed"
assert shininess_bx1 is not None, "When using the Phong model, shininess parameters must be passed"
##############################################################
# first, MVP projection in vertexshader
points_bxpx3, faces_fx3 = points
# use faces_fx3 as ft_fx3 if not given
if ft_fx3 is None:
ft_fx3 = faces_fx3
# camera_rot_bx3x3, camera_pos_bx3, camera_proj_3x1 = cameras
points3d_bxfx9, points2d_bxfx6, normal_bxfx3 = perspective_projection(points_bxpx3, faces_fx3, cameras)
################################################################
# normal
# decide which faces are front and which faces are back
normalz_bxfx1 = normal_bxfx3[:, :, 2:3]
# normalz_bxfx1 = torch.abs(normalz_bxfx1)
# normalize normal
normal1_bxfx3 = datanormalize(normal_bxfx3, axis=2)
####################################################
# smooth or not
if self.smooth:
normal_bxpx3 = torch.matmul(self.pfmtx.repeat(normal_bxfx3.shape[0], 1, 1), normal_bxfx3)
n0 = normal_bxpx3[:, faces_fx3[:, 0], :]
n1 = normal_bxpx3[:, faces_fx3[:, 1], :]
n2 = normal_bxpx3[:, faces_fx3[:, 2], :]
normal_bxfx9 = torch.cat((n0, n1, n2), dim=2)
else:
normal_bxfx9 = normal_bxfx3.repeat(1, 1, 3)
############################################################
# second, rasterization
fnum = normal1_bxfx3.shape[1]
bnum = normal1_bxfx3.shape[0]
# we have uv, normal, eye to interpolate
c0 = uv_bxpx2[:, ft_fx3[:, 0], :]
c1 = uv_bxpx2[:, ft_fx3[:, 1], :]
c2 = uv_bxpx2[:, ft_fx3[:, 2], :]
mask = torch.ones_like(c0[:, :, :1])
uv_bxfx3x3 = torch.cat((c0, mask, c1, mask, c2, mask), dim=2).view(bnum, fnum, 3, -1)
# normal & eye direction
normal_bxfx3x3 = normal_bxfx9.view(bnum, fnum, 3, -1)
eyedirect_bxfx9 = -points3d_bxfx9
eyedirect_bxfx3x3 = eyedirect_bxfx9.view(-1, fnum, 3, 3)
feat = torch.cat((normal_bxfx3x3, eyedirect_bxfx3x3, uv_bxfx3x3), dim=3)
feat = feat.view(bnum, fnum, -1)
imfeature, improb_bxhxwx1 = linear_rasterizer(
self.width,
self.height,
points3d_bxfx9,
points2d_bxfx6,
normalz_bxfx1,
feat,
)
##################################################################
imnormal = imfeature[:, :, :, :3]
imeye = imfeature[:, :, :, 3:6]
imtexcoords = imfeature[:, :, :, 6:8]
immask = imfeature[:, :, :, 8:9]
# normalize
imnormal1 = datanormalize(imnormal, axis=3)
lightdirect_bx3 = datanormalize(lightdirect_bx3, axis=1)
imeye1 = datanormalize(imeye, axis=3)
imrender = fragmentshader(
imnormal1,
lightdirect_bx3,
imeye1,
material_bx3x3,
shininess_bx1,
imtexcoords,
texture_bx3xthxtw,
immask,
)
# return imrender, improb_bxhxwx1, normal1_bxfx3
return imrender, improb_bxhxwx1, normal1_bxfx3, immask
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import print_function
from __future__ import division
from ..rasterizer import linear_rasterizer
from ..utils import datanormalize
from .fragment_shaders.frag_shtex import fragmentshader
from .vertex_shaders.perpsective import perspective_projection
import torch
import torch.nn as nn
##################################################################
class SHRender(nn.Module):
def __init__(self, height, width):
super(SHRender, self).__init__()
self.height = height
self.width = width
# render with point normal or not
self.smooth = False
def set_smooth(self, pfmtx):
self.smooth = True
self.pfmtx = pfmtx
def forward(
self,
points,
cameras,
uv_bxpx2,
texture_bx3xthxtw,
lightparam,
ft_fx3=None,
):
"""
points: [points_bxpx3, faces_fx3]
cameras: camera parameters
[camera_rot_bx3x3, camera_pos_bx3, camera_proj_3x1]
"""
assert lightparam is not None, "When using the Spherical Harmonics model, light parameters must be passed"
##############################################################
# first, MVP projection in vertexshader
points_bxpx3, faces_fx3 = points
# use faces_fx3 as ft_fx3 if not given
if ft_fx3 is None:
ft_fx3 = faces_fx3
# camera_rot_bx3x3, camera_pos_bx3, camera_proj_3x1 = cameras
points3d_bxfx9, points2d_bxfx6, normal_bxfx3 = perspective_projection(points_bxpx3, faces_fx3, cameras)
################################################################
# normal
# decide which faces are front and which faces are back
normalz_bxfx1 = normal_bxfx3[:, :, 2:3]
# normalz_bxfx1 = torch.abs(normalz_bxfx1)
# normalize normal
normal1_bxfx3 = datanormalize(normal_bxfx3, axis=2)
####################################################
# smooth or not
if self.smooth:
normal_bxpx3 = torch.matmul(self.pfmtx, normal_bxfx3)
n0 = normal_bxpx3[:, faces_fx3[:, 0], :]
n1 = normal_bxpx3[:, faces_fx3[:, 1], :]
n2 = normal_bxpx3[:, faces_fx3[:, 2], :]
normal_bxfx9 = torch.cat((n0, n1, n2), dim=2)
else:
normal_bxfx9 = normal_bxfx3.repeat(1, 1, 3)
#########################################################
# second, rasterization
fnum = normal1_bxfx3.shape[1]
bnum = normal1_bxfx3.shape[0]
c0 = uv_bxpx2[:, ft_fx3[:, 0], :]
c1 = uv_bxpx2[:, ft_fx3[:, 1], :]
c2 = uv_bxpx2[:, ft_fx3[:, 2], :]
mask = torch.ones_like(c0[:, :, :1])
uv_bxfx3x3 = torch.cat((c0, mask, c1, mask, c2, mask), dim=2).view(bnum, fnum, 3, -1)
# normal
normal_bxfx3x3 = normal_bxfx9.view(bnum, fnum, 3, -1)
feat = torch.cat((normal_bxfx3x3, uv_bxfx3x3), dim=3)
feat = feat.view(bnum, fnum, -1)
imfeat, improb_bxhxwx1 = linear_rasterizer(
self.width,
self.height,
points3d_bxfx9,
points2d_bxfx6,
normalz_bxfx1,
feat,
)
imnormal_bxhxwx3 = imfeat[:, :, :, :3]
imtexcoords = imfeat[:, :, :, 3:5]
hardmask = imfeat[:, :, :, 5:]
####################################################
# fragment shader
# parallel light
imnormal1_bxhxwx3 = datanormalize(imnormal_bxhxwx3, axis=3)
imrender = fragmentshader(
imnormal1_bxhxwx3,
lightparam,
imtexcoords,
texture_bx3xthxtw,
hardmask,
)
# return imrender, improb_bxhxwx1, normal1_bxfx3
return imrender, improb_bxhxwx1, normal1_bxfx3, hardmask
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import print_function
from __future__ import division
from ..rasterizer import linear_rasterizer
from ..utils import datanormalize
from .fragment_shaders.frag_tex import fragmentshader
from .vertex_shaders.perpsective import perspective_projection
import torch
import torch.nn as nn
##################################################################
class TexRender(nn.Module):
def __init__(self, height, width, filtering="nearest"):
super(TexRender, self).__init__()
self.height = height
self.width = width
self.filtering = filtering
def forward(self, points, cameras, uv_bxpx2, texture_bx3xthxtw, ft_fx3=None):
"""
points: points_bxpx3, faces_fx3
cameras: camera_rot_bx3x3, camera_pos_bx3, camera_proj_3x1
"""
##############################################################
# first, MVP projection in vertexshader
points_bxpx3, faces_fx3 = points
# use faces_fx3 as ft_fx3 if not given
if ft_fx3 is None:
ft_fx3 = faces_fx3
# camera_rot_bx3x3, camera_pos_bx3, camera_proj_3x1 = cameras
points3d_bxfx9, points2d_bxfx6, normal_bxfx3 = perspective_projection(points_bxpx3, faces_fx3, cameras)
################################################################
# normal
# decide which faces are front and which faces are back
normalz_bxfx1 = normal_bxfx3[:, :, 2:3]
# normalz_bxfx1 = torch.abs(normalz_bxfx1)
# normalize normal
normal1_bxfx3 = datanormalize(normal_bxfx3, axis=2)
############################################################
# second, rasterization
c0 = uv_bxpx2[:, ft_fx3[:, 0], :]
c1 = uv_bxpx2[:, ft_fx3[:, 1], :]
c2 = uv_bxpx2[:, ft_fx3[:, 2], :]
mask = torch.ones_like(c0[:, :, :1])
uv_bxfx9 = torch.cat((c0, mask, c1, mask, c2, mask), dim=2)
imfeat, improb_bxhxwx1 = linear_rasterizer(
self.width,
self.height,
points3d_bxfx9,
points2d_bxfx6,
normalz_bxfx1,
uv_bxfx9,
)
imtexcoords = imfeat[:, :, :, :2]
hardmask = imfeat[:, :, :, 2:3]
# fragment shader
imrender = fragmentshader(imtexcoords, texture_bx3xthxtw, hardmask, filtering=self.filtering)
# return imrender, improb_bxhxwx1, normal1_bxfx3
return imrender, improb_bxhxwx1, normal1_bxfx3, hardmask
from __future__ import print_function
from __future__ import division
from ..rasterizer import linear_rasterizer
from ..utils import datanormalize
from .fragment_shaders.frag_tex import fragmentshader
from .vertex_shaders.perpsective import perspective_projection
import torch
import torch.nn as nn
import numpy as np
##################################################################
class TexRenderBatch(nn.Module):
def __init__(self, height, width, filtering="nearest"):
super(TexRenderBatch, self).__init__()
self.height = height
self.width = width
self.filtering = filtering
def forward(self, points, cameras, uv_bxpx2, texture_bx3xthxtw, ft_fx3=None):
"""
points: b x [points_1xpx3, faces_fx3]
cameras: [camera_rot_bx3x3, camera_pos_bx3, camera_proj_3x1]
uv_bxpx2: b x [1xpx2]
texture_bx3xthxtw: b x [1x3xthxtw]
ft_fx3: b x [fx3]
"""
b = len(points)
assert b > 0, b
points3d_1xfx9_list = []
points2d_1xfx6_list = []
normalz_1xfx1_list = []
normal1_1xfx3_list = []
uv_1xfx9_list = []
single_intrinsic = True
if cameras[2].ndim == 3:
assert cameras[2].shape[0] == b
single_intrinsic = False
for i in range(b):
##############################################################
# first, MVP projection in vertexshader
points_1xpx3, faces_fx3 = points[i]
if single_intrinsic:
cam_params = [
cameras[0][i : i + 1],
cameras[1][i : i + 1],
cameras[2],
]
else:
cam_params = [
cameras[0][i : i + 1],
cameras[1][i : i + 1],
cameras[2][i],
]
# use faces_fx3 as ft_fx3 if not given
if ft_fx3 is None:
ft_fx3_single = faces_fx3
else:
ft_fx3_single = ft_fx3[i]
(
points3d_1xfx9,
points2d_1xfx6,
normal_1xfx3,
) = perspective_projection(points_1xpx3, faces_fx3, cam_params)
################################################################
# normal
# decide which faces are front and which faces are back
normalz_1xfx1 = normal_1xfx3[:, :, 2:3]
# normalz_bxfx1 = torch.abs(normalz_bxfx1)
# normalize normal
normal1_1xfx3 = datanormalize(normal_1xfx3, axis=2)
############################################################
# second, rasterization
uv_1xpx2 = uv_bxpx2[i]
c0 = uv_1xpx2[:, ft_fx3_single[:, 0], :]
c1 = uv_1xpx2[:, ft_fx3_single[:, 1], :]
c2 = uv_1xpx2[:, ft_fx3_single[:, 2], :]
mask = torch.ones_like(c0[:, :, :1])
uv_1xfx9 = torch.cat((c0, mask, c1, mask, c2, mask), dim=2)
# append data
points3d_1xfx9_list.append(points3d_1xfx9)
points2d_1xfx6_list.append(points2d_1xfx6)
normalz_1xfx1_list.append(normalz_1xfx1)
normal1_1xfx3_list.append(normal1_1xfx3)
uv_1xfx9_list.append(uv_1xfx9)
# put the object with larger depth earlier
# imrender = torch.empty((1, self.height, self.width, 3), device=device, dtype=torch.float32)
# improb_1xhxwx1 = torch.empty((1, self.height, self.width, 1), device=device, dtype=torch.float32)
# fg_mask = torch.empty((1, self.height, self.width, 1), device=device, dtype=torch.float32)
ren_ims = []
ren_masks = []
ren_probs = []
for i in range(b):
imfeat, improb_1xhxwx1_i = linear_rasterizer(
self.width,
self.height,
points3d_1xfx9_list[i],
points2d_1xfx6_list[i],
normalz_1xfx1_list[i],
uv_1xfx9_list[i],
)
imtexcoords = imfeat[:, :, :, :2] # (1,H,W,2)
hardmask = imfeat[:, :, :, 2:3] # (1,H,W,1) mask
# fragment shader
texture_1x3xthxtw = texture_bx3xthxtw[i]
imrender_i = fragmentshader(imtexcoords, texture_1x3xthxtw, hardmask)
ren_ims.append(imrender_i) # 1HW3
ren_probs.append(improb_1xhxwx1_i)
ren_masks.append(hardmask)
imrender = torch.cat(ren_ims, dim=0) # bHW3
improb_bxhxwx1 = torch.cat(ren_probs, dim=0)
mask_bxhxwx1 = torch.cat(ren_masks, dim=0)
# return imrender, improb_1xhxwx1, normal1_1xfx3_list
return imrender, improb_bxhxwx1, normal1_1xfx3_list, mask_bxhxwx1
from __future__ import print_function
from __future__ import division
from ..rasterizer import linear_rasterizer
from ..utils import datanormalize
from .fragment_shaders.frag_tex import fragmentshader
from .vertex_shaders.perpsective import perspective_projection
import torch
import torch.nn as nn
import numpy as np
##################################################################
class TexRenderMulti(nn.Module):
def __init__(self, height, width, filtering="nearest"):
super(TexRenderMulti, self).__init__()
self.height = height
self.width = width
self.filtering = filtering
def forward(self, points, cameras, uv_bxpx2, texture_bx3xthxtw, ts, ft_fx3=None):
"""
points: b x [points_1xpx3, faces_fx3]
cameras: [camera_rot_bx3x3, camera_pos_bx3, camera_proj_3x1]
uv_bxpx2: b x [1xpx2]
texture_bx3xthxtw: b x [1x3xthxtw]
ts: list of translations
ft_fx3: b x [fx3]
"""
b = len(points)
points3d_1xfx9_list = []
points2d_1xfx6_list = []
normalz_1xfx1_list = []
normal1_1xfx3_list = []
uv_1xfx9_list = []
distances = np.array([t[2] for t in ts])
dist_inds = np.argsort(distances)[::-1] # descending order
single_intrinsic = True
if cameras[2].ndim == 3:
assert cameras[2].shape[0] == b
single_intrinsic = False
for i in range(b):
##############################################################
# first, MVP projection in vertexshader
points_1xpx3, faces_fx3 = points[i]
if single_intrinsic:
cam_params = [
cameras[0][i : i + 1],
cameras[1][i : i + 1],
cameras[2],
]
else:
cam_params = [
cameras[0][i : i + 1],
cameras[1][i : i + 1],
cameras[2][i],
]
# use faces_fx3 as ft_fx3 if not given
if ft_fx3 is None:
ft_fx3_single = faces_fx3
else:
ft_fx3_single = ft_fx3[i]
(
points3d_1xfx9,
points2d_1xfx6,
normal_1xfx3,
) = perspective_projection(points_1xpx3, faces_fx3, cam_params)
################################################################
# normal
# decide which faces are front and which faces are back
normalz_1xfx1 = normal_1xfx3[:, :, 2:3]
# normalz_bxfx1 = torch.abs(normalz_bxfx1)
# normalize normal
normal1_1xfx3 = datanormalize(normal_1xfx3, axis=2)
############################################################
# second, rasterization
uv_1xpx2 = uv_bxpx2[i]
c0 = uv_1xpx2[:, ft_fx3_single[:, 0], :]
c1 = uv_1xpx2[:, ft_fx3_single[:, 1], :]
c2 = uv_1xpx2[:, ft_fx3_single[:, 2], :]
mask = torch.ones_like(c0[:, :, :1])
uv_1xfx9 = torch.cat((c0, mask, c1, mask, c2, mask), dim=2)
# append data
points3d_1xfx9_list.append(points3d_1xfx9)
points2d_1xfx6_list.append(points2d_1xfx6)
normalz_1xfx1_list.append(normalz_1xfx1)
normal1_1xfx3_list.append(normal1_1xfx3)
uv_1xfx9_list.append(uv_1xfx9)
# put the object with larger depth earlier
ren_ims = []
ren_masks = []
ren_probs = []
for dist_ind in dist_inds:  # NOTE: back-to-front ordering by object-center depth is approximate, not exact
imfeat, improb_1xhxwx1_i = linear_rasterizer(
self.width,
self.height,
points3d_1xfx9_list[dist_ind],
points2d_1xfx6_list[dist_ind],
normalz_1xfx1_list[dist_ind],
uv_1xfx9_list[dist_ind],
)
imtexcoords = imfeat[:, :, :, :2] # (1,H,W,2)
hardmask = imfeat[:, :, :, 2:3] # (1,H,W,1) mask
# fragment shader
texture_1x3xthxtw = texture_bx3xthxtw[dist_ind]
imrender_i = fragmentshader(imtexcoords, texture_1x3xthxtw, hardmask)
ren_ims.append(imrender_i)
ren_probs.append(improb_1xhxwx1_i)
ren_masks.append(hardmask)
for i in range(len(dist_inds)):
if i == 0:
imrender = ren_ims[0]
improb_1xhxwx1 = ren_probs[0]
fg_mask = ren_masks[0]
else:
imrender_i = ren_ims[i]
improb_1xhxwx1_i = ren_probs[i]
hardmask_i = ren_masks[i]
mask_inds = torch.where(hardmask_i[0, :, :, 0] > 0.5)
imrender[:, mask_inds[0], mask_inds[1], :] = imrender_i[:, mask_inds[0], mask_inds[1], :]
improb_1xhxwx1[:, mask_inds[0], mask_inds[1], :] = improb_1xhxwx1_i[:, mask_inds[0], mask_inds[1], :]
fg_mask[:, mask_inds[0], mask_inds[1], :] = hardmask_i[:, mask_inds[0], mask_inds[1], :]
# return imrender, improb_1xhxwx1, normal1_1xfx3_list
# TODO: we can also return instance visible masks, full masks
return imrender, improb_1xhxwx1, normal1_1xfx3_list, fg_mask
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import print_function
from __future__ import division
from ..rasterizer import linear_rasterizer
from ..utils import datanormalize
from .vertex_shaders.perpsective import perspective_projection
import torch
import torch.nn as nn
##################################################################
class VCRender(nn.Module):
"""Vertex-Color Renderer."""
def __init__(self, height, width):
super(VCRender, self).__init__()
self.height = height
self.width = width
def forward(self, points, cameras, colors_bxpx3):
"""
points: [points_bxpx3, faces_fx3]
cameras: camera parameters
[camera_rot_bx3x3, camera_pos_bx3, camera_proj_3x1]
"""
##############################################################
# first, MVP projection in vertexshader
points_bxpx3, faces_fx3 = points
# camera_rot_bx3x3, camera_pos_bx3, camera_proj_3x1 = cameras
points3d_bxfx9, points2d_bxfx6, normal_bxfx3 = perspective_projection(points_bxpx3, faces_fx3, cameras)
################################################################
# normal
# decide which faces are front and which faces are back
normalz_bxfx1 = normal_bxfx3[:, :, 2:3]
# normalz_bxfx1 = torch.abs(normalz_bxfx1)
# normalize normal
normal1_bxfx3 = datanormalize(normal_bxfx3, axis=2)
############################################################
# second, rasterization
c0 = colors_bxpx3[:, faces_fx3[:, 0], :]
c1 = colors_bxpx3[:, faces_fx3[:, 1], :]
c2 = colors_bxpx3[:, faces_fx3[:, 2], :]
mask = torch.ones_like(c0[:, :, :1])
color_bxfx12 = torch.cat((c0, mask, c1, mask, c2, mask), dim=2)
imfeat, improb_bxhxwx1 = linear_rasterizer(
self.width,
self.height,
points3d_bxfx9,
points2d_bxfx6,
normalz_bxfx1,
color_bxfx12,
)
imrender = imfeat[:, :, :, :3]
hardmask = imfeat[:, :, :, 3:]
# return imrender, improb_bxhxwx1, normal1_bxfx3
return imrender, improb_bxhxwx1, normal1_bxfx3, hardmask
from __future__ import division
from ..rasterizer import linear_rasterizer
from ..utils import datanormalize
from .vertex_shaders.perpsective import perspective_projection
import torch
import torch.nn as nn
from functools import partial
def multi_apply(func, *args, **kwargs):
pfunc = partial(func, **kwargs) if kwargs else func
map_results = map(pfunc, *args)
return tuple(map(list, zip(*map_results)))
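# A small illustration of multi_apply: the function is mapped over parallel
# argument lists and the per-call result tuples are transposed into a tuple
# of lists (the values below are arbitrary).
def _example_multi_apply():
    quotients, remainders = multi_apply(lambda a, b: (a // b, a % b), [7, 9], [2, 4])
    return quotients, remainders  # ([3, 2], [1, 1])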
##################################################################
class VCRenderBatch(nn.Module):
"""Vertex-Color Renderer Batch (batch rendering for different objects, only
one object for each image) The original one only support batch rendering
for a single object."""
def __init__(self, height, width):
super(VCRenderBatch, self).__init__()
self.height = height
self.width = width
def forward(self, points, cameras, colors):
"""
points: b x [points_1xpx3, faces_fx3]
cameras: camera parameters
[camera_rot_bx3x3, camera_pos_bx3, camera_proj_{b}x3x1]
colors_list: b x [colors_1xpx3]
"""
b = len(points)
points3d_1xfx9_list = []
points2d_1xfx6_list = []
normalz_1xfx1_list = []
normal1_1xfx3_list = []
color_1xfx12_list = []
single_intrinsic = True
if cameras[2].ndim == 3:
assert cameras[2].shape[0] == b
single_intrinsic = False
for i in range(b):
##############################################################
# first, MVP projection in vertexshader
points_1xpx3, faces_fx3 = points[i]
if single_intrinsic:
cam_params = [
cameras[0][i : i + 1],
cameras[1][i : i + 1],
cameras[2],
]
else:
cam_params = [
cameras[0][i : i + 1],
cameras[1][i : i + 1],
cameras[2][i],
]
(
points3d_1xfx9,
points2d_1xfx6,
normal_1xfx3,
) = perspective_projection(points_1xpx3, faces_fx3, cam_params)
################################################################
# normal
# decide which faces are front and which faces are back
normalz_1xfx1 = normal_1xfx3[:, :, 2:3]
# normalz_bxfx1 = torch.abs(normalz_bxfx1)
# normalize normal
normal1_1xfx3 = datanormalize(normal_1xfx3, axis=2)
############################################################
# second, rasterization
colors_1xpx3 = colors[i]
c0 = colors_1xpx3[:, faces_fx3[:, 0], :]
c1 = colors_1xpx3[:, faces_fx3[:, 1], :]
c2 = colors_1xpx3[:, faces_fx3[:, 2], :]
mask = torch.ones_like(c0[:, :, :1])
color_1xfx12 = torch.cat((c0, mask, c1, mask, c2, mask), dim=2)
# append data
points3d_1xfx9_list.append(points3d_1xfx9)
points2d_1xfx6_list.append(points2d_1xfx6)
normalz_1xfx1_list.append(normalz_1xfx1)
normal1_1xfx3_list.append(normal1_1xfx3)
color_1xfx12_list.append(color_1xfx12)
# points3d_1xFx9 = torch.cat(points3d_1xfx9_list, dim=1)
# points2d_1xFx6 = torch.cat(points2d_1xfx6_list, dim=1)
# normalz_1xFx1 = torch.cat(normalz_1xfx1_list, dim=1)
# normal1_1xFx3 = torch.cat(normal1_1xfx3_list, dim=1)
# color_1xFx12 = torch.cat(color_1xfx12_list, dim=1)
if True:
imfeat_list, improb_list = multi_apply(
linear_rasterizer,
[self.width for _ in range(b)],
[self.height for _ in range(b)],
points3d_1xfx9_list,
points2d_1xfx6_list,
normalz_1xfx1_list,
color_1xfx12_list,
)
else: # debug
imfeat_list, improb_list = multi_apply(
linear_rasterizer,
[self.width for _ in range(b)],
[self.height for _ in range(b)],
points3d_1xfx9_list,
points2d_1xfx6_list,
normalz_1xfx1_list,
color_1xfx12_list,
[0.02 for _ in range(b)],
[30 for _ in range(b)],
[1000 for _ in range(b)],
[7000 for _ in range(b)],
[True for _ in range(b)],
) # the last one is debug
imfeat = torch.cat(imfeat_list, dim=0) # [b,H,W,4]
improb_bxhxwx1 = torch.cat(improb_list, dim=0) # [b,H,W,1]
imrender = imfeat[:, :, :, :3] # (b,H,W,3), rgb
hardmask = imfeat[:, :, :, 3:] # (b,H,W,1) mask
if False:
import cv2
hardmask_cpu = hardmask.detach().cpu().numpy()[0][:, :, 0]
cv2.imshow("hardmask", hardmask_cpu)
# return imrender, improb_1xhxwx1, normal1_1xFx3
return imrender, improb_bxhxwx1, normal1_1xfx3_list, hardmask
from __future__ import division
from ..rasterizer import linear_rasterizer
from ..utils import datanormalize
from .vertex_shaders.perpsective import perspective_projection
import torch
import torch.nn as nn
##################################################################
class VCRenderMulti(nn.Module):
"""Vertex-Color Renderer."""
def __init__(self, height, width):
super(VCRenderMulti, self).__init__()
self.height = height
self.width = width
def forward(self, points, cameras, colors):
"""
points: b x [points_1xpx3, faces_fx3]
cameras: camera parameters
[camera_rot_bx3x3, camera_pos_bx3, camera_proj_3x1]
colors_list: b x [colors_1xpx3]
"""
b = len(points)
points3d_1xfx9_list = []
points2d_1xfx6_list = []
normalz_1xfx1_list = []
normal1_1xfx3_list = []
color_1xfx12_list = []
single_intrinsic = True
if cameras[2].ndim == 3:
assert cameras[2].shape[0] == b
single_intrinsic = False
for i in range(b):
##############################################################
# first, MVP projection in vertexshader
points_1xpx3, faces_fx3 = points[i]
if single_intrinsic:
cam_params = [
cameras[0][i : i + 1],
cameras[1][i : i + 1],
cameras[2],
]
else:
cam_params = [
cameras[0][i : i + 1],
cameras[1][i : i + 1],
cameras[2][i],
]
(
points3d_1xfx9,
points2d_1xfx6,
normal_1xfx3,
) = perspective_projection(points_1xpx3, faces_fx3, cam_params)
################################################################
# normal
# decide which faces are front and which faces are back
normalz_1xfx1 = normal_1xfx3[:, :, 2:3]
# normalz_bxfx1 = torch.abs(normalz_bxfx1)
# normalize normal
normal1_1xfx3 = datanormalize(normal_1xfx3, axis=2)
############################################################
# second, rasterization
colors_1xpx3 = colors[i]
c0 = colors_1xpx3[:, faces_fx3[:, 0], :]
c1 = colors_1xpx3[:, faces_fx3[:, 1], :]
c2 = colors_1xpx3[:, faces_fx3[:, 2], :]
mask = torch.ones_like(c0[:, :, :1])
color_1xfx12 = torch.cat((c0, mask, c1, mask, c2, mask), dim=2)
# append data
points3d_1xfx9_list.append(points3d_1xfx9)
points2d_1xfx6_list.append(points2d_1xfx6)
normalz_1xfx1_list.append(normalz_1xfx1)
normal1_1xfx3_list.append(normal1_1xfx3)
color_1xfx12_list.append(color_1xfx12)
points3d_1xFx9 = torch.cat(points3d_1xfx9_list, dim=1)
points2d_1xFx6 = torch.cat(points2d_1xfx6_list, dim=1)
normalz_1xFx1 = torch.cat(normalz_1xfx1_list, dim=1)
normal1_1xFx3 = torch.cat(normal1_1xfx3_list, dim=1)
color_1xFx12 = torch.cat(color_1xfx12_list, dim=1)
if True:
imfeat, improb_1xhxwx1 = linear_rasterizer(
self.width,
self.height,
points3d_1xFx9,
points2d_1xFx6,
normalz_1xFx1,
color_1xFx12,
)
else: # debug
imfeat, improb_1xhxwx1 = linear_rasterizer(
self.width,
self.height,
points3d_1xFx9,
points2d_1xFx6,
normalz_1xFx1,
color_1xFx12,
0.02,
30,
1000,
7000,
True,
) # the last one is debug
imrender = imfeat[:, :, :, :3] # (1,H,W,3), rgb
hardmask = imfeat[:, :, :, 3:] # (1,H,W,1) mask
if False:
import cv2
hardmask_cpu = hardmask.detach().cpu().numpy()[0][:, :, 0]
cv2.imshow("hardmask", hardmask_cpu)
# return imrender, improb_1xhxwx1, normal1_1xFx3
return imrender, improb_1xhxwx1, normal1_1xFx3, hardmask
from .perpsective import *