diff --git a/lib/dr_utils/__init__.py b/lib/dr_utils/__init__.py
deleted file mode 100644
index 2b60d5a537d04fe94ac7d2fab32f1b835f2db89c..0000000000000000000000000000000000000000
--- a/lib/dr_utils/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# differentiable renderer utils
diff --git a/lib/dr_utils/dib_renderer_x/__init__.py b/lib/dr_utils/dib_renderer_x/__init__.py
deleted file mode 100644
index e59a96f9cbef90916ed0e04cd8d6ed7564a75b9b..0000000000000000000000000000000000000000
--- a/lib/dr_utils/dib_renderer_x/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-# NOTE: override the kaolin one
-from .renderer.base import Renderer as DIBRenderer
diff --git a/lib/dr_utils/dib_renderer_x/rasterizer/__init__.py b/lib/dr_utils/dib_renderer_x/rasterizer/__init__.py
deleted file mode 100644
index 708ab42984ee965f86b7e3e52ee46beb714dbf51..0000000000000000000000000000000000000000
--- a/lib/dr_utils/dib_renderer_x/rasterizer/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .rasterizer import *
diff --git a/lib/dr_utils/dib_renderer_x/rasterizer/rasterizer.py b/lib/dr_utils/dib_renderer_x/rasterizer/rasterizer.py
deleted file mode 100644
index 4d0a198504df3b38ed9d8a60bf20617b5d530c27..0000000000000000000000000000000000000000
--- a/lib/dr_utils/dib_renderer_x/rasterizer/rasterizer.py
+++ /dev/null
@@ -1,294 +0,0 @@
-# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
-
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
-# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-# DEALINGS IN THE SOFTWARE.
-
-from __future__ import print_function
-from __future__ import division
-
-import torch
-import torch.nn
-import torch.autograd
-from torch.autograd import Function
-
-from kaolin.graphics.dib_renderer.cuda import rasterizer as cuda_rasterizer
-
-import cv2
-import numpy as np
-import datetime
-
-
-@torch.jit.script
-def prepare_tfpoints(
-    tfpoints3d_bxfx9,
-    tfpoints2d_bxfx6,
-    multiplier: float,
-    batch_size: int,
-    num_faces: int,
-    expand: float,
-):
-    # avoid numeric error
-    tfpoints2dmul_bxfx6 = multiplier * tfpoints2d_bxfx6
-
-    # bbox
-    tfpoints2d_bxfx3x2 = tfpoints2dmul_bxfx6.view(batch_size, num_faces, 3, 2)
-    tfpoints_min = torch.min(tfpoints2d_bxfx3x2, dim=2)[0]
-    tfpoints_max = torch.max(tfpoints2d_bxfx3x2, dim=2)[0]
-    tfpointsbbox_bxfx4 = torch.cat((tfpoints_min, tfpoints_max), dim=2)
-
-    # bbox2
-    tfpoints_min = tfpoints_min - expand * multiplier
-    tfpoints_max = tfpoints_max + expand * multiplier
-    tfpointsbbox2_bxfx4 = torch.cat((tfpoints_min, tfpoints_max), dim=2)
-
-    # depth
-    _tfpoints3d_bxfx9 = tfpoints3d_bxfx9.permute(2, 0, 1)
-    tfpointsdep_bxfx1 = (
-        _tfpoints3d_bxfx9[2, :, :] + _tfpoints3d_bxfx9[5, :, :] + _tfpoints3d_bxfx9[8, :, :]
-    ).unsqueeze(-1) / 3.0
-
-    return (
-        tfpoints2dmul_bxfx6,
-        tfpointsbbox_bxfx4,
-        tfpointsbbox2_bxfx4,
-        tfpointsdep_bxfx1,
-    )
-
-
-class LinearRasterizer(Function):
-    @staticmethod
-    def forward(
-        ctx,
-        width,
-        height,
-        tfpoints3d_bxfx9,
-        tfpoints2d_bxfx6,
-        tfnormalz_bxfx1,
-        vertex_attr_bxfx3d,
-        expand=None,
-        knum=None,
-        multiplier=None,
-        delta=None,
-        debug=False,
-    ):
-
-        if expand is None:
-            expand = 0.02
-        if knum is None:
-            knum = 30
-        if multiplier is None:
-            multiplier = 1000
-        if delta is None:
-            delta = 7000
-
-        batch_size = tfpoints3d_bxfx9.shape[0]
-        num_faces = tfpoints3d_bxfx9.shape[1]
-
-        num_vertex_attr = vertex_attr_bxfx3d.shape[2] / 3
-        assert num_vertex_attr == int(
-            num_vertex_attr
-        ), "vertex_attr_bxfx3d has shape {} which is not a multiple of 3".format(vertex_attr_bxfx3d.shape[2])
-
-        num_vertex_attr = int(num_vertex_attr)
-
-        ###################################################
-        start = datetime.datetime.now()
-
-        (tfpoints2dmul_bxfx6, tfpointsbbox_bxfx4, tfpointsbbox2_bxfx4, tfpointsdep_bxfx1,) = prepare_tfpoints(
-            tfpoints3d_bxfx9,
-            tfpoints2d_bxfx6,
-            multiplier,
-            batch_size,
-            num_faces,
-            expand,
-        )
-
-        device = tfpoints2dmul_bxfx6.device
-
-        # output
-        tfimidxs_bxhxwx1 = torch.zeros(batch_size, height, width, 1, dtype=torch.float32, device=device)
-        # set depth as very far
-        tfimdeps_bxhxwx1 = torch.full(
-            (batch_size, height, width, 1),
-            fill_value=-1000.0,
-            dtype=torch.float32,
-            device=device,
-        )
-        tfimweis_bxhxwx3 = torch.zeros(batch_size, height, width, 3, dtype=torch.float32, device=device)
-        tfims_bxhxwxd = torch.zeros(
-            batch_size,
-            height,
-            width,
-            num_vertex_attr,
-            dtype=torch.float32,
-            device=device,
-        )
-        tfimprob_bxhxwx1 = torch.zeros(batch_size, height, width, 1, dtype=torch.float32, device=device)
-
-        # intermediate variables
-        tfprobface = torch.zeros(batch_size, height, width, knum, dtype=torch.float32, device=device)
-        tfprobcase = torch.zeros(batch_size, height, width, knum, dtype=torch.float32, device=device)
-        tfprobdis = torch.zeros(batch_size, height, width, knum, dtype=torch.float32, device=device)
-        tfprobdep = torch.zeros(batch_size, height, width, knum, dtype=torch.float32, device=device)
-        tfprobacc = torch.zeros(batch_size, height, width, knum, dtype=torch.float32, device=device)
-
-        # face direction
-        tfpointsdirect_bxfx1 = tfnormalz_bxfx1.contiguous()
-        cuda_rasterizer.forward(
-            tfpoints3d_bxfx9,
-            tfpoints2dmul_bxfx6,
-            tfpointsdirect_bxfx1,
-            tfpointsbbox_bxfx4,
-            tfpointsbbox2_bxfx4,
-            tfpointsdep_bxfx1,
-            vertex_attr_bxfx3d,
-            tfimidxs_bxhxwx1,
-            tfimdeps_bxhxwx1,
-            tfimweis_bxhxwx3,
-            tfprobface,
-            tfprobcase,
-            tfprobdis,
-            tfprobdep,
-            tfprobacc,
-            tfims_bxhxwxd,
-            tfimprob_bxhxwx1,
-            multiplier,
-            delta,
-        )
-
-        end = datetime.datetime.now()
-        ###################################################
-
-        if debug:
-            print(end - start)
-            ims_bxhxwxd = tfims_bxhxwxd.detach().cpu().numpy()
-            improbs_bxhxwx1 = tfimprob_bxhxwx1.detach().cpu().numpy()
-            imidxs_bxhxwx1 = tfimidxs_bxhxwx1.detach().cpu().numpy()
-            imdeps_bxhxwx1 = tfimdeps_bxhxwx1.detach().cpu().numpy()
-            imweis_bxhxwx3 = tfimweis_bxhxwx3.detach().cpu().numpy()
-
-            print(ims_bxhxwxd.shape)
-            print(improbs_bxhxwx1.shape)
-            print(np.max(improbs_bxhxwx1))
-
-            cv2.imshow("0", ims_bxhxwxd[-1, :, :, :3])
-            cv2.imshow("1", improbs_bxhxwx1[-1])
-            cv2.imshow("2", imweis_bxhxwx3[-1])
-            cv2.imshow("3", imidxs_bxhxwx1[-1] / num_faces)
-            cv2.imshow("4", imdeps_bxhxwx1[-1])
-            cv2.waitKey()
-
-        debug_im = torch.zeros(batch_size, height, width, 3, dtype=torch.float32, device=device)
-
-        ctx.save_for_backward(
-            tfims_bxhxwxd,
-            tfimprob_bxhxwx1,
-            tfimidxs_bxhxwx1,
-            tfimweis_bxhxwx3,
-            tfpoints2dmul_bxfx6,
-            vertex_attr_bxfx3d,
-            tfprobface,
-            tfprobcase,
-            tfprobdis,
-            tfprobdep,
-            tfprobacc,
-            debug_im,
-        )
-
-        ctx.multiplier = multiplier
-        ctx.delta = delta
-        ctx.debug = debug
-
-        tfims_bxhxwxd.requires_grad = True
-        tfimprob_bxhxwx1.requires_grad = True
-
-        return tfims_bxhxwxd, tfimprob_bxhxwx1
-
-    @staticmethod
-    def backward(ctx, dldI_bxhxwxd, dldp_bxhxwx1):
-        (
-            tfims_bxhxwxd,
-            tfimprob_bxhxwx1,
-            tfimidxs_bxhxwx1,
-            tfimweis_bxhxwx3,
-            tfpoints2dmul_bxfx6,
-            tfcolors_bxfx3d,
-            tfprobface,
-            tfprobcase,
-            tfprobdis,
-            tfprobdep,
-            tfprobacc,
-            debug_im,
-        ) = ctx.saved_tensors
-
-        multiplier = ctx.multiplier
-        delta = ctx.delta
-        debug = ctx.debug
-        # avoid numeric error
-        # multiplier = 1000
-        # tfpoints2d_bxfx6 *= multiplier
-
-        dldp2 = torch.zeros_like(tfpoints2dmul_bxfx6)
-        dldp2_prob = torch.zeros_like(tfpoints2dmul_bxfx6)
-        dldc = torch.zeros_like(tfcolors_bxfx3d)
-        cuda_rasterizer.backward(
-            dldI_bxhxwxd.contiguous(),
-            dldp_bxhxwx1.contiguous(),
-            tfims_bxhxwxd,
-            tfimprob_bxhxwx1,
-            tfimidxs_bxhxwx1,
-            tfimweis_bxhxwx3,
-            tfprobface,
-            tfprobcase,
-            tfprobdis,
-            tfprobdep,
-            tfprobacc,
-            tfpoints2dmul_bxfx6,
-            tfcolors_bxfx3d,
-            dldp2,
-            dldc,
-            dldp2_prob,
-            debug_im,
-            multiplier,
-            delta,
-        )
-        if debug:
-            print(dldc[dldc > 0.1])
-            print(dldc[dldc > 0.1].shape)
-            print(dldp2[dldp2 > 0.1])
-            print(dldp2[dldp2 > 0.1].shape)
-            print(dldp2_prob[dldp2_prob > 0.1])
-            print(dldp2_prob[dldp2_prob > 0.1].shape)
-
-        # one gradient per forward input: width, height, points3d, points2d,
-        # normalz, vertex_attr, expand, knum, multiplier, delta, debug
-        return (
-            None,
-            None,
-            None,
-            dldp2 + dldp2_prob,
-            None,
-            dldc,
-            None,
-            None,
-            None,
-            None,
-            None,
-        )
-
-
-linear_rasterizer = LinearRasterizer.apply
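For reference, a minimal usage sketch of the removed `linear_rasterizer`, assuming the kaolin DIB-R CUDA extension is built and a GPU is available; all sizes and tensor names below are illustrative, not taken from the original call sites.

```python
import torch

# hypothetical sizes: 2 meshes of 100 faces each, rasterized at 256x256
b, f, h, w = 2, 100, 256, 256
device = "cuda:0"

points3d_bxfx9 = torch.randn(b, f, 9, device=device)  # camera-space vertices, 3 per face
points2d_bxfx6 = torch.randn(b, f, 6, device=device, requires_grad=True)  # projected vertices
normalz_bxfx1 = torch.randn(b, f, 1, device=device)  # z of the face normal (front/back test)
colors_bxfx9 = torch.rand(b, f, 9, device=device, requires_grad=True)  # one RGB per vertex

# the vertex-attribute channel count must be a multiple of 3 (one value per vertex)
im_bxhxwx3, improb_bxhxwx1 = linear_rasterizer(
    w, h, points3d_bxfx9, points2d_bxfx6, normalz_bxfx1, colors_bxfx9
)
im_bxhxwx3.sum().backward()  # dldp2 + dldp2_prob reaches points2d; dldc reaches colors
```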
diff --git a/lib/dr_utils/dib_renderer_x/renderer/__init__.py b/lib/dr_utils/dib_renderer_x/renderer/__init__.py
deleted file mode 100644
index 8c4b3180bf8aa0d2b132bfc3a7e865794bc8d9cc..0000000000000000000000000000000000000000
--- a/lib/dr_utils/dib_renderer_x/renderer/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from .base import *
-from .phongrender import *
-from .shrender import *
-from .texrender import *
-from .vcrender import *
diff --git a/lib/dr_utils/dib_renderer_x/renderer/base.py b/lib/dr_utils/dib_renderer_x/renderer/base.py
deleted file mode 100644
index d7f8098f4ceb2ea2fc292c640d91783b13e50828..0000000000000000000000000000000000000000
--- a/lib/dr_utils/dib_renderer_x/renderer/base.py
+++ /dev/null
@@ -1,200 +0,0 @@
-# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
-
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
-# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-# DEALINGS IN THE SOFTWARE.
-
-from __future__ import print_function
-from __future__ import division
-
-from core.utils.pose_utils import quat2mat_torch
-
-from ..utils import perspectiveprojectionnp, projectiveprojection_real
-from .phongrender import PhongRender
-from .shrender import SHRender
-from .texrender import TexRender as Lambertian
-from .vcrender import VCRender
-from .vcrender_batch import VCRenderBatch
-from .vcrender_multi import VCRenderMulti
-from .texrender_multi import TexRenderMulti
-from .texrender_batch import TexRenderBatch
-import numpy as np
-import torch
-import torch.nn as nn
-
-# renderers = {'VertexColor': VCRender, 'Lambertian': Lambertian, 'SphericalHarmonics': SHRender, 'Phong': PhongRender}
-renderers = {
-    "VertexColor": VCRender,
-    "VertexColorMulti": VCRenderMulti,
-    "VertexColorBatch": VCRenderBatch,
-    "Lambertian": Lambertian,
-    "Texture": Lambertian,  # alias
-    "TextureMulti": TexRenderMulti,
-    "TextureBatch": TexRenderBatch,
-    "SphericalHarmonics": SHRender,
-    "Phong": PhongRender,
-}
-
-
-class Renderer(nn.Module):
-    def __init__(
-        self,
-        height,
-        width,
-        mode="VertexColor",
-        camera_center=None,
-        camera_up=None,
-        camera_fov_y=None,
-    ):
-        super(Renderer, self).__init__()
-        assert mode in renderers, "Passed mode {0} must be in the list of accepted modes: {1}".format(mode, renderers)
-        self.mode = mode
-
-        yz_flip = np.eye(3, dtype=np.float32)
-        yz_flip[1, 1], yz_flip[2, 2] = -1, -1
-        self.yz_flip = torch.tensor(yz_flip, device="cuda:0")
-
-        self.renderer = renderers[mode](height, width)
-        # fall back to defaults only for arguments that were not given
-        self.camera_center = np.array([0, 0, 0], dtype=np.float32) if camera_center is None else camera_center
-        self.camera_up = np.array([0, 1, 0], dtype=np.float32) if camera_up is None else camera_up
-        self.camera_fov_y = 49.13434207744484 * np.pi / 180.0 if camera_fov_y is None else camera_fov_y
-        self.camera_params = None
-
-    def forward(self, points, *args, **kwargs):
-
-        if self.camera_params is None:
-            print(
-                "Camera parameters have not been set, default perspective parameters of distance = 1, elevation = 30, azimuth = 0 are being used"
-            )
-            self.set_look_at_parameters([0], [30], [1])
-
-        if self.mode in [
-            "VertexColorMulti",
-            "VertexColorBatch",
-            "TextureMulti",
-            "TextureBatch",
-        ]:
-            assert self.camera_params[0].shape[0] == len(
-                points
-            ), "multi/batch modes need camera parameters and points to have the same length"
-        else:
-            assert (
-                self.camera_params[0].shape[0] == points[0].shape[0]
-            ), "Set camera parameters batch size must equal the batch size of the passed points"
-
-        return self.renderer(points, self.camera_params, *args, **kwargs)
-
-    def set_look_at_parameters(self, azimuth, elevation, distance):
-        from kaolin.mathutils.geometry.transformations import (
-            compute_camera_params,
-        )
-
-        camera_projection_mtx = perspectiveprojectionnp(self.camera_fov_y, 1.0)
-        camera_projection_mtx = torch.FloatTensor(camera_projection_mtx).cuda()
-
-        camera_view_mtx = []
-        camera_view_shift = []
-        for a, e, d in zip(azimuth, elevation, distance):
-            mat, pos = compute_camera_params(a, e, d)
-            camera_view_mtx.append(mat)
-            camera_view_shift.append(pos)
-        camera_view_mtx = torch.stack(camera_view_mtx).cuda()
-        camera_view_shift = torch.stack(camera_view_shift).cuda()
-
-        self.camera_params = [
-            camera_view_mtx,
-            camera_view_shift,
-            camera_projection_mtx,
-        ]
-
-    def set_camera_parameters(self, parameters):
-        self.camera_params = parameters
-
-    def set_camera_parameters_from_RT_K(self, Rs, ts, Ks, height, width, near=0.01, far=10.0, rot_type="mat"):
-        """
-        Rs: a list of rotation tensors
-        ts: a list of translation tensors
-        Ks: a list of camera intrinsic matrices, or a single shared matrix
-        ----
-        sets self.camera_params = [cam_view_R, cam_view_pos, cam_proj]
-        """
-        """
-        aspect_ratio = width / height
-        fov_x, fov_y = K_to_fov(K, height, width)
-        # camera_projection_mtx = perspectiveprojectionnp(self.camera_fov_y,
-        #         ratio=aspect_ratio, near=near, far=far)
-        camera_projection_mtx = perspectiveprojectionnp(fov_y,
-                ratio=aspect_ratio, near=near, far=far)
-        """
-        assert rot_type in ["mat", "quat"], rot_type
-        bs = len(Rs)
-        single_K = False
-        if isinstance(Ks, (np.ndarray, torch.Tensor)) and Ks.ndim == 2:
-            K = Ks
-            camera_proj_mtx = projectiveprojection_real(K, 0, 0, width, height, near, far)
-            camera_proj_mtx = torch.as_tensor(camera_proj_mtx).float().cuda()  # 4x4
-            single_K = True
-
-        camera_view_mtx = []
-        camera_view_shift = []
-        if not single_K:
-            camera_proj_mtx = []
-        for i in range(bs):
-            R = Rs[i]
-            t = ts[i]
-            if not isinstance(R, torch.Tensor):
-                R = torch.tensor(R, dtype=torch.float32, device="cuda:0")
-            if not isinstance(t, torch.Tensor):
-                t = torch.tensor(t, dtype=torch.float32, device="cuda:0")
-            if rot_type == "quat":
-                R = quat2mat_torch(R.unsqueeze(0))[0]
-            cam_view_R = torch.matmul(self.yz_flip.to(R), R)
-            cam_view_t = -(torch.matmul(R.t(), t))  # cam pos
-
-            camera_view_mtx.append(cam_view_R)
-            camera_view_shift.append(cam_view_t)
-            if not single_K:
-                K = Ks[i]
-                cam_proj_mtx = projectiveprojection_real(K, 0, 0, width, height, near, far)
-                cam_proj_mtx = torch.as_tensor(cam_proj_mtx).float().cuda()  # 4x4
-                camera_proj_mtx.append(cam_proj_mtx)
-        camera_view_mtx = torch.stack(camera_view_mtx).cuda()  # bx3x3
-        camera_view_shift = torch.stack(camera_view_shift).cuda()  # bx3
-        if not single_K:
-            camera_proj_mtx = torch.stack(camera_proj_mtx)  # bx3x1 or bx4x4
-
-        # print("camera view matrix: \n", camera_view_mtx, camera_view_mtx.shape) # bx3x3, camera rot?
-        # print('camera view shift: \n', camera_view_shift, camera_view_shift.shape) # bx3, camera trans?
-        # print('camera projection mat: \n', camera_proj_mtx, camera_proj_mtx.shape) # projection matrix, 3x1
-        self.camera_params = [
-            camera_view_mtx,
-            camera_view_shift,
-            camera_proj_mtx,
-        ]
-        # self.rot_type = rot_type
-
-
-def K_to_fov(K, height, width):
-    fx = K[0, 0]
-    fy = K[1, 1]
-    fov_x = 2 * np.arctan2(width, 2 * fx)  # radian
-    fov_y = 2 * np.arctan2(height, 2 * fy)
-    return fov_x, fov_y
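A hedged usage sketch for the deleted `Renderer` (exported as `DIBRenderer` in this package's `__init__`), driving it with an explicit R, t, K instead of look-at parameters. The intrinsics and pose are made up, a GPU is assumed, and the final render call stays commented because mesh loading is out of scope; it assumes the 4-tuple return used by the other renderers in this package.

```python
import numpy as np

from lib.dr_utils.dib_renderer_x import DIBRenderer

H, W = 480, 640
K = np.array([[572.4, 0.0, 325.3],
              [0.0, 573.6, 242.0],
              [0.0, 0.0, 1.0]], dtype=np.float32)  # hypothetical pinhole intrinsics
R = np.eye(3, dtype=np.float32)                    # object rotation
t = np.array([0.0, 0.0, 0.5], dtype=np.float32)    # object 0.5 m in front of the camera

renderer = DIBRenderer(H, W, mode="VertexColor")
renderer.set_camera_parameters_from_RT_K([R], [t], K, H, W, near=0.01, far=10.0)

# with a loaded mesh (points_1xpx3, faces_fx3) and per-vertex colors colors_1xpx3:
# im, improb, normals, mask = renderer([points_1xpx3, faces_fx3], colors_1xpx3)
```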
diff --git a/lib/dr_utils/dib_renderer_x/renderer/fragment_shaders/__init__.py b/lib/dr_utils/dib_renderer_x/renderer/fragment_shaders/__init__.py
deleted file mode 100644
index 38682fdd22746a24dba94c196b25bd915d65086f..0000000000000000000000000000000000000000
--- a/lib/dr_utils/dib_renderer_x/renderer/fragment_shaders/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from .frag_phongtex import *
-from .frag_shtex import *
-from .frag_tex import *
-from .interpolation import *
diff --git a/lib/dr_utils/dib_renderer_x/renderer/fragment_shaders/frag_phongtex.py b/lib/dr_utils/dib_renderer_x/renderer/fragment_shaders/frag_phongtex.py
deleted file mode 100644
index a77608374c856582c8186d2dca9739cbb0e4d036..0000000000000000000000000000000000000000
--- a/lib/dr_utils/dib_renderer_x/renderer/fragment_shaders/frag_phongtex.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
-
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
-# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-# DEALINGS IN THE SOFTWARE.
-
-from __future__ import print_function
-from __future__ import division
-
-import torch
-import torch.nn
-
-from .interpolation import texinterpolation
-
-
-#####################################################
-def fragmentshader(
-    imnormal1_bxhxwx3,
-    lightdirect1_bx3,
-    eyedirect1_bxhxwx3,
-    material_bx3x3,
-    shininess_bx1,
-    imtexcoord_bxhxwx2,
-    texture_bx3xthxtw,
-    improb_bxhxwx1,
-):
-    # parallel light
-    lightdirect1_bx1x1x3 = lightdirect1_bx3.view(-1, 1, 1, 3)
-
-    # lambertian
-    cosTheta_bxhxwx1 = torch.sum(imnormal1_bxhxwx3 * lightdirect1_bx1x1x3, dim=3, keepdim=True)
-    cosTheta_bxhxwx1 = torch.clamp(cosTheta_bxhxwx1, 0, 1)
-
-    # specular
-    reflect = -lightdirect1_bx1x1x3 + 2 * cosTheta_bxhxwx1 * imnormal1_bxhxwx3
-    cosAlpha_bxhxwx1 = torch.sum(reflect * eyedirect1_bxhxwx3, dim=3, keepdim=True)
-        cosAlpha_bxhxwx1 = torch.clamp(cosAlpha_bxhxwx1, 1e-5, 1)  # keep away from 0: pow gives NaN gradients there
-        cosAlpha_bxhxwx1 = torch.pow(cosAlpha_bxhxwx1, shininess_bx1.view(-1, 1, 1, 1))  # shininess should be larger than 0
-
-    # simplified model
-    # light color is [1, 1, 1]
-    MatAmbColor_bx1x1x3 = material_bx3x3[:, 0:1, :].view(-1, 1, 1, 3)
-    MatDifColor_bxhxwx3 = material_bx3x3[:, 1:2, :].view(-1, 1, 1, 3) * cosTheta_bxhxwx1
-    MatSpeColor_bxhxwx3 = material_bx3x3[:, 2:3, :].view(-1, 1, 1, 3) * cosAlpha_bxhxwx1
-
-    # tex color
-    texcolor_bxhxwx3 = texinterpolation(imtexcoord_bxhxwx2, texture_bx3xthxtw)
-
-    # ambient and diffuse rely on object color while specular doesn't
-    color = (MatAmbColor_bx1x1x3 + MatDifColor_bxhxwx3) * texcolor_bxhxwx3 + MatSpeColor_bxhxwx3
-    color = color * improb_bxhxwx1
-
-    return torch.clamp(color, 0, 1)
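Since this shader is pure torch, a quick CPU sanity check is possible. A sketch, assuming the deleted module path stays importable; the shapes are arbitrary.

```python
import torch
from lib.dr_utils.dib_renderer_x.renderer.fragment_shaders.frag_phongtex import fragmentshader

b, h, w, th, tw = 1, 8, 8, 16, 16
normals = torch.nn.functional.normalize(torch.randn(b, h, w, 3), dim=3)
light = torch.tensor([[0.0, 0.0, 1.0]])  # unit parallel light along +z
eye = torch.nn.functional.normalize(torch.randn(b, h, w, 3), dim=3)
material = torch.tensor([[[0.2, 0.2, 0.2],    # ambient row
                          [0.6, 0.6, 0.6],    # diffuse row
                          [0.2, 0.2, 0.2]]])  # specular row
shininess = torch.tensor([[32.0]])
uv = torch.rand(b, h, w, 2)
texture = torch.rand(b, 3, th, tw)
mask = torch.ones(b, h, w, 1)

color = fragmentshader(normals, light, eye, material, shininess, uv, texture, mask)
assert color.shape == (b, h, w, 3) and 0.0 <= color.min() and color.max() <= 1.0
```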
diff --git a/lib/dr_utils/dib_renderer_x/renderer/fragment_shaders/frag_shtex.py b/lib/dr_utils/dib_renderer_x/renderer/fragment_shaders/frag_shtex.py
deleted file mode 100644
index 06bb3814423bce40f445718a781e85c62ed521ab..0000000000000000000000000000000000000000
--- a/lib/dr_utils/dib_renderer_x/renderer/fragment_shaders/frag_shtex.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
-
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
-# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-# DEALINGS IN THE SOFTWARE.
-
-from __future__ import print_function
-from __future__ import division
-
-import torch
-import torch.nn
-
-from .interpolation import texinterpolation
-
-
-def fragmentshader(
-    imnormal1_bxhxwx3,
-    lightparam_bx9,
-    imtexcoord_bxhxwx2,
-    texture_bx3xthxtw,
-    improb_bxhxwx1,
-):
-
-    # light effect
-    x = imnormal1_bxhxwx3[:, :, :, 0:1]
-    y = imnormal1_bxhxwx3[:, :, :, 1:2]
-    z = imnormal1_bxhxwx3[:, :, :, 2:3]
-
-    # spherical harmonic parameters
-    band0 = 0.2820948 * torch.ones_like(x)
-    band10 = -0.3257350 * y
-    band11 = 0.3257350 * z
-    band12 = -0.3257350 * x
-    band20 = 0.2731371 * (x * y)
-    band21 = -0.2731371 * (y * z)
-    band22 = 0.1365686 * (z * z) - 0.0788479
-    band23 = -0.1931371 * (x * z)
-    band24 = 0.1365686 * (x * x - y * y)
-
-    bands = torch.cat(
-        (
-            band0,
-            band10,
-            band11,
-            band12,
-            band20,
-            band21,
-            band22,
-            band23,
-            band24,
-        ),
-        dim=3,
-    )
-    coef = torch.sum(bands * lightparam_bx9.view(-1, 1, 1, 9), dim=3, keepdim=True)
-
-    # tex color
-    texcolor_bxhxwx3 = texinterpolation(imtexcoord_bxhxwx2, texture_bx3xthxtw)
-
-    # merge
-    color = coef * texcolor_bxhxwx3 * improb_bxhxwx1
-
-    return torch.clamp(color, 0, 1)
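The nine `band*` terms evaluate the real spherical-harmonics basis up to degree 2 at the per-pixel normal, with the constants chosen by the original authors. A sketch of a sanity check, assuming the deleted module path: lighting only the constant band should give a uniform image.

```python
import torch
from lib.dr_utils.dib_renderer_x.renderer.fragment_shaders.frag_shtex import fragmentshader

b, h, w = 1, 4, 4
normals = torch.nn.functional.normalize(torch.randn(b, h, w, 3), dim=3)
uv = torch.rand(b, h, w, 2)
texture = torch.ones(b, 3, 8, 8)  # all-white texture
mask = torch.ones(b, h, w, 1)

# light only the constant band: every pixel should get coef = band0 = 0.2820948
lightparam = torch.zeros(b, 9)
lightparam[:, 0] = 1.0
color = fragmentshader(normals, lightparam, uv, texture, mask)
assert torch.allclose(color, torch.full_like(color, 0.2820948))
```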
diff --git a/lib/dr_utils/dib_renderer_x/renderer/fragment_shaders/frag_tex.py b/lib/dr_utils/dib_renderer_x/renderer/fragment_shaders/frag_tex.py
deleted file mode 100644
index f40d8d16d595c9d56f6fe81765d4618be503b0dc..0000000000000000000000000000000000000000
--- a/lib/dr_utils/dib_renderer_x/renderer/fragment_shaders/frag_tex.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
-
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
-# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-# DEALINGS IN THE SOFTWARE.
-
-from __future__ import print_function
-from __future__ import division
-
-import torch
-import torch.nn
-
-from .interpolation import texinterpolation
-
-
-################################################
-def fragmentshader(imtexcoord_bxhxwx2, texture_bx3xthxtw, improb_bxhxwx1, filtering="nearest"):
-
-    # interpolation
-    texcolor_bxhxwx3 = texinterpolation(imtexcoord_bxhxwx2, texture_bx3xthxtw, filtering=filtering)
-
-    # mask
-    color = texcolor_bxhxwx3 * improb_bxhxwx1
-
-    return torch.clamp(color, 0, 1)
diff --git a/lib/dr_utils/dib_renderer_x/renderer/fragment_shaders/interpolation.py b/lib/dr_utils/dib_renderer_x/renderer/fragment_shaders/interpolation.py
deleted file mode 100644
index b683d815ddc6d7b33920e8fa757ce3cb8308e373..0000000000000000000000000000000000000000
--- a/lib/dr_utils/dib_renderer_x/renderer/fragment_shaders/interpolation.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
-
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
-# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-# DEALINGS IN THE SOFTWARE.
-
-from __future__ import print_function
-from __future__ import division
-
-import torch
-import torch.nn
-
-
-################################################
-def texinterpolation(imtexcoord_bxhxwx2, texture_bx3xthxtw, filtering="nearest"):
-    """Note that opengl tex coord is different from pytorch coord ogl coord
-    ranges from 0 to 1, y axis is from bottom to top and it supports circular
-    mode(-0.1 is the same as 0.9) pytorch coord ranges from -1 to 1, y axis is
-    from top to bottom and does not support circular.
-
-    filtering is the same as the mode parameter for
-    torch.nn.functional.grid_sample.
-    """
-
-    # convert coord mode from ogl to pytorch
-    imtexcoord_bxhxwx2 = torch.remainder(imtexcoord_bxhxwx2, 1.0)
-    imtexcoord_bxhxwx2 = imtexcoord_bxhxwx2 * 2 - 1  # [0, 1] to [-1, 1]
-    imtexcoord_bxhxwx2[:, :, :, 1] = -1.0 * imtexcoord_bxhxwx2[:, :, :, 1]  # reverse y
-
-    # sample
-    texcolor = torch.nn.functional.grid_sample(texture_bx3xthxtw, imtexcoord_bxhxwx2, mode=filtering)
-    texcolor = texcolor.permute(0, 2, 3, 1)
-
-    return texcolor
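A small worked example of the coordinate-convention flip, assuming the deleted module path: OpenGL uv (0.25, 0.25) addresses the bottom-left texel of a 2x2 texture, which lives at row 1, column 0 of the top-to-bottom image tensor.

```python
import torch
from lib.dr_utils.dib_renderer_x.renderer.fragment_shaders.interpolation import texinterpolation

texture = torch.arange(12, dtype=torch.float32).view(1, 3, 2, 2)  # four distinct texels

uv = torch.tensor([0.25, 0.25]).view(1, 1, 1, 2)  # OGL: bottom-left texel
color = texinterpolation(uv, texture)             # shape (1, 1, 1, 3)
assert torch.equal(color[0, 0, 0], texture[0, :, 1, 0])  # bottom row, left column
```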
diff --git a/lib/dr_utils/dib_renderer_x/renderer/phongrender.py b/lib/dr_utils/dib_renderer_x/renderer/phongrender.py
deleted file mode 100644
index e8784c9fa68e83760a02109bc51da2cabb7e5a21..0000000000000000000000000000000000000000
--- a/lib/dr_utils/dib_renderer_x/renderer/phongrender.py
+++ /dev/null
@@ -1,151 +0,0 @@
-# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
-
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
-# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-# DEALINGS IN THE SOFTWARE.
-
-from __future__ import print_function
-from __future__ import division
-
-from ..rasterizer import linear_rasterizer
-from ..utils import datanormalize
-from .fragment_shaders.frag_phongtex import fragmentshader
-from .vertex_shaders.perpsective import perspective_projection
-import torch
-import torch.nn as nn
-
-
-##################################################################
-class PhongRender(nn.Module):
-    def __init__(self, height, width):
-        super(PhongRender, self).__init__()
-
-        self.height = height
-        self.width = width
-
-        # render with point normal or not
-        self.smooth = False
-
-    def set_smooth(self, pfmtx):
-        self.smooth = True
-        self.pfmtx = torch.from_numpy(pfmtx).view(1, pfmtx.shape[0], pfmtx.shape[1]).cuda()
-
-    def forward(
-        self,
-        points,
-        cameras,
-        uv_bxpx2,
-        texture_bx3xthxtw,
-        lightdirect_bx3,
-        material_bx3x3,
-        shininess_bx1,
-        ft_fx3=None,
-    ):
-        """
-        points: [points_bxpx3, faces_fx3]
-        cameras: camera parameters
-            [camera_rot_bx3x3, camera_pos_bx3, camera_proj_3x1]
-        """
-
-        assert lightdirect_bx3 is not None, "When using the Phong model, light parameters must be passed"
-        assert material_bx3x3 is not None, "When using the Phong model, material parameters must be passed"
-        assert shininess_bx1 is not None, "When using the Phong model, shininess parameters must be passed"
-
-        ##############################################################
-        # first, MVP projection in vertexshader
-        points_bxpx3, faces_fx3 = points
-
-        # use faces_fx3 as ft_fx3 if not given
-        if ft_fx3 is None:
-            ft_fx3 = faces_fx3
-
-        # camera_rot_bx3x3, camera_pos_bx3, camera_proj_3x1 = cameras
-
-        points3d_bxfx9, points2d_bxfx6, normal_bxfx3 = perspective_projection(points_bxpx3, faces_fx3, cameras)
-
-        ################################################################
-        # normal
-
-        # decide which faces are front and which faces are back
-        normalz_bxfx1 = normal_bxfx3[:, :, 2:3]
-        # normalz_bxfx1 = torch.abs(normalz_bxfx1)
-
-        # normalize normal
-        normal1_bxfx3 = datanormalize(normal_bxfx3, axis=2)
-
-        ####################################################
-        # smooth or not
-        if self.smooth:
-            normal_bxpx3 = torch.matmul(self.pfmtx.repeat(normal_bxfx3.shape[0], 1, 1), normal_bxfx3)
-            n0 = normal_bxpx3[:, faces_fx3[:, 0], :]
-            n1 = normal_bxpx3[:, faces_fx3[:, 1], :]
-            n2 = normal_bxpx3[:, faces_fx3[:, 2], :]
-            normal_bxfx9 = torch.cat((n0, n1, n2), dim=2)
-        else:
-            normal_bxfx9 = normal_bxfx3.repeat(1, 1, 3)
-
-        ############################################################
-        # second, rasterization
-        fnum = normal1_bxfx3.shape[1]
-        bnum = normal1_bxfx3.shape[0]
-
-        # we have uv, normal, eye to interpolate
-        c0 = uv_bxpx2[:, ft_fx3[:, 0], :]
-        c1 = uv_bxpx2[:, ft_fx3[:, 1], :]
-        c2 = uv_bxpx2[:, ft_fx3[:, 2], :]
-        mask = torch.ones_like(c0[:, :, :1])
-        uv_bxfx3x3 = torch.cat((c0, mask, c1, mask, c2, mask), dim=2).view(bnum, fnum, 3, -1)
-
-        # normal & eye direction
-        normal_bxfx3x3 = normal_bxfx9.view(bnum, fnum, 3, -1)
-        eyedirect_bxfx9 = -points3d_bxfx9
-        eyedirect_bxfx3x3 = eyedirect_bxfx9.view(-1, fnum, 3, 3)
-
-        feat = torch.cat((normal_bxfx3x3, eyedirect_bxfx3x3, uv_bxfx3x3), dim=3)
-        feat = feat.view(bnum, fnum, -1)
-        imfeature, improb_bxhxwx1 = linear_rasterizer(
-            self.width,
-            self.height,
-            points3d_bxfx9,
-            points2d_bxfx6,
-            normalz_bxfx1,
-            feat,
-        )
-
-        ##################################################################
-        imnormal = imfeature[:, :, :, :3]
-        imeye = imfeature[:, :, :, 3:6]
-        imtexcoords = imfeature[:, :, :, 6:8]
-        immask = imfeature[:, :, :, 8:9]
-
-        # normalize
-        imnormal1 = datanormalize(imnormal, axis=3)
-        lightdirect_bx3 = datanormalize(lightdirect_bx3, axis=1)
-        imeye1 = datanormalize(imeye, axis=3)
-
-        imrender = fragmentshader(
-            imnormal1,
-            lightdirect_bx3,
-            imeye1,
-            material_bx3x3,
-            shininess_bx1,
-            imtexcoords,
-            texture_bx3xthxtw,
-            immask,
-        )
-        # return imrender, improb_bxhxwx1, normal1_bxfx3
-        return imrender, improb_bxhxwx1, normal1_bxfx3, immask
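A hedged sketch of driving PhongRender through the deleted `DIBRenderer` wrapper; it needs kaolin and a GPU. The material rows follow the shader's ambient/diffuse/specular layout, and the render call stays commented because it needs a loaded mesh, a UV map, and a texture.

```python
import torch
from lib.dr_utils.dib_renderer_x import DIBRenderer

renderer = DIBRenderer(256, 256, mode="Phong")
renderer.set_look_at_parameters([0], [30], [1])  # azimuth, elevation, distance

light = torch.tensor([[0.0, 0.0, 1.0]], device="cuda:0")
material = torch.tensor([[[0.3, 0.3, 0.3],    # ambient
                          [0.6, 0.6, 0.6],    # diffuse
                          [0.1, 0.1, 0.1]]],  # specular
                        device="cuda:0")
shininess = torch.tensor([[16.0]], device="cuda:0")

# im, improb, normals, mask = renderer(
#     [points_bxpx3, faces_fx3], uv_bxpx2, texture_bx3xthxtw, light, material, shininess
# )
```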
diff --git a/lib/dr_utils/dib_renderer_x/renderer/shrender.py b/lib/dr_utils/dib_renderer_x/renderer/shrender.py
deleted file mode 100644
index a8e17ca8516fb18c3330e1e87c03851ca4eb0b37..0000000000000000000000000000000000000000
--- a/lib/dr_utils/dib_renderer_x/renderer/shrender.py
+++ /dev/null
@@ -1,137 +0,0 @@
-# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
-
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
-# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-# DEALINGS IN THE SOFTWARE.
-
-from __future__ import print_function
-from __future__ import division
-
-from ..rasterizer import linear_rasterizer
-from ..utils import datanormalize
-from .fragment_shaders.frag_shtex import fragmentshader
-from .vertex_shaders.perpsective import perspective_projection
-import torch
-import torch.nn as nn
-
-
-##################################################################
-class SHRender(nn.Module):
-    def __init__(self, height, width):
-        super(SHRender, self).__init__()
-
-        self.height = height
-        self.width = width
-
-        # render with point normal or not
-        self.smooth = False
-
-    def set_smooth(self, pfmtx):
-        self.smooth = True
-        self.pfmtx = pfmtx
-
-    def forward(
-        self,
-        points,
-        cameras,
-        uv_bxpx2,
-        texture_bx3xthxtw,
-        lightparam,
-        ft_fx3=None,
-    ):
-        """
-        points: [points_bxpx3, faces_fx3]
-        cameras: camera parameters
-            [camera_rot_bx3x3, camera_pos_bx3, camera_proj_3x1]
-        """
-        assert lightparam is not None, "When using the Spherical Harmonics model, light parameters must be passed"
-
-        ##############################################################
-        # first, MVP projection in vertexshader
-        points_bxpx3, faces_fx3 = points
-
-        # use faces_fx3 as ft_fx3 if not given
-        if ft_fx3 is None:
-            ft_fx3 = faces_fx3
-
-        # camera_rot_bx3x3, camera_pos_bx3, camera_proj_3x1 = cameras
-
-        points3d_bxfx9, points2d_bxfx6, normal_bxfx3 = perspective_projection(points_bxpx3, faces_fx3, cameras)
-
-        ################################################################
-        # normal
-
-        # decide which faces are front and which faces are back
-        normalz_bxfx1 = normal_bxfx3[:, :, 2:3]
-        # normalz_bxfx1 = torch.abs(normalz_bxfx1)
-
-        # normalize normal
-        normal1_bxfx3 = datanormalize(normal_bxfx3, axis=2)
-
-        ####################################################
-        # smooth or not
-        if self.smooth:
-            normal_bxpx3 = torch.matmul(self.pfmtx, normal_bxfx3)
-            n0 = normal_bxpx3[:, faces_fx3[:, 0], :]
-            n1 = normal_bxpx3[:, faces_fx3[:, 1], :]
-            n2 = normal_bxpx3[:, faces_fx3[:, 2], :]
-            normal_bxfx9 = torch.cat((n0, n1, n2), dim=2)
-        else:
-            normal_bxfx9 = normal_bxfx3.repeat(1, 1, 3)
-
-        #########################################################
-        # second, rasterization
-        fnum = normal1_bxfx3.shape[1]
-        bnum = normal1_bxfx3.shape[0]
-
-        c0 = uv_bxpx2[:, ft_fx3[:, 0], :]
-        c1 = uv_bxpx2[:, ft_fx3[:, 1], :]
-        c2 = uv_bxpx2[:, ft_fx3[:, 2], :]
-        mask = torch.ones_like(c0[:, :, :1])
-        uv_bxfx3x3 = torch.cat((c0, mask, c1, mask, c2, mask), dim=2).view(bnum, fnum, 3, -1)
-
-        # normal
-        normal_bxfx3x3 = normal_bxfx9.view(bnum, fnum, 3, -1)
-        feat = torch.cat((normal_bxfx3x3, uv_bxfx3x3), dim=3)
-        feat = feat.view(bnum, fnum, -1)
-
-        imfeat, improb_bxhxwx1 = linear_rasterizer(
-            self.width,
-            self.height,
-            points3d_bxfx9,
-            points2d_bxfx6,
-            normalz_bxfx1,
-            feat,
-        )
-        imnormal_bxhxwx3 = imfeat[:, :, :, :3]
-        imtexcoords = imfeat[:, :, :, 3:5]
-        hardmask = imfeat[:, :, :, 5:]
-
-        ####################################################
-        # fragment shader
-        # parallel light
-        imnormal1_bxhxwx3 = datanormalize(imnormal_bxhxwx3, axis=3)
-        imrender = fragmentshader(
-            imnormal1_bxhxwx3,
-            lightparam,
-            imtexcoords,
-            texture_bx3xthxtw,
-            hardmask,
-        )
-
-        # return imrender, improb_bxhxwx1, normal1_bxfx3
-        return imrender, improb_bxhxwx1, normal1_bxfx3, hardmask
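`set_smooth` expects a point-face matrix `pfmtx`, but neither renderer defines how to build one. A hedged sketch of one reasonable construction (a hypothetical helper that uniformly averages incident face normals); note SHRender uses `pfmtx` directly inside `torch.matmul`, so it would need `torch.from_numpy(...).cuda()` first, while PhongRender's `set_smooth` does that conversion itself.

```python
import numpy as np

def build_pfmtx(faces_fx3, num_points):
    """Row-normalized point-to-face averaging matrix (hypothetical helper).

    pfmtx[p, f] is nonzero iff point p belongs to face f; multiplying it with
    per-face normals (f x 3) yields averaged per-point normals (p x 3).
    """
    pfmtx = np.zeros((num_points, len(faces_fx3)), dtype=np.float32)
    for f, (i0, i1, i2) in enumerate(faces_fx3):
        pfmtx[i0, f] = pfmtx[i1, f] = pfmtx[i2, f] = 1.0
    counts = pfmtx.sum(axis=1, keepdims=True)
    return pfmtx / np.maximum(counts, 1.0)  # guard points that touch no face
```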
diff --git a/lib/dr_utils/dib_renderer_x/renderer/texrender.py b/lib/dr_utils/dib_renderer_x/renderer/texrender.py
deleted file mode 100644
index 35499e24032c7501f1353f8bb48d222bdd7f51ff..0000000000000000000000000000000000000000
--- a/lib/dr_utils/dib_renderer_x/renderer/texrender.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
-
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
-# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-# DEALINGS IN THE SOFTWARE.
-
-from __future__ import print_function
-from __future__ import division
-
-from ..rasterizer import linear_rasterizer
-from ..utils import datanormalize
-from .fragment_shaders.frag_tex import fragmentshader
-from .vertex_shaders.perpsective import perspective_projection
-import torch
-import torch.nn as nn
-
-
-##################################################################
-class TexRender(nn.Module):
-    def __init__(self, height, width, filtering="nearest"):
-        super(TexRender, self).__init__()
-
-        self.height = height
-        self.width = width
-        self.filtering = filtering
-
-    def forward(self, points, cameras, uv_bxpx2, texture_bx3xthxtw, ft_fx3=None):
-        """
-        points: points_bxpx3, faces_fx3
-        cameras: camera_rot_bx3x3, camera_pos_bx3, camera_proj_3x1
-        """
-
-        ##############################################################
-        # first, MVP projection in vertexshader
-        points_bxpx3, faces_fx3 = points
-
-        # use faces_fx3 as ft_fx3 if not given
-        if ft_fx3 is None:
-            ft_fx3 = faces_fx3
-
-        # camera_rot_bx3x3, camera_pos_bx3, camera_proj_3x1 = cameras
-
-        points3d_bxfx9, points2d_bxfx6, normal_bxfx3 = perspective_projection(points_bxpx3, faces_fx3, cameras)
-
-        ################################################################
-        # normal
-
-        # decide which faces are front and which faces are back
-        normalz_bxfx1 = normal_bxfx3[:, :, 2:3]
-        # normalz_bxfx1 = torch.abs(normalz_bxfx1)
-
-        # normalize normal
-        normal1_bxfx3 = datanormalize(normal_bxfx3, axis=2)
-
-        ############################################################
-        # second, rasterization
-        c0 = uv_bxpx2[:, ft_fx3[:, 0], :]
-        c1 = uv_bxpx2[:, ft_fx3[:, 1], :]
-        c2 = uv_bxpx2[:, ft_fx3[:, 2], :]
-        mask = torch.ones_like(c0[:, :, :1])
-        uv_bxfx9 = torch.cat((c0, mask, c1, mask, c2, mask), dim=2)
-
-        imfeat, improb_bxhxwx1 = linear_rasterizer(
-            self.width,
-            self.height,
-            points3d_bxfx9,
-            points2d_bxfx6,
-            normalz_bxfx1,
-            uv_bxfx9,
-        )
-
-        imtexcoords = imfeat[:, :, :, :2]
-        hardmask = imfeat[:, :, :, 2:3]
-
-        # fragment shader
-        imrender = fragmentshader(imtexcoords, texture_bx3xthxtw, hardmask, filtering=self.filtering)
-
-        # return imrender, improb_bxhxwx1, normal1_bxfx3
-        return imrender, improb_bxhxwx1, normal1_bxfx3, hardmask
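The per-face feature layout that TexRender hands to the rasterizer interleaves a constant 1 after each vertex's uv; after rasterization, that channel interpolates into the hard foreground mask. A tiny, standalone CPU check of the packing:

```python
import torch

uv_bxpx2 = torch.rand(1, 4, 2)                 # 4 vertices with uv coords
ft_fx3 = torch.tensor([[0, 1, 2], [0, 2, 3]])  # 2 triangles

c0 = uv_bxpx2[:, ft_fx3[:, 0], :]
c1 = uv_bxpx2[:, ft_fx3[:, 1], :]
c2 = uv_bxpx2[:, ft_fx3[:, 2], :]
mask = torch.ones_like(c0[:, :, :1])
uv_bxfx9 = torch.cat((c0, mask, c1, mask, c2, mask), dim=2)

assert uv_bxfx9.shape == (1, 2, 9)  # (u0, v0, 1, u1, v1, 1, u2, v2, 1) per face
```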
diff --git a/lib/dr_utils/dib_renderer_x/renderer/texrender_batch.py b/lib/dr_utils/dib_renderer_x/renderer/texrender_batch.py
deleted file mode 100644
index 6bd4c43d1bb24cd8cf6d43a0a43d8b79e138ad34..0000000000000000000000000000000000000000
--- a/lib/dr_utils/dib_renderer_x/renderer/texrender_batch.py
+++ /dev/null
@@ -1,128 +0,0 @@
-from __future__ import print_function
-from __future__ import division
-
-from ..rasterizer import linear_rasterizer
-from ..utils import datanormalize
-from .fragment_shaders.frag_tex import fragmentshader
-from .vertex_shaders.perpsective import perspective_projection
-import torch
-import torch.nn as nn
-import numpy as np
-
-
-##################################################################
-class TexRenderBatch(nn.Module):
-    def __init__(self, height, width, filtering="nearest"):
-        super(TexRenderBatch, self).__init__()
-
-        self.height = height
-        self.width = width
-        self.filtering = filtering
-
-    def forward(self, points, cameras, uv_bxpx2, texture_bx3xthxtw, ft_fx3=None):
-        """
-        points: b x [points_1xpx3, faces_fx3]
-        cameras: [camera_rot_bx3x3, camera_pos_bx3, camera_proj_3x1]
-        uv_bxpx2: b x [1xpx2]
-        texture_bx3xthxtw: b x [1x3xthxtw]
-        ft_fx3: b x [fx3]
-        """
-        b = len(points)
-        assert b > 0, b
-        points3d_1xfx9_list = []
-        points2d_1xfx6_list = []
-        normalz_1xfx1_list = []
-        normal1_1xfx3_list = []
-        uv_1xfx9_list = []
-
-        single_intrinsic = True
-        if cameras[2].ndim == 3:
-            assert cameras[2].shape[0] == b
-            single_intrinsic = False
-
-        for i in range(b):
-            ##############################################################
-            # first, MVP projection in vertexshader
-            points_1xpx3, faces_fx3 = points[i]
-            if single_intrinsic:
-                cam_params = [
-                    cameras[0][i : i + 1],
-                    cameras[1][i : i + 1],
-                    cameras[2],
-                ]
-            else:
-                cam_params = [
-                    cameras[0][i : i + 1],
-                    cameras[1][i : i + 1],
-                    cameras[2][i],
-                ]
-            # use faces_fx3 as ft_fx3 if not given
-            if ft_fx3 is None:
-                ft_fx3_single = faces_fx3
-            else:
-                ft_fx3_single = ft_fx3[i]
-
-            (
-                points3d_1xfx9,
-                points2d_1xfx6,
-                normal_1xfx3,
-            ) = perspective_projection(points_1xpx3, faces_fx3, cam_params)
-
-            ################################################################
-            # normal
-
-            # decide which faces are front and which faces are back
-            normalz_1xfx1 = normal_1xfx3[:, :, 2:3]
-            # normalz_bxfx1 = torch.abs(normalz_bxfx1)
-
-            # normalize normal
-            normal1_1xfx3 = datanormalize(normal_1xfx3, axis=2)
-
-            ############################################################
-            # second, rasterization
-            uv_1xpx2 = uv_bxpx2[i]
-
-            c0 = uv_1xpx2[:, ft_fx3_single[:, 0], :]
-            c1 = uv_1xpx2[:, ft_fx3_single[:, 1], :]
-            c2 = uv_1xpx2[:, ft_fx3_single[:, 2], :]
-            mask = torch.ones_like(c0[:, :, :1])
-            uv_1xfx9 = torch.cat((c0, mask, c1, mask, c2, mask), dim=2)
-
-            # append data
-            points3d_1xfx9_list.append(points3d_1xfx9)
-            points2d_1xfx6_list.append(points2d_1xfx6)
-            normalz_1xfx1_list.append(normalz_1xfx1)
-            normal1_1xfx3_list.append(normal1_1xfx3)
-            uv_1xfx9_list.append(uv_1xfx9)
-
-        # rasterize each object independently and stack the results along the batch dim
-        ren_ims = []
-        ren_masks = []
-        ren_probs = []
-        for i in range(b):
-            imfeat, improb_1xhxwx1_i = linear_rasterizer(
-                self.width,
-                self.height,
-                points3d_1xfx9_list[i],
-                points2d_1xfx6_list[i],
-                normalz_1xfx1_list[i],
-                uv_1xfx9_list[i],
-            )
-            imtexcoords = imfeat[:, :, :, :2]  # (1,H,W,2)
-            hardmask = imfeat[:, :, :, 2:3]  # (1,H,W,1) mask
-            # fragment shader
-            texture_1x3xthxtw = texture_bx3xthxtw[i]
-            imrender_i = fragmentshader(imtexcoords, texture_1x3xthxtw, hardmask)
-            ren_ims.append(imrender_i)  # 1HW3
-            ren_probs.append(improb_1xhxwx1_i)
-            ren_masks.append(hardmask)
-
-        imrender = torch.cat(ren_ims, dim=0)  # bHW3
-        improb_bxhxwx1 = torch.cat(ren_probs, dim=0)
-        mask_bxhxwx1 = torch.cat(ren_masks, dim=0)
-        # return imrender, improb_1xhxwx1, normal1_1xfx3_list
-        return imrender, improb_bxhxwx1, normal1_1xfx3_list, mask_bxhxwx1
diff --git a/lib/dr_utils/dib_renderer_x/renderer/texrender_multi.py b/lib/dr_utils/dib_renderer_x/renderer/texrender_multi.py
deleted file mode 100644
index 9a175883d4a7145fbdf66547a8fca299c561b063..0000000000000000000000000000000000000000
--- a/lib/dr_utils/dib_renderer_x/renderer/texrender_multi.py
+++ /dev/null
@@ -1,138 +0,0 @@
-from __future__ import print_function
-from __future__ import division
-
-from ..rasterizer import linear_rasterizer
-from ..utils import datanormalize
-from .fragment_shaders.frag_tex import fragmentshader
-from .vertex_shaders.perpsective import perspective_projection
-import torch
-import torch.nn as nn
-import numpy as np
-
-
-##################################################################
-class TexRenderMulti(nn.Module):
-    def __init__(self, height, width, filtering="nearest"):
-        super(TexRenderMulti, self).__init__()
-
-        self.height = height
-        self.width = width
-        self.filtering = filtering
-
-    def forward(self, points, cameras, uv_bxpx2, texture_bx3xthxtw, ts, ft_fx3=None):
-        """
-        points: b x [points_1xpx3, faces_fx3]
-        cameras: [camera_rot_bx3x3, camera_pos_bx3, camera_proj_3x1]
-        uv_bxpx2: b x [1xpx2]
-        texture_bx3xthxtw: b x [1x3xthxtw]
-        ts: list of translations
-        ft_fx3: b x [fx3]
-        """
-        b = len(points)
-        points3d_1xfx9_list = []
-        points2d_1xfx6_list = []
-        normalz_1xfx1_list = []
-        normal1_1xfx3_list = []
-        uv_1xfx9_list = []
-        distances = np.array([t[2] for t in ts])
-        dist_inds = np.argsort(distances)[::-1]  # descending order
-
-        single_intrinsic = True
-        if cameras[2].ndim == 3:
-            assert cameras[2].shape[0] == b
-            single_intrinsic = False
-
-        for i in range(b):
-            ##############################################################
-            # first, MVP projection in vertexshader
-            points_1xpx3, faces_fx3 = points[i]
-            if single_intrinsic:
-                cam_params = [
-                    cameras[0][i : i + 1],
-                    cameras[1][i : i + 1],
-                    cameras[2],
-                ]
-            else:
-                cam_params = [
-                    cameras[0][i : i + 1],
-                    cameras[1][i : i + 1],
-                    cameras[2][i],
-                ]
-            # use faces_fx3 as ft_fx3 if not given
-            if ft_fx3 is None:
-                ft_fx3_single = faces_fx3
-            else:
-                ft_fx3_single = ft_fx3[i]
-
-            (
-                points3d_1xfx9,
-                points2d_1xfx6,
-                normal_1xfx3,
-            ) = perspective_projection(points_1xpx3, faces_fx3, cam_params)
-
-            ################################################################
-            # normal
-
-            # decide which faces are front and which faces are back
-            normalz_1xfx1 = normal_1xfx3[:, :, 2:3]
-            # normalz_bxfx1 = torch.abs(normalz_bxfx1)
-
-            # normalize normal
-            normal1_1xfx3 = datanormalize(normal_1xfx3, axis=2)
-
-            ############################################################
-            # second, rasterization
-            uv_1xpx2 = uv_bxpx2[i]
-
-            c0 = uv_1xpx2[:, ft_fx3_single[:, 0], :]
-            c1 = uv_1xpx2[:, ft_fx3_single[:, 1], :]
-            c2 = uv_1xpx2[:, ft_fx3_single[:, 2], :]
-            mask = torch.ones_like(c0[:, :, :1])
-            uv_1xfx9 = torch.cat((c0, mask, c1, mask, c2, mask), dim=2)
-
-            # append data
-            points3d_1xfx9_list.append(points3d_1xfx9)
-            points2d_1xfx6_list.append(points2d_1xfx6)
-            normalz_1xfx1_list.append(normalz_1xfx1)
-            normal1_1xfx3_list.append(normal1_1xfx3)
-            uv_1xfx9_list.append(uv_1xfx9)
-
-        # put the object with larger depth earlier
-        ren_ims = []
-        ren_masks = []
-        ren_probs = []
-        for dist_ind in dist_inds:  # NOTE: per-object depth sort is not exact occlusion handling, but very close
-            imfeat, improb_1xhxwx1_i = linear_rasterizer(
-                self.width,
-                self.height,
-                points3d_1xfx9_list[dist_ind],
-                points2d_1xfx6_list[dist_ind],
-                normalz_1xfx1_list[dist_ind],
-                uv_1xfx9_list[dist_ind],
-            )
-            imtexcoords = imfeat[:, :, :, :2]  # (1,H,W,2)
-            hardmask = imfeat[:, :, :, 2:3]  # (1,H,W,1) mask
-            # fragment shader
-            texture_1x3xthxtw = texture_bx3xthxtw[dist_ind]
-            imrender_i = fragmentshader(imtexcoords, texture_1x3xthxtw, hardmask)
-            ren_ims.append(imrender_i)
-            ren_probs.append(improb_1xhxwx1_i)
-            ren_masks.append(hardmask)
-
-        for i in range(len(dist_inds)):
-            if i == 0:
-                imrender = ren_ims[0]
-                improb_1xhxwx1 = ren_probs[0]
-                fg_mask = ren_masks[0]
-            else:
-                imrender_i = ren_ims[i]
-                improb_1xhxwx1_i = ren_probs[i]
-                hardmask_i = ren_masks[i]
-                mask_inds = torch.where(hardmask_i[0, :, :, 0] > 0.5)
-                imrender[:, mask_inds[0], mask_inds[1], :] = imrender_i[:, mask_inds[0], mask_inds[1], :]
-                improb_1xhxwx1[:, mask_inds[0], mask_inds[1], :] = improb_1xhxwx1_i[:, mask_inds[0], mask_inds[1], :]
-                fg_mask[:, mask_inds[0], mask_inds[1], :] = hardmask_i[:, mask_inds[0], mask_inds[1], :]
-
-        # return imrender, improb_1xhxwx1, normal1_1xfx3_list
-        # TODO: we can also return instance visible masks, full masks
-        return imrender, improb_1xhxwx1, normal1_1xfx3_list, fg_mask
diff --git a/lib/dr_utils/dib_renderer_x/renderer/vcrender.py b/lib/dr_utils/dib_renderer_x/renderer/vcrender.py
deleted file mode 100644
index 465badf0cf8b838c89fde81a0d5268dac2c6abe0..0000000000000000000000000000000000000000
--- a/lib/dr_utils/dib_renderer_x/renderer/vcrender.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
-
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
-# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-# DEALINGS IN THE SOFTWARE.
-
-from __future__ import print_function
-from __future__ import division
-
-from ..rasterizer import linear_rasterizer
-from ..utils import datanormalize
-from .vertex_shaders.perpsective import perspective_projection
-import torch
-import torch.nn as nn
-
-
-##################################################################
-class VCRender(nn.Module):
-    """Vertex-Color Renderer."""
-
-    def __init__(self, height, width):
-        super(VCRender, self).__init__()
-
-        self.height = height
-        self.width = width
-
-    def forward(self, points, cameras, colors_bxpx3):
-        """
-        points: [points_bxpx3, faces_fx3]
-        cameras: camera parameters
-            [camera_rot_bx3x3, camera_pos_bx3, camera_proj_3x1]
-        """
-
-        ##############################################################
-        # first, MVP projection in vertexshader
-        points_bxpx3, faces_fx3 = points
-
-        # camera_rot_bx3x3, camera_pos_bx3, camera_proj_3x1 = cameras
-
-        points3d_bxfx9, points2d_bxfx6, normal_bxfx3 = perspective_projection(points_bxpx3, faces_fx3, cameras)
-
-        ################################################################
-        # normal
-
-        # decide which faces are front and which faces are back
-        normalz_bxfx1 = normal_bxfx3[:, :, 2:3]
-        # normalz_bxfx1 = torch.abs(normalz_bxfx1)
-
-        # normalize normal
-        normal1_bxfx3 = datanormalize(normal_bxfx3, axis=2)
-
-        ############################################################
-        # second, rasterization
-        c0 = colors_bxpx3[:, faces_fx3[:, 0], :]
-        c1 = colors_bxpx3[:, faces_fx3[:, 1], :]
-        c2 = colors_bxpx3[:, faces_fx3[:, 2], :]
-        mask = torch.ones_like(c0[:, :, :1])
-        color_bxfx12 = torch.cat((c0, mask, c1, mask, c2, mask), dim=2)
-
-        imfeat, improb_bxhxwx1 = linear_rasterizer(
-            self.width,
-            self.height,
-            points3d_bxfx9,
-            points2d_bxfx6,
-            normalz_bxfx1,
-            color_bxfx12,
-        )
-
-        imrender = imfeat[:, :, :, :3]
-        hardmask = imfeat[:, :, :, 3:]
-
-        # return imrender, improb_bxhxwx1, normal1_bxfx3
-        return imrender, improb_bxhxwx1, normal1_bxfx3, hardmask
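-
-
-if __name__ == "__main__":
-    # Hedged usage sketch, added for illustration (not part of the original
-    # file): shapes follow the forward() docstring, values are random
-    # placeholders, and running it requires the compiled CUDA rasterizer.
-    b, p, f = 2, 64, 32
-    renderer = VCRender(height=64, width=64)
-    points_bxpx3 = torch.rand(b, p, 3, device="cuda")
-    faces_fx3 = torch.randint(0, p, (f, 3), device="cuda")
-    colors_bxpx3 = torch.rand(b, p, 3, device="cuda")
-    cameras = [
-        torch.eye(3, device="cuda").expand(b, 3, 3),          # camera_rot_bx3x3
-        torch.full((b, 3), 2.0, device="cuda"),               # camera_pos_bx3
-        torch.tensor([[2.0], [2.0], [-1.0]], device="cuda"),  # camera_proj_3x1
-    ]
-    im, prob, normals, mask = renderer([points_bxpx3, faces_fx3], cameras, colors_bxpx3)
-    print(im.shape, prob.shape, mask.shape)  # (2,64,64,3) (2,64,64,1) (2,64,64,1)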
diff --git a/lib/dr_utils/dib_renderer_x/renderer/vcrender_batch.py b/lib/dr_utils/dib_renderer_x/renderer/vcrender_batch.py
deleted file mode 100644
index d43dcfe3058e08e76393dca891679cc92cce61e4..0000000000000000000000000000000000000000
--- a/lib/dr_utils/dib_renderer_x/renderer/vcrender_batch.py
+++ /dev/null
@@ -1,139 +0,0 @@
-from __future__ import division
-
-from ..rasterizer import linear_rasterizer
-from ..utils import datanormalize
-from .vertex_shaders.perpsective import perspective_projection
-import torch
-import torch.nn as nn
-
-from functools import partial
-
-
-def multi_apply(func, *args, **kwargs):
-    pfunc = partial(func, **kwargs) if kwargs else func
-    map_results = map(pfunc, *args)
-    return tuple(map(list, zip(*map_results)))
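-
-# Illustration (added, not in the original file): multi_apply maps `func` over
-# the zipped argument lists and transposes the per-call result tuples into
-# per-output lists, e.g.
-#   multi_apply(divmod, [7, 9], [2, 4]) == ([3, 2], [1, 1])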
-
-
-##################################################################
-class VCRenderBatch(nn.Module):
-    """Vertex-Color Renderer Batch (batch rendering for different objects, only
-    one object for each image) The original one only support batch rendering
-    for a single object."""
-
-    def __init__(self, height, width):
-        super(VCRenderBatch, self).__init__()
-
-        self.height = height
-        self.width = width
-
-    def forward(self, points, cameras, colors):
-        """
-        points: b x [points_1xpx3, faces_fx3]
-        cameras: camera parameters
-            [camera_rot_bx3x3, camera_pos_bx3, camera_proj_{b}x3x1]
-        colors: b x [colors_1xpx3]
-        """
-        b = len(points)
-        points3d_1xfx9_list = []
-        points2d_1xfx6_list = []
-        normalz_1xfx1_list = []
-        normal1_1xfx3_list = []
-        color_1xfx12_list = []
-
-        single_intrinsic = True
-        if cameras[2].ndim == 3:
-            assert cameras[2].shape[0] == b
-            single_intrinsic = False
-
-        for i in range(b):
-            ##############################################################
-            # first, MVP projection in vertexshader
-            points_1xpx3, faces_fx3 = points[i]
-            if single_intrinsic:
-                cam_params = [
-                    cameras[0][i : i + 1],
-                    cameras[1][i : i + 1],
-                    cameras[2],
-                ]
-            else:
-                cam_params = [
-                    cameras[0][i : i + 1],
-                    cameras[1][i : i + 1],
-                    cameras[2][i],
-                ]
-            (
-                points3d_1xfx9,
-                points2d_1xfx6,
-                normal_1xfx3,
-            ) = perspective_projection(points_1xpx3, faces_fx3, cam_params)
-
-            ################################################################
-            # normal
-
-            # decide which faces are front and which faces are back
-            normalz_1xfx1 = normal_1xfx3[:, :, 2:3]
-            # normalz_bxfx1 = torch.abs(normalz_bxfx1)
-
-            # normalize normal
-            normal1_1xfx3 = datanormalize(normal_1xfx3, axis=2)
-
-            ############################################################
-            # second, rasterization
-            colors_1xpx3 = colors[i]
-            c0 = colors_1xpx3[:, faces_fx3[:, 0], :]
-            c1 = colors_1xpx3[:, faces_fx3[:, 1], :]
-            c2 = colors_1xpx3[:, faces_fx3[:, 2], :]
-            mask = torch.ones_like(c0[:, :, :1])
-            color_1xfx12 = torch.cat((c0, mask, c1, mask, c2, mask), dim=2)
-
-            # append data
-            points3d_1xfx9_list.append(points3d_1xfx9)
-            points2d_1xfx6_list.append(points2d_1xfx6)
-            normalz_1xfx1_list.append(normalz_1xfx1)
-            normal1_1xfx3_list.append(normal1_1xfx3)
-            color_1xfx12_list.append(color_1xfx12)
-
-        # points3d_1xFx9 = torch.cat(points3d_1xfx9_list, dim=1)
-        # points2d_1xFx6 = torch.cat(points2d_1xfx6_list, dim=1)
-        # normalz_1xFx1 = torch.cat(normalz_1xfx1_list, dim=1)
-        # normal1_1xFx3 = torch.cat(normal1_1xfx3_list, dim=1)
-        # color_1xFx12 = torch.cat(color_1xfx12_list, dim=1)
-
-        debug = False  # flip to True to pass explicit rasterizer debug arguments below
-        if not debug:
-            imfeat_list, improb_list = multi_apply(
-                linear_rasterizer,
-                [self.width for _ in range(b)],
-                [self.height for _ in range(b)],
-                points3d_1xfx9_list,
-                points2d_1xfx6_list,
-                normalz_1xfx1_list,
-                color_1xfx12_list,
-            )
-        else:  # debug
-            imfeat_list, improb_list = multi_apply(
-                linear_rasterizer,
-                [self.width for _ in range(b)],
-                [self.height for _ in range(b)],
-                points3d_1xfx9_list,
-                points2d_1xfx6_list,
-                normalz_1xfx1_list,
-                color_1xfx12_list,
-                [0.02 for _ in range(b)],
-                [30 for _ in range(b)],
-                [1000 for _ in range(b)],
-                [7000 for _ in range(b)],
-                [True for _ in range(b)],
-            )  # the last one is debug
-        imfeat = torch.cat(imfeat_list, dim=0)  # [b,H,W,4]
-        improb_bxhxwx1 = torch.cat(improb_list, dim=0)  # [b,H,W,1]
-        imrender = imfeat[:, :, :, :3]  # (b,H,W,3), rgb
-        hardmask = imfeat[:, :, :, 3:]  # (b,H,W,1) mask
-        if debug:  # optionally visualize the rendered hard mask
-            import cv2
-
-            hardmask_cpu = hardmask.detach().cpu().numpy()[0][:, :, 0]
-            cv2.imshow("hardmask", hardmask_cpu)
-
-        # return imrender, improb_1xhxwx1, normal1_1xFx3
-        return imrender, improb_bxhxwx1, normal1_1xfx3_list, hardmask
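-
-
-if __name__ == "__main__":
-    # Hedged usage sketch, added for illustration (not part of the original
-    # file): two single-object meshes rendered as a batch of two images with
-    # shared intrinsics; requires the compiled CUDA rasterizer.
-    b, p, f = 2, 64, 32
-    renderer = VCRenderBatch(height=64, width=64)
-    points = [
-        [torch.rand(1, p, 3, device="cuda"), torch.randint(0, p, (f, 3), device="cuda")]
-        for _ in range(b)
-    ]
-    colors = [torch.rand(1, p, 3, device="cuda") for _ in range(b)]
-    cameras = [
-        torch.eye(3, device="cuda").expand(b, 3, 3),
-        torch.full((b, 3), 2.0, device="cuda"),
-        torch.tensor([[2.0], [2.0], [-1.0]], device="cuda"),  # shared 3x1 intrinsics
-    ]
-    im, prob, normals_list, mask = renderer(points, cameras, colors)
-    print(im.shape)  # (2, 64, 64, 3)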
diff --git a/lib/dr_utils/dib_renderer_x/renderer/vcrender_multi.py b/lib/dr_utils/dib_renderer_x/renderer/vcrender_multi.py
deleted file mode 100644
index d6d0a612a53091a143f86a9700609d60389a9bb5..0000000000000000000000000000000000000000
--- a/lib/dr_utils/dib_renderer_x/renderer/vcrender_multi.py
+++ /dev/null
@@ -1,130 +0,0 @@
-from __future__ import division
-
-from ..rasterizer import linear_rasterizer
-from ..utils import datanormalize
-from .vertex_shaders.perpsective import perspective_projection
-import torch
-import torch.nn as nn
-
-
-##################################################################
-class VCRenderMulti(nn.Module):
-    """Vertex-Color Renderer."""
-
-    def __init__(self, height, width):
-        super(VCRenderMulti, self).__init__()
-
-        self.height = height
-        self.width = width
-
-    def forward(self, points, cameras, colors):
-        """
-        points: b x [points_1xpx3, faces_fx3]
-        cameras: camera parameters
-            [camera_rot_bx3x3, camera_pos_bx3, camera_proj_3x1]
-        colors: b x [colors_1xpx3]
-        """
-        b = len(points)
-        points3d_1xfx9_list = []
-        points2d_1xfx6_list = []
-        normalz_1xfx1_list = []
-        normal1_1xfx3_list = []
-        color_1xfx12_list = []
-
-        single_intrinsic = True
-        if cameras[2].ndim == 3:
-            assert cameras[2].shape[0] == b
-            single_intrinsic = False
-
-        for i in range(b):
-            ##############################################################
-            # first, MVP projection in vertexshader
-            points_1xpx3, faces_fx3 = points[i]
-            if single_intrinsic:
-                cam_params = [
-                    cameras[0][i : i + 1],
-                    cameras[1][i : i + 1],
-                    cameras[2],
-                ]
-            else:
-                cam_params = [
-                    cameras[0][i : i + 1],
-                    cameras[1][i : i + 1],
-                    cameras[2][i],
-                ]
-            (
-                points3d_1xfx9,
-                points2d_1xfx6,
-                normal_1xfx3,
-            ) = perspective_projection(points_1xpx3, faces_fx3, cam_params)
-
-            ################################################################
-            # normal
-
-            # decide which faces are front and which faces are back
-            normalz_1xfx1 = normal_1xfx3[:, :, 2:3]
-            # normalz_bxfx1 = torch.abs(normalz_bxfx1)
-
-            # normalize normal
-            normal1_1xfx3 = datanormalize(normal_1xfx3, axis=2)
-
-            ############################################################
-            # second, rasterization
-            colors_1xpx3 = colors[i]
-            c0 = colors_1xpx3[:, faces_fx3[:, 0], :]
-            c1 = colors_1xpx3[:, faces_fx3[:, 1], :]
-            c2 = colors_1xpx3[:, faces_fx3[:, 2], :]
-            mask = torch.ones_like(c0[:, :, :1])
-            color_1xfx12 = torch.cat((c0, mask, c1, mask, c2, mask), dim=2)
-
-            # append data
-            points3d_1xfx9_list.append(points3d_1xfx9)
-            points2d_1xfx6_list.append(points2d_1xfx6)
-            normalz_1xfx1_list.append(normalz_1xfx1)
-            normal1_1xfx3_list.append(normal1_1xfx3)
-            color_1xfx12_list.append(color_1xfx12)
-
-        points3d_1xFx9 = torch.cat(points3d_1xfx9_list, dim=1)
-        points2d_1xFx6 = torch.cat(points2d_1xfx6_list, dim=1)
-        normalz_1xFx1 = torch.cat(normalz_1xfx1_list, dim=1)
-        normal1_1xFx3 = torch.cat(normal1_1xfx3_list, dim=1)
-        color_1xFx12 = torch.cat(color_1xfx12_list, dim=1)
-
-        debug = False  # flip to True to pass explicit rasterizer debug arguments below
-        if not debug:
-            imfeat, improb_1xhxwx1 = linear_rasterizer(
-                self.width,
-                self.height,
-                points3d_1xFx9,
-                points2d_1xFx6,
-                normalz_1xFx1,
-                color_1xFx12,
-            )
-        else:  # debug
-            imfeat, improb_1xhxwx1 = linear_rasterizer(
-                self.width,
-                self.height,
-                points3d_1xFx9,
-                points2d_1xFx6,
-                normalz_1xFx1,
-                color_1xFx12,
-                0.02,
-                30,
-                1000,
-                7000,
-                True,
-            )  # the last one is debug
-        imrender = imfeat[:, :, :, :3]  # (1,H,W,3), rgb
-        hardmask = imfeat[:, :, :, 3:]  # (1,H,W,1) mask
-        if debug:  # optionally visualize the rendered hard mask
-            import cv2
-
-            hardmask_cpu = hardmask.detach().cpu().numpy()[0][:, :, 0]
-            cv2.imshow("hardmask", hardmask_cpu)
-
-        # return imrender, improb_1xhxwx1, normal1_1xFx3
-        return imrender, improb_1xhxwx1, normal1_1xFx3, hardmask
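-
-
-if __name__ == "__main__":
-    # Hedged usage sketch, added for illustration (not part of the original
-    # file): m objects composited into a single 1xHxW image; requires the
-    # compiled CUDA rasterizer.
-    m, p, f = 2, 64, 32
-    renderer = VCRenderMulti(height=64, width=64)
-    points = [
-        [torch.rand(1, p, 3, device="cuda"), torch.randint(0, p, (f, 3), device="cuda")]
-        for _ in range(m)
-    ]
-    colors = [torch.rand(1, p, 3, device="cuda") for _ in range(m)]
-    cameras = [
-        torch.eye(3, device="cuda").expand(m, 3, 3),
-        torch.full((m, 3), 2.0, device="cuda"),
-        torch.tensor([[2.0], [2.0], [-1.0]], device="cuda"),
-    ]
-    im, prob, normals, mask = renderer(points, cameras, colors)
-    print(im.shape)  # (1, 64, 64, 3)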
diff --git a/lib/dr_utils/dib_renderer_x/renderer/vertex_shaders/__init__.py b/lib/dr_utils/dib_renderer_x/renderer/vertex_shaders/__init__.py
deleted file mode 100644
index fed78931464964011344933fef9f1832f8297f4a..0000000000000000000000000000000000000000
--- a/lib/dr_utils/dib_renderer_x/renderer/vertex_shaders/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .perpsective import *
diff --git a/lib/dr_utils/dib_renderer_x/renderer/vertex_shaders/perpsective.py b/lib/dr_utils/dib_renderer_x/renderer/vertex_shaders/perpsective.py
deleted file mode 100644
index 973b1ae50bfa5450ccb682a93d717b8b72e1bcce..0000000000000000000000000000000000000000
--- a/lib/dr_utils/dib_renderer_x/renderer/vertex_shaders/perpsective.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
-
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
-# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-# DEALINGS IN THE SOFTWARE.
-
-from __future__ import print_function
-from __future__ import division
-
-import torch
-import torch.nn
-
-
-##################################################
-def perspective_projection(points_bxpx3, faces_fx3, cameras):
-
-    # perspective projection; uses a single set of pinhole intrinsic parameters
-    camera_rot_bx3x3, camera_pos_bx3, camera_proj_3x1 = cameras
-    if camera_proj_3x1.shape[-1] == 4:  # 4x4 proj
-        # NOTE: use real camera perspective projection
-        return perspective_projection_real(points_bxpx3, faces_fx3, cameras)
-    cameratrans_rot_bx3x3 = camera_rot_bx3x3.permute(0, 2, 1)
-
-    # follow pixel2mesh!!!
-    # new_p = cam_mat * (old_p - cam_pos)
-    # NOTE: make sure here points_bxpx3 is not in-place modified
-    points_bxpx3 = points_bxpx3 - camera_pos_bx3.view(-1, 1, 3)
-    points_bxpx3 = torch.matmul(points_bxpx3, cameratrans_rot_bx3x3)
-
-    camera_proj_bx1x3 = camera_proj_3x1.view(-1, 1, 3)
-    xy_bxpx3 = points_bxpx3 * camera_proj_bx1x3
-    xy_bxpx2 = xy_bxpx3[:, :, :2] / xy_bxpx3[:, :, 2:3]
-
-    ##########################################################
-    # 1 points
-    pf0_bxfx3 = points_bxpx3[:, faces_fx3[:, 0], :]
-    pf1_bxfx3 = points_bxpx3[:, faces_fx3[:, 1], :]
-    pf2_bxfx3 = points_bxpx3[:, faces_fx3[:, 2], :]
-    points3d_bxfx9 = torch.cat((pf0_bxfx3, pf1_bxfx3, pf2_bxfx3), dim=2)
-
-    xy_f0 = xy_bxpx2[:, faces_fx3[:, 0], :]
-    xy_f1 = xy_bxpx2[:, faces_fx3[:, 1], :]
-    xy_f2 = xy_bxpx2[:, faces_fx3[:, 2], :]
-    points2d_bxfx6 = torch.cat((xy_f0, xy_f1, xy_f2), dim=2)
-
-    ######################################################
-    # 2 normals
-    v01_bxfx3 = pf1_bxfx3 - pf0_bxfx3
-    v02_bxfx3 = pf2_bxfx3 - pf0_bxfx3
-
-    # pass dim explicitly: torch.cross would otherwise pick the first dim of size 3, which breaks when the batch size is 3
-    normal_bxfx3 = torch.cross(v01_bxfx3, v02_bxfx3, dim=2)
-
-    return points3d_bxfx9, points2d_bxfx6, normal_bxfx3
-
-
-def perspective_projection_real(points_bxpx3, faces_fx3, cameras):
-
-    # perspective projection with a real 4x4 camera projection matrix
-    camera_rot_bx3x3, camera_pos_bx3, camera_proj_4x4 = cameras
-    cameratrans_rot_bx3x3 = camera_rot_bx3x3.permute(0, 2, 1)
-
-    # follow pixel2mesh!!!
-    # new_p = cam_mat * (old_p - cam_pos)
-    # NOTE: make sure here points_bxpx3 is not in-place modified
-    points_bxpx3 = points_bxpx3 - camera_pos_bx3.view(-1, 1, 3)
-    points_bxpx3 = torch.matmul(points_bxpx3, cameratrans_rot_bx3x3)
-
-    b, p = points_bxpx3.shape[:2]
-    points_bxpx4 = points_bxpx3.new_ones(b, p, 4)
-    points_bxpx4[:, :, :3] = points_bxpx3
-
-    camera_proj_bx4x4 = camera_proj_4x4.view(-1, 4, 4)
-    xy_bxpx4 = torch.matmul(points_bxpx4, camera_proj_bx4x4)
-    xy_bxpx2 = xy_bxpx4[:, :, :2] / xy_bxpx4[:, :, 3:4]
-
-    ##########################################################
-    # 1 points
-    pf0_bxfx3 = points_bxpx3[:, faces_fx3[:, 0], :]
-    pf1_bxfx3 = points_bxpx3[:, faces_fx3[:, 1], :]
-    pf2_bxfx3 = points_bxpx3[:, faces_fx3[:, 2], :]
-    points3d_bxfx9 = torch.cat((pf0_bxfx3, pf1_bxfx3, pf2_bxfx3), dim=2)
-
-    xy_f0 = xy_bxpx2[:, faces_fx3[:, 0], :]
-    xy_f1 = xy_bxpx2[:, faces_fx3[:, 1], :]
-    xy_f2 = xy_bxpx2[:, faces_fx3[:, 2], :]
-    points2d_bxfx6 = torch.cat((xy_f0, xy_f1, xy_f2), dim=2)
-
-    ######################################################
-    # 2 normals
-    v01_bxfx3 = pf1_bxfx3 - pf0_bxfx3
-    v02_bxfx3 = pf2_bxfx3 - pf0_bxfx3
-
-    # pass dim explicitly: torch.cross would otherwise pick the first dim of size 3, which breaks when the batch size is 3
-    normal_bxfx3 = torch.cross(v01_bxfx3, v02_bxfx3, dim=2)
-
-    return points3d_bxfx9, points2d_bxfx6, normal_bxfx3
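-
-
-if __name__ == "__main__":
-    # Hedged CPU-only sketch, added for illustration (not part of the original
-    # file): the code dispatches on the intrinsic parameter's last dimension,
-    # a (3,1) pinhole approximation here vs. a full 4x4 projection matrix.
-    b, p, f = 1, 8, 4
-    pts = torch.rand(b, p, 3)
-    faces = torch.randint(0, p, (f, 3))
-    rot = torch.eye(3).expand(b, 3, 3)
-    pos = torch.full((b, 3), 2.0)
-    proj_3x1 = torch.tensor([[2.0], [2.0], [-1.0]])
-    p3d, p2d, n = perspective_projection(pts, faces, [rot, pos, proj_3x1])
-    print(p3d.shape, p2d.shape, n.shape)  # (1,4,9) (1,4,6) (1,4,3)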
diff --git a/lib/dr_utils/dib_renderer_x/renderer_dibr.py b/lib/dr_utils/dib_renderer_x/renderer_dibr.py
deleted file mode 100644
index b96a93b74a3717311391c4c3dfddb2f1ca59a2cf..0000000000000000000000000000000000000000
--- a/lib/dr_utils/dib_renderer_x/renderer_dibr.py
+++ /dev/null
@@ -1,358 +0,0 @@
-import os
-import os.path as osp
-
-import numpy as np
-from . import DIBRenderer
-import torch
-from tqdm import tqdm
-import cv2
-
-from core.utils.pose_utils import quat2mat_torch
-from lib.pysixd import inout, misc
-from lib.dr_utils.rep import TriangleMesh
-
-
-def load_ply_models(
-    obj_paths,
-    texture_paths=None,
-    vertex_scale=0.001,
-    device="cuda",
-    width=512,
-    height=512,
-    tex_resize=False,
-):
-    """
-    NOTE: ignore width and height if tex_resize=False
-    Args:
-        vertex_scale: default 0.001 is used for bop models!
-        tex_resize: resize the texture to smaller size for GPU memory saving
-    Returns:
-        a list of dicts
-    """
-    assert all([".obj" in _path for _path in obj_paths])
-    models = []
-    for i, obj_path in enumerate(tqdm(obj_paths)):
-        model = {}
-        mesh = TriangleMesh.from_obj(obj_path)
-        vertices = mesh.vertices[:, :3] * vertex_scale  # x,y,z; apply the documented vertex_scale (e.g. mm -> m)
-        colors = mesh.vertices[:, 3:6]  # rgb
-        faces = mesh.faces.int()
-
-        # normalize verts ( - center)
-        vertices_max = vertices.max()
-        vertices_min = vertices.min()
-        vertices_middle = (vertices_max + vertices_min) / 2.0
-        vertices = vertices - vertices_middle
-        model["vertices"] = vertices[:, :].to(device)
-
-        model["colors"] = colors[:, :].to(device)
-        model["faces"] = faces[:, :].to(device)  # NOTE: -1
-
-        if texture_paths is not None:
-            texture = cv2.imread(texture_paths[i], cv2.IMREAD_COLOR)[:, :, ::-1].astype(np.float32) / 255.0
-            if tex_resize:
-                texture = cv2.resize(texture, (width, height), interpolation=cv2.INTER_AREA)
-            # CHW
-            texture = torch.from_numpy(texture.transpose(2, 0, 1)).to(device)
-
-            model["face_uvs"] = mesh.uvs[:, :].to(device)
-            model["face_uv_ids"] = mesh.face_textures[:, :].to(device)
-            model["texture"] = texture
-
-            # NOTE: texture_uv is None
-            model["texture_uv"] = None
-
-        models.append(model)
-
-    return models
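-
-# Hedged usage sketch (added; the path below is a placeholder, not a file
-# shipped with the repo):
-#   models = load_ply_models(["./models/obj_000001.obj"], texture_paths=None)
-#   models[0].keys()  # vertices, colors, faces (+ texture fields if textured)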
-
-
-class Renderer_dibr(object):
-    def __init__(self, height, width, mode):
-        self.dib_ren = DIBRenderer(height, width, mode)
-
-    def render_scene(
-        self,
-        Rs,
-        ts,
-        models,
-        *,
-        K,
-        width,
-        height,
-        znear=0.01,
-        zfar=100,
-        rot_type="mat",
-        with_mask=False,
-        with_depth=True,
-    ):
-        """render a scene with m>=1 objects
-        Args:
-            Rs: [m,3,3] or [m,4] tensor
-            ts: [m,3,] tensor
-            models: list of dicts, each stores {"vertices":, "colors":, "faces":, }
-            K: [3,3]
-        Returns:
-            a dict:
-                color: (h,w,3)
-                mask: (h,w) fg mask
-                depth: (h,w)
-        """
-        ret = {}
-        self.scene_ren = DIBRenderer(height, width, mode="VertexColorMulti")
-        self.scene_ren.set_camera_parameters_from_RT_K(
-            Rs, ts, K, height, width, near=znear, far=zfar, rot_type=rot_type
-        )
-        colors = [model["colors"][None] for model in models]  # m * [1, p, 3]
-        points = [[model["vertices"][None], model["faces"].long()] for model in models]
-
-        # points: list of [vertices, faces]
-        # colors: list of colors
-        color, im_prob, _, im_mask = self.scene_ren.forward(points=points, colors=colors)
-
-        ret["color"] = color.squeeze()
-        ret["prob"] = im_prob.squeeze()
-        ret["mask"] = im_mask.squeeze()
-        if with_depth:
-            # transform xyz
-            if not isinstance(Rs, torch.Tensor):
-                Rs = torch.stack(Rs)  # list
-            if rot_type == "quat":
-                R_mats = quat2mat_torch(Rs)
-            else:
-                R_mats = Rs
-            xyzs = [
-                misc.transform_pts_Rt_th(model["vertices"], R_mats[_id], ts[_id])[None]
-                for _id, model in enumerate(models)
-            ]
-            ren_xyzs, _, _, _ = self.scene_ren.forward(points=points, colors=xyzs)
-            ret["depth"] = ren_xyzs[0, :, :, 2]  # bhw
-
-        # color: hw3; mask: hw; depth: hw
-        return ret
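-
-    # Hedged usage sketch for render_scene (added; all tensors are placeholders):
-    #   ren = Renderer_dibr(480, 640, mode="VertexColorMulti")
-    #   out = ren.render_scene(Rs, ts, models, K=K, width=640, height=480)
-    #   color, mask, depth = out["color"], out["mask"], out["depth"]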
-
-    def render_scene_tex(
-        self,
-        Rs,
-        ts,
-        models,
-        *,
-        K,
-        width,
-        height,
-        znear=0.01,
-        zfar=100,
-        rot_type="mat",
-        uv_type="vertex",
-        with_mask=False,
-        with_depth=True,
-    ):
-        """render a scene with m>=1 object for textured objects
-        Args:
-            Rs: [m,3,3] or [m,4] tensor
-            ts: [m,3] tensor
-            models: list of dict, each stores
-                vertex uv: {"vertices":, "faces":, "texture":, "vertex_uvs":,}
-                face uv: {"vertices":, "faces":, "texture":, "face_uvs":, "face_uv_ids":,}
-            K: [3,3]
-            uv_type: `vertex` | `face`
-        Returns:
-            dict:
-                color: (h,w,3)
-                mask: (h,w) fg mask (to get instance masks, use batch mode)
-                depth: (h,w)
-        """
-        ret = {}
-        self.scene_ren = DIBRenderer(height, width, mode="TextureMulti")
-        self.scene_ren.set_camera_parameters_from_RT_K(
-            Rs, ts, K, height, width, near=znear, far=zfar, rot_type=rot_type
-        )
-        # points: list of [vertices, faces]
-        points = [[model["vertices"][None], model["faces"].long()] for model in models]
-        if uv_type == "vertex":
-            uv_bxpx2 = [model["vertex_uvs"][None] for model in models]
-        else:  # face uv
-            uv_bxpx2 = [model["face_uvs"][None] for model in models]
-            ft_fx3_list = [model["face_uv_ids"] for model in models]
-        texture_bx3xthxtw = [model["texture"][None] for model in models]
-
-        dib_ren_im, dib_ren_prob, _, dib_ren_mask = self.scene_ren.forward(
-            points=points,
-            uv_bxpx2=uv_bxpx2,
-            texture_bx3xthxtw=texture_bx3xthxtw,
-            ts=ts,
-            ft_fx3=ft_fx3_list,
-        )
-
-        ret["color"] = dib_ren_im.squeeze()
-        ret["prob"] = dib_ren_prob.squeeze()
-        ret["mask"] = dib_ren_mask.squeeze()
-
-        if with_depth:
-            # transform xyz
-            # NOTE: check whether it should be in [0, 1] (maybe need to record min, max and denormalize later)
-            if not isinstance(Rs, torch.Tensor):
-                Rs = torch.stack(Rs)  # list
-            if rot_type == "quat":
-                R_mats = quat2mat_torch(Rs)
-            else:
-                R_mats = Rs
-            xyzs = [
-                misc.transform_pts_Rt_th(model["vertices"], R_mats[_id], ts[_id])[None]
-                for _id, model in enumerate(models)
-            ]
-            dib_ren_vc_batch = DIBRenderer(height, width, mode="VertexColorMulti")
-            dib_ren_vc_batch.set_camera_parameters(self.scene_ren.camera_params)
-            ren_xyzs, _, _, _ = dib_ren_vc_batch.forward(points=points, colors=xyzs)
-            ret["depth"] = ren_xyzs[0, :, :, 2]  # hw
-
-        # color: hw3; mask: hw; depth: hw
-        return ret
-
-    def render_batch(
-        self,
-        Rs,
-        ts,
-        models,
-        *,
-        Ks,
-        width,
-        height,
-        znear=0.01,
-        zfar=100,
-        rot_type="mat",
-        mode=["color", "depth"],
-    ):
-        """render a batch (vertex color), each contain one object
-        Args:
-            Rs (tensor): [b,3,3] or [b,4]
-            ts (tensor): [b,3,]
-            models (list of dicts): each stores {"vertices":, "colors":, "faces":, }
-            Ks (tensor): [b,3,3]
-            mode: color, depth, mask, xyz (one or more must be given)
-        Returns:
-            dict:
-                color: bhw3
-                mask: bhw
-                depth: bhw
-                xyz: bhw3
-                probs: bhw
-        """
-        assert self.dib_ren.mode in ["VertexColorBatch"], self.dib_ren.mode
-        ret = {}
-        self.dib_ren.set_camera_parameters_from_RT_K(Rs, ts, Ks, height, width, near=znear, far=zfar, rot_type=rot_type)
-
-        colors = [model["colors"][None] for model in models]  # b x [1, p, 3]
-        points = [[model["vertices"][None], model["faces"].long()] for model in models]
-
-        # points: list of [vertices, faces]
-        # colors: list of colors
-        color, im_prob, _, im_mask = self.dib_ren.forward(points=points, colors=colors)
-        ret["color"] = color
-        ret["prob"] = im_prob.squeeze(-1)
-        ret["mask"] = im_mask.squeeze(-1)
-
-        if "depth" in mode:
-            # transform xyz
-            if not isinstance(Rs, torch.Tensor):
-                Rs = torch.stack(Rs)  # list
-            if rot_type == "quat":
-                R_mats = quat2mat_torch(Rs)
-            else:
-                R_mats = Rs
-            xyzs = [
-                misc.transform_pts_Rt_th(model["vertices"], R_mats[_id], ts[_id])[None]
-                for _id, model in enumerate(models)
-            ]
-            ren_xyzs, _, _, _ = self.dib_ren.forward(points=points, colors=xyzs)
-            ret["depth"] = ren_xyzs[:, :, :, 2]  # bhw
-
-        if "xyz" in mode:  # TODO: check this
-            obj_xyzs = [model["vertices"][None] for _id, model in enumerate(models)]
-            ren_obj_xyzs, _, _, _ = self.dib_ren.forward(points=points, colors=obj_xyzs)
-            ret["xyz"] = ren_obj_xyzs
-        return ret
-
-    def render_batch_tex(
-        self,
-        Rs,
-        ts,
-        models,
-        *,
-        Ks,
-        width,
-        height,
-        znear=0.01,
-        zfar=100,
-        uv_type="vertex",
-        rot_type="mat",
-        mode=["color", "depth"],
-    ):
-        """render a batch for textured objects
-        Args:
-            Rs: [b,3,3] or [b,4] tensor
-            ts: [b,3] tensor
-            models: list of dict, each stores
-                vertex uv: {"vertices":, "faces":, "texture":, "vertex_uvs":,}
-                face uv: {"vertices":, "faces":, "texture":, "face_uvs":, "face_uv_ids":,}
-            Ks: [b,3,3] or [3,3]
-            uv_type: `vertex` | `face`
-            mode: color, depth, mask, xyz (one or more must be given)
-        Returns:
-            dict:
-                color: bhw3
-                mask: bhw
-                depth: bhw
-                xyz: bhw3
-        """
-        assert self.dib_ren.mode in ["TextureBatch"], self.dib_ren.mode
-        ret = {}
-        self.dib_ren.set_camera_parameters_from_RT_K(Rs, ts, Ks, height, width, near=znear, far=zfar, rot_type=rot_type)
-        # points: list of [vertices, faces]
-        points = [[model["vertices"][None], model["faces"].long()] for model in models]
-        if uv_type == "vertex":
-            uv_bxpx2 = [model["vertex_uvs"][None] for model in models]
-        else:  # face uv
-            uv_bxpx2 = [model["face_uvs"][None] for model in models]
-            ft_fx3_list = [model["face_uv_ids"] for model in models]
-        texture_bx3xthxtw = [model["texture"][None] for model in models]
-
-        # points: list of [vertices, faces]
-        # colors: list of colors
-        dib_ren_im, dib_ren_prob, _, dib_ren_mask = self.dib_ren.forward(
-            points=points,
-            uv_bxpx2=uv_bxpx2,
-            texture_bx3xthxtw=texture_bx3xthxtw,
-            ft_fx3=ft_fx3_list,
-        )
-
-        ret["color"] = dib_ren_im
-        ret["prob"] = dib_ren_prob.squeeze(-1)  # bhw1 -> bhw
-        ret["mask"] = dib_ren_mask.squeeze(-1)  # bhw1 -> bhw
-
-        if "depth" in mode:
-            # transform xyz
-            # NOTE: check whether it should be in [0, 1] (maybe need to record min, max and denormalize later)
-            if not isinstance(Rs, torch.Tensor):
-                Rs = torch.stack(Rs)  # list
-            if rot_type == "quat":
-                R_mats = quat2mat_torch(Rs)
-            else:
-                R_mats = Rs
-            xyzs = [
-                misc.transform_pts_Rt_th(model["vertices"], R_mats[_id], ts[_id])[None]
-                for _id, model in enumerate(models)
-            ]
-            dib_ren_vc_batch = DIBRenderer(height, width, mode="VertexColorBatch")
-            dib_ren_vc_batch.set_camera_parameters(self.dib_ren.camera_params)
-            ren_xyzs, _, _, _ = dib_ren_vc_batch.forward(points=points, colors=xyzs)
-            if "depth" in mode:
-                ret["depth"] = ren_xyzs[:, :, :, 2]  # bhw
-
-        if "xyz" in mode:  # TODO: check this
-            obj_xyzs = [model["vertices"][None] for _id, model in enumerate(models)]
-            dib_ren_vc_batch = DIBRenderer(height, width, mode="VertexColorBatch")
-            dib_ren_vc_batch.set_camera_parameters(self.dib_ren.camera_params)
-            ren_obj_xyzs, _, _, _ = dib_ren_vc_batch.forward(points=points, colors=obj_xyzs)
-            ret["xyz"] = ren_obj_xyzs
-        return ret  # bxhxwx3 rgb, bhw prob/mask/depth
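-
-
-if __name__ == "__main__":
-    # Hedged batch-mode sketch, added for illustration: the intrinsics are
-    # typical placeholder values and the mesh is random; running it requires
-    # the compiled CUDA rasterizer.
-    K = torch.tensor([[572.4, 0.0, 325.3], [0.0, 573.6, 242.0], [0.0, 0.0, 1.0]], device="cuda")
-    Rs = torch.eye(3, device="cuda")[None]               # [1,3,3]
-    ts = torch.tensor([[0.0, 0.0, 0.5]], device="cuda")  # [1,3]
-    model = {
-        "vertices": torch.rand(100, 3, device="cuda") * 0.1,
-        "colors": torch.rand(100, 3, device="cuda"),
-        "faces": torch.randint(0, 100, (60, 3), device="cuda").int(),
-    }
-    ren = Renderer_dibr(480, 640, mode="VertexColorBatch")
-    out = ren.render_batch(Rs, ts, [model], Ks=K[None], width=640, height=480)
-    print(out["color"].shape, out["depth"].shape)  # (1,480,640,3) (1,480,640)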
diff --git a/lib/dr_utils/dib_renderer_x/utils/__init__.py b/lib/dr_utils/dib_renderer_x/utils/__init__.py
deleted file mode 100644
index 273a6eda20cc912f93d86e115b5fe7756b182962..0000000000000000000000000000000000000000
--- a/lib/dr_utils/dib_renderer_x/utils/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from .utils import *
-from .mesh import *
-from .perspective import *
-from .sphericalcoord import *
diff --git a/lib/dr_utils/dib_renderer_x/utils/mesh.py b/lib/dr_utils/dib_renderer_x/utils/mesh.py
deleted file mode 100644
index a8851f3db1cb91c408eee42ec8508be16cca8030..0000000000000000000000000000000000000000
--- a/lib/dr_utils/dib_renderer_x/utils/mesh.py
+++ /dev/null
@@ -1,481 +0,0 @@
-# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
-
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
-# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-# DEALINGS IN THE SOFTWARE.
-
-import os
-import torch
-import numpy as np
-
-
-##################################################################
-# faces begin from 0!!!
-def face2edge(facenp_fx3):
-    """facenp_fx3, int32 return edgenp_ex2, int32."""
-    f1 = facenp_fx3[:, 0:1]
-    f2 = facenp_fx3[:, 1:2]
-    f3 = facenp_fx3[:, 2:3]
-    e1 = np.concatenate((f1, f1, f2), axis=0)
-    e2 = np.concatenate((f2, f3, f3), axis=0)
-    edgenp_ex2 = np.concatenate((e1, e2), axis=1)
-    # sort & unique
-    edgenp_ex2 = np.sort(edgenp_ex2, axis=1)
-    edgenp_ex2 = np.unique(edgenp_ex2, axis=0)
-    return edgenp_ex2
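-
-# Worked example (added): two triangles sharing an edge,
-#   face2edge(np.array([[0, 1, 2], [1, 2, 3]], dtype=np.int32))
-# returns the 5 unique undirected edges:
-#   [[0, 1], [0, 2], [1, 2], [1, 3], [2, 3]]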
-
-
-def face2edge2(facenp_fx3, edgenp_ex2):
-    """facenp_fx3, int32 edgenp_ex2, int32 return face_fx3, int32 this face is
-    indexed by edge."""
-    fnum = facenp_fx3.shape[0]
-    enum = edgenp_ex2.shape[0]
-
-    edgesort = np.sort(edgenp_ex2, axis=1)
-    edgere_fx3 = np.zeros_like(facenp_fx3)
-    for i in range(fnum):
-        for j in range(3):
-            pbe, pen = facenp_fx3[i, j], facenp_fx3[i, (j + 1) % 3]
-            if pbe > pen:
-                pbe, pen = pen, pbe
-            cond = (edgesort[:, 0] == pbe) & (edgesort[:, 1] == pen)
-            idx = np.where(cond)[0]
-            edgere_fx3[i, j] = idx
-    return edgere_fx3
-
-
-def edge2face(facenp_fx3, edgenp_ex2):
-    """facenp_fx3, int32 edgenp_ex2, int32 return edgenp_ex2, int32 this edge
-    is indexed by face."""
-    fnum = facenp_fx3.shape[0]
-    enum = edgenp_ex2.shape[0]
-
-    facesort = np.sort(facenp_fx3, axis=1)
-    edgesort = np.sort(edgenp_ex2, axis=1)
-    edgere_ex2 = np.zeros_like(edgesort)
-    for i in range(enum):
-        pbe, pen = edgesort[i]
-        eid = 0
-        for j in range(fnum):
-            f1, f2, f3 = facesort[j]
-            cond1 = f1 == pbe and f2 == pen
-            cond2 = f1 == pbe and f3 == pen
-            cond3 = f2 == pbe and f3 == pen
-            if cond1 or cond2 or cond3:
-                edgere_ex2[i, eid] = j
-                eid += 1
-
-    return edgere_ex2
-
-
-def face2pneimtx(facenp_fx3):
-    """facenp_fx3, int32 return pointneighbourmtx, pxp, float32 will normalize!
-
-    assume it is a good mesh every point has more than one neighbour
-    """
-    pnum = np.max(facenp_fx3) + 1
-    pointneighbourmtx = np.zeros(shape=(pnum, pnum), dtype=np.float32)
-    for i in range(3):
-        be = i
-        en = (i + 1) % 3
-        idx1 = facenp_fx3[:, be]
-        idx2 = facenp_fx3[:, en]
-        pointneighbourmtx[idx1, idx2] = 1
-        pointneighbourmtx[idx2, idx1] = 1
-    pointneicount = np.sum(pointneighbourmtx, axis=1, keepdims=True)
-    assert np.all(pointneicount > 0)
-    pointneighbourmtx /= pointneicount
-    return pointneighbourmtx
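-
-# Illustration (added): the row-normalized adjacency supports simple Laplacian
-# smoothing of vertex positions, e.g.
-#   pneimtx = face2pneimtx(facenp_fx3)
-#   smoothed_px3 = pneimtx @ pointnp_px3  # each point -> mean of its neighbours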
-
-
-def face2pfmtx(facenp_fx3):
-    """facenp_fx3, int32 reutrn pfmtx, pxf, float32."""
-    pnum = np.max(facenp_fx3) + 1
-    fnum = facenp_fx3.shape[0]
-    pfmtx = np.zeros(shape=(pnum, fnum), dtype=np.float32)
-    for i, f in enumerate(facenp_fx3):
-        pfmtx[f[0], i] = 1
-        pfmtx[f[1], i] = 1
-        pfmtx[f[2], i] = 1
-    return pfmtx
-
-
-# upsample new points
-def meshresample(pointnp_px3, facenp_fx3, edgenp_ex2):
-    p1 = pointnp_px3[edgenp_ex2[:, 0], :]
-    p2 = pointnp_px3[edgenp_ex2[:, 1], :]
-    pmid = (p1 + p2) / 2
-    point2np_px3 = np.concatenate((pointnp_px3, pmid), axis=0)
-
-    # delete f
-    # add 4 new faces
-    face2np_fx3 = []
-    pnum = np.max(facenp_fx3) + 1
-    for f in facenp_fx3:
-        p1, p2, p3 = f
-        p12 = (edgenp_ex2 == (min(p1, p2), max(p1, p2))).all(axis=1).nonzero()[0] + pnum
-        p23 = (edgenp_ex2 == (min(p2, p3), max(p2, p3))).all(axis=1).nonzero()[0] + pnum
-        p31 = (edgenp_ex2 == (min(p3, p1), max(p3, p1))).all(axis=1).nonzero()[0] + pnum
-        face2np_fx3.append([p1, p12, p31])
-        face2np_fx3.append([p12, p2, p23])
-        face2np_fx3.append([p31, p23, p3])
-        face2np_fx3.append([p12, p23, p31])
-    face2np_fx3 = np.array(face2np_fx3, dtype=np.int64)
-    return point2np_px3, face2np_fx3
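-
-# Illustration (added): one subdivision step on a tetrahedron (p=4, f=4, e=6)
-# yields p + e = 10 points and 4 * f = 16 faces:
-#   edgenp_ex2 = face2edge(facenp_fx3)
-#   point2np_px3, face2np_fx3 = meshresample(pointnp_px3, facenp_fx3, edgenp_ex2)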
-
-
-def mtx2tfsparse(mtx):
-    m, n = mtx.shape
-    rows, cols = np.nonzero(mtx)
-    # N = rows.shape[0]
-    # value = np.ones(shape=(N,), dtype=np.float32)
-    value = mtx[rows, cols]
-    v = torch.FloatTensor(value)
-    i = torch.LongTensor(np.stack((rows, cols), axis=0))
-    tfspmtx = torch.sparse.FloatTensor(i, v, torch.Size([m, n]))
-    return tfspmtx
-
-
-################################################################
-def loadobj(meshfile):
-
-    v = []
-    f = []
-    meshfp = open(meshfile, "r")
-    for line in meshfp.readlines():
-        data = line.strip().split(" ")
-        data = [da for da in data if len(da) > 0]
-        if len(data) != 4:
-            continue
-        if data[0] == "v":
-            v.append([float(d) for d in data[1:]])
-        if data[0] == "f":
-            data = [da.split("/")[0] for da in data]
-            f.append([int(d) for d in data[1:]])
-    meshfp.close()
-
-    # torch need int64
-    facenp_fx3 = np.array(f, dtype=np.int64) - 1
-    pointnp_px3 = np.array(v, dtype=np.float32)
-    return pointnp_px3, facenp_fx3
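-
-# Round-trip sketch (added; "mesh.obj" is a placeholder path):
-#   pointnp_px3, facenp_fx3 = loadobj("mesh.obj")       # faces converted to 0-based
-#   savemesh(pointnp_px3, facenp_fx3, "mesh_copy.obj")  # written back 1-based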
-
-
-def loadobjcolor(meshfile):
-
-    v = []
-    vc = []
-    f = []
-    meshfp = open(meshfile, "r")
-    for line in meshfp.readlines():
-        data = line.strip().split(" ")
-        data = [da for da in data if len(da) > 0]
-        if data[0] == "v":
-            v.append([float(d) for d in data[1:4]])
-            if len(data) == 7:
-                vc.append([float(d) for d in data[4:7]])
-        if data[0] == "f":
-            data = [da.split("/")[0] for da in data]
-            f.append([int(d) for d in data[1:4]])
-    meshfp.close()
-
-    # torch need int64
-    facenp_fx3 = np.array(f, dtype=np.int64) - 1
-    pointnp_px3 = np.array(v, dtype=np.float32)
-    if len(vc) > 0:
-        vc = np.array(vc, dtype=np.float32)
-    else:
-        vc = np.ones_like(pointnp_px3)
-    return pointnp_px3, facenp_fx3, vc
-
-
-def loadobjtex(meshfile):
-
-    v = []
-    vt = []
-    f = []
-    ft = []
-    meshfp = open(meshfile, "r")
-    for line in meshfp.readlines():
-        data = line.strip().split(" ")
-        data = [da for da in data if len(da) > 0]
-        if not ((len(data) == 3) or (len(data) == 4) or (len(data) == 5)):
-            continue
-        if data[0] == "v":
-            if len(data) == 4:
-                v.append([float(d) for d in data[1:]])
-        if data[0] == "vt":
-            if len(data) == 3 or len(data) == 4:
-                vt.append([float(d) for d in data[1:3]])
-        if data[0] == "f":
-            data = [da.split("/") for da in data]
-            if len(data) == 4:
-                f.append([int(d[0]) for d in data[1:]])
-                # print(data[1:])
-                ft.append([int(d[1]) for d in data[1:]])
-            elif len(data) == 5:
-                idx1 = [1, 2, 3]
-                data1 = [data[i] for i in idx1]
-                f.append([int(d[0]) for d in data1])
-                ft.append([int(d[1]) for d in data1])
-                idx2 = [1, 3, 4]
-                data2 = [data[i] for i in idx2]
-                f.append([int(d[0]) for d in data2])
-                ft.append([int(d[1]) for d in data2])
-    meshfp.close()
-
-    # torch need int64
-    facenp_fx3 = np.array(f, dtype=np.int64) - 1
-    ftnp_fx3 = np.array(ft, dtype=np.int64) - 1
-    pointnp_px3 = np.array(v, dtype=np.float32)
-    uvs = np.array(vt, dtype=np.float32)
-    return pointnp_px3, facenp_fx3, uvs, ftnp_fx3
-
-
-def savemesh(pointnp_px3, facenp_fx3, fname, partinfo=None):
-
-    if partinfo is None:
-        fid = open(fname, "w")
-        for pidx, p in enumerate(pointnp_px3):
-            pp = p
-            fid.write("v %f %f %f\n" % (pp[0], pp[1], pp[2]))
-        for f in facenp_fx3:
-            f1 = f + 1
-            fid.write("f %d %d %d\n" % (f1[0], f1[1], f1[2]))
-        fid.close()
-    else:
-        fid = open(fname, "w")
-        for pidx, p in enumerate(pointnp_px3):
-            if partinfo[pidx, -1] == 0:
-                pp = p
-                color = [1, 0, 0]
-            else:
-                pp = p
-                color = [0, 0, 1]
-            fid.write("v %f %f %f %f %f %f\n" % (pp[0], pp[1], pp[2], color[0], color[1], color[2]))
-        for f in facenp_fx3:
-            f1 = f + 1
-            fid.write("f %d %d %d\n" % (f1[0], f1[1], f1[2]))
-        fid.close()
-    return
-
-
-def savemeshcolor(pointnp_px3, facenp_fx3, fname, color_px3=None):
-
-    if color_px3 is None:
-        fid = open(fname, "w")
-        for pidx, p in enumerate(pointnp_px3):
-            pp = p
-            fid.write("v %f %f %f\n" % (pp[0], pp[1], pp[2]))
-        for f in facenp_fx3:
-            f1 = f + 1
-            fid.write("f %d %d %d\n" % (f1[0], f1[1], f1[2]))
-        fid.close()
-    else:
-        fid = open(fname, "w")
-        for pidx, p in enumerate(pointnp_px3):
-            pp = p
-            color = color_px3[pidx]
-            fid.write("v %f %f %f %f %f %f\n" % (pp[0], pp[1], pp[2], color[0], color[1], color[2]))
-        for f in facenp_fx3:
-            f1 = f + 1
-            fid.write("f %d %d %d\n" % (f1[0], f1[1], f1[2]))
-        fid.close()
-    return
-
-
-def savemeshtes(pointnp_px3, tcoords_px2, facenp_fx3, fname):
-
-    import os
-
-    fol, na = os.path.split(fname)
-    na, _ = os.path.splitext(na)
-
-    matname = "%s/%s.mtl" % (fol, na)
-    fid = open(matname, "w")
-    fid.write("newmtl material_0\n")
-    fid.write("Kd 1 1 1\n")
-    fid.write("Ka 0 0 0\n")
-    fid.write("Ks 0.4 0.4 0.4\n")
-    fid.write("Ns 10\n")
-    fid.write("illum 2\n")
-    fid.write("map_Kd %s.png\n" % na)
-    fid.close()
-
-    fid = open(fname, "w")
-    fid.write("mtllib %s.mtl\n" % na)
-
-    for pidx, p in enumerate(pointnp_px3):
-        pp = p
-        fid.write("v %f %f %f\n" % (pp[0], pp[1], pp[2]))
-
-    for pidx, p in enumerate(tcoords_px2):
-        pp = p
-        fid.write("vt %f %f\n" % (pp[0], pp[1]))
-
-    fid.write("usemtl material_0\n")
-    for f in facenp_fx3:
-        f1 = f + 1
-        fid.write("f %d/%d %d/%d %d/%d\n" % (f1[0], f1[0], f1[1], f1[1], f1[2], f1[2]))
-    fid.close()
-
-    return
-
-
-def save_textured_mesh(
-    directory,
-    file_name,
-    vertex_pos_px3,
-    face_fx3,
-    tex_coord_px2,
-    normalize_tex_coord=False,
-    flip_vertical=False,
-    texture_bias=0.01,
-):
-    """Save a textured mesh. Assumes the texture is *already* saved into.
-
-    <directory> as <file_name>.png.
-
-    Args:
-        directory (str): The path to the folder containing the mesh to be saved.
-        file_name (str): The name of the mesh to be saved (without extension).
-            <file_name>.obj and <file_name>.mtl will be saved.
-
-        vertex_pos_px3 (numpy.ndarray): An array of shape (num_points, 3).
-            Denotes the vertex position.
-
-        face_fx3 (numpy.ndarray): An array of shape (num_faces, 3).
-            Specifies, for each face, which vertices are used.
-
-        tex_coord_px2 (numpy.ndarray): An array of shape (num_points, 2).
-            Specifies the texture coordinate of each vertex.
-            Each coordinate should be in the range [0, 1] or [-1, 1].
-            If the range is [-1, 1], set normalize_tex_coord to True.
-
-            NOTE: if this array is of the same format as specified for
-            torch.nn.functional.grid_sample(), set both normalize_tex_coord
-            and flip_vertical to True.
-
-        normalize_tex_coord (bool): Whether to normalize texture coordinates,
-            from [-1, 1] to [0, 1].
-
-        flip_vertical (bool): Whether to flip the texture coordinates vertically.
-
-        texture_bias (float): If positive, trim the edge of the texture by this
-            amount to avoid artifacts.
-    """
-    if os.path.splitext(file_name)[1]:
-        raise ValueError("file_name to save_textured_mesh cannot contain extension")
-
-    if file_name.find(" ") != -1:
-        raise ValueError("file_name cannot contain space")
-
-    obj_path = os.path.join(directory, file_name + ".obj")
-    mtl_path = os.path.join(directory, file_name + ".mtl")
-
-    with open(obj_path, "w") as obj_file:
-        obj_file.write("mtllib ./{}.mtl\n".format(file_name))
-
-        for pos in vertex_pos_px3:
-            obj_file.write("v {} {} {}\n".format(pos[0], pos[1], pos[2]))
-
-        for uv in tex_coord_px2:
-            if normalize_tex_coord:
-                uv = uv * 0.5 + 0.5  # map [-1, 1] to [0, 1], as documented above
-            uv = uv * (1.0 - texture_bias * 2.0) + texture_bias
-            obj_file.write("vt {} {}\n".format(uv[0], 1.0 - uv[1] if flip_vertical else uv[1]))
-
-        obj_file.write("usemtl material_0\n")
-
-        for i in range(face_fx3.shape[0]):
-            face = face_fx3[i] + 1
-            obj_file.write("f {0}/{0} {1}/{1} {2}/{2}\n".format(face[0], face[1], face[2]))
-
-    with open(mtl_path, "w") as mtl_file:
-        mtl_file.write(
-            """newmtl material_0
-Ka 0.200000 0.200000 0.200000
-Kd 1.000000 1.000000 1.000000
-Ks 1.000000 1.000000 1.000000
-map_Kd {}.png""".format(
-                file_name
-            )
-        )
-
-    return
-
-
-def saveobjscale(meshfile, scale, maxratio, shift=None):
-
-    mname, prefix = os.path.splitext(meshfile)
-    mnamenew = "%s-%.2f%s" % (mname, maxratio, prefix)
-
-    meshfp = open(meshfile, "r")
-    meshfp2 = open(mnamenew, "w")
-    for line in meshfp.readlines():
-        data = line.strip().split(" ")
-        data = [da for da in data if len(da) > 0]
-        if len(data) != 4:
-            meshfp2.write(line)
-            continue
-        else:
-            if data[0] == "v":
-                p = [scale * float(d) for d in data[1:]]
-                meshfp2.write("v %f %f %f\n" % (p[0], p[1], p[2]))
-            else:
-                meshfp2.write(line)
-                continue
-
-    meshfp.close()
-    meshfp2.close()
-
-    return
-
-
-if __name__ == "__main__":
-    import cv2
-
-    meshjson = "1.obj"
-
-    # f begin from 0!!!
-    pointnp_px3, facenp_fx3 = loadobj(meshjson)
-    assert np.max(facenp_fx3) == pointnp_px3.shape[0] - 1
-    assert np.min(facenp_fx3) == 0
-
-    pointnp_px3[:, 1] -= 0.05
-    X = pointnp_px3[:, 0]
-    Y = pointnp_px3[:, 1]
-    Z = pointnp_px3[:, 2]
-    h = 248 * (Y / Z) + 111.5
-    w = -248 * (X / Z) + 111.5
-
-    height = 224
-    width = 224
-    im = np.zeros(shape=(height, width), dtype=np.uint8)
-    for cir in zip(w, h):
-        cv2.circle(im, (int(cir[0]), int(cir[1])), 3, (255, 0, 0), -1)
-    cv2.imshow("", im)
-    cv2.waitKey()
-
-    # edge, neighbour and pfmtx
-    edgenp_ex2 = face2edge(facenp_fx3)
-
-    face_edgeidx_fx3 = face2edge2(facenp_fx3, edgenp_ex2)
-
-    pneimtx = face2pneimtx(facenp_fx3)
-    pfmtx = face2pfmtx(facenp_fx3)
-
-    # save
-    savemesh(pointnp_px3, facenp_fx3, "1s.obj")
diff --git a/lib/dr_utils/dib_renderer_x/utils/perspective.py b/lib/dr_utils/dib_renderer_x/utils/perspective.py
deleted file mode 100644
index 1865ea76e06ffec9f1cb4190b557a7faca7c5629..0000000000000000000000000000000000000000
--- a/lib/dr_utils/dib_renderer_x/utils/perspective.py
+++ /dev/null
@@ -1,149 +0,0 @@
-# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
-
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
-# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-# DEALINGS IN THE SOFTWARE.
-import torch
-import numpy as np
-
-
-def unit(v):
-    norm = np.linalg.norm(v)
-    if norm == 0:
-        return v
-    return v / norm
-
-
-def lookatnp(eye_3x1, center_3x1, up_3x1):
-    # the three direction vectors should be unit length
-    camz = center_3x1 - eye_3x1
-    camz /= np.sqrt(np.sum(camz**2))
-    camx = np.cross(camz[:, 0], up_3x1[:, 0]).reshape(3, 1)
-    camy = np.cross(camx[:, 0], camz[:, 0]).reshape(3, 1)
-
-    # camx and camy are not guaranteed to be unit length, hence the unit() calls
-    mtx = np.concatenate([unit(camx), unit(camy), -camz], axis=1).transpose()
-    shift = -(np.matmul(mtx, eye_3x1))
-    return mtx, shift
-
-
-def camera_info(param):
-    theta = np.deg2rad(param[0])
-    phi = np.deg2rad(param[1])
-
-    camY = param[3] * np.sin(phi)
-    temp = param[3] * np.cos(phi)
-    camX = temp * np.cos(theta)
-    camZ = temp * np.sin(theta)
-    cam_pos = np.array([camX, camY, camZ])
-
-    axisZ = cam_pos.copy()
-    axisY = np.array([0, 1, 0], dtype=np.float32)
-    axisX = np.cross(axisY, axisZ)
-    axisY = np.cross(axisZ, axisX)
-
-    # cam_mat = np.array([axisX, axisY, axisZ])
-    cam_mat = np.array([unit(axisX), unit(axisY), unit(axisZ)])
-
-    # for verify
-    # mtx, shift = lookatnp(cam_pos_3xb.reshape(3, 1), np.zeros(shape=(3, 1), dtype=np.float32), np.array([0,1,0], dtype=np.float32).reshape(3, 1))
-    # note, it is different from lookatnp
-    # new_p = mtx * old_p + shift
-    # new_p = cam_mat * (old_p - cam_pos)
-
-    return cam_mat, cam_pos
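-
-# Worked example (added): param = [azimuth_deg, elevation_deg, unused, distance];
-#   cam_mat, cam_pos = camera_info(np.array([0.0, 0.0, 0.0, 2.0]))
-# gives cam_pos == [2, 0, 0] and cam_mat rows equal to the unit camera axes.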
-
-
-#####################################################
-def perspectiveprojectionnp(fovy, ratio=1.0, near=0.01, far=10.0):
-    """
-    fovy: radian, 2 * atan2(h, 2*fy)
-    ratio: aspect_ratio, w/h, typically 4/3
-    """
-    tanfov = np.tan(fovy / 2.0)  # h/(2*fy)
-    # top = near * tanfov
-    # right = ratio * top
-    # mtx = [near / right, 0, 0, 0, \
-    #          0, near / top, 0, 0, \
-    #          0, 0, -(far+near)/(far-near), -2*far*near/(far-near), \
-    #          0, 0, -1, 0]
-    mtx = [
-        [1.0 / (ratio * tanfov), 0, 0, 0],
-        [0, 1.0 / tanfov, 0, 0],
-        [0, 0, -(far + near) / (far - near), -2 * far * near / (far - near)],
-        [0, 0, -1.0, 0],
-    ]
-    # return np.array(mtx, dtype=np.float32)
-    # 2*fy/h/ratio=2*fy/w, 2*fy/h
-    return np.array([[1.0 / (ratio * tanfov)], [1.0 / tanfov], [-1]], dtype=np.float32)
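-
-# Worked example (added): for a 640x480 image with fy = 573.6, the docstring's
-# relation fovy = 2 * atan2(h, 2*fy) gives
-#   fovy = 2 * np.arctan2(480.0, 2 * 573.6)        # ~0.793 rad (~45.4 deg)
-#   perspectiveprojectionnp(fovy, ratio=640.0 / 480.0)
-#   # -> [[2*fy/w], [2*fy/h], [-1]] ~= [[1.7925], [2.39], [-1.0]]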
-
-
-def projectiveprojection_real(cam, x0, y0, w, h, nc=0.01, fc=10.0):
-    # this is for center view
-    # NOTE: returns the transposed 4x4 projection matrix, not a 3x1 vector
-    q = -(fc + nc) / float(fc - nc)
-    qn = -2 * (fc * nc) / float(fc - nc)
-    fx = cam[0, 0]
-    fy = cam[1, 1]
-    px = cam[0, 2]
-    py = cam[1, 2]
-    """
-    # transpose: compensate for the flipped image
-    proj_T = [
-            [2*fx/w,          0,                0,  0],
-            [0,               2*fy/h,           0,  0],
-            [(-2*px+w+2*x0)/w, (2*py-h+2*y0)/h, q,  -1],
-            [0,               0,                qn, 0],
-        ]
-        sometimes: P[1,:] *= -1, P[2,:] *= -1
-        # Third column is standard glPerspective and sets near and far planes
-    """
-    # Draw our images upside down, so that all the pixel-based coordinate systems are the same
-    if isinstance(cam, np.ndarray):
-        proj_T = np.zeros((4, 4), dtype=np.float32)
-    elif isinstance(cam, torch.Tensor):
-        proj_T = torch.zeros(4, 4).to(cam)
-    else:
-        raise TypeError("cam should be ndarray or tensor, got {}".format(type(cam)))
-    proj_T[0, 0] = 2 * fx / w
-    proj_T[1, 0] = -2 * cam[0, 1] / w  # =0
-    proj_T[1, 1] = 2 * fy / h
-    proj_T[2, 0] = (-2 * px + w + 2 * x0) / w
-    proj_T[2, 1] = (+2 * py - h + 2 * y0) / h
-    proj_T[2, 2] = q
-    proj_T[3, 2] = qn
-    proj_T[2, 3] = -1.0
-    return proj_T
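-
-# Hedged usage sketch (added; the intrinsics are placeholder values): build
-# the transposed 4x4 OpenGL-style projection from a pinhole K,
-#   K = np.array([[572.4, 0, 325.3], [0, 573.6, 242.0], [0, 0, 1]], dtype=np.float32)
-#   proj_T_4x4 = projectiveprojection_real(K, x0=0, y0=0, w=640, h=480)
-# and use it with perspective_projection_real, which multiplies row vectors
-# on the left: xy = [x y z 1] @ proj_T.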
-
-
-#####################################################
-def camera_info_batch(param_bx4):
-
-    bnum = param_bx4.shape[0]
-    cam_mat_bx3x3 = []
-    cam_pos_bx3 = []
-
-    for i in range(bnum):
-        param = param_bx4[i]
-        cam_mat, cam_pos = camera_info(param)
-        cam_mat_bx3x3.append(cam_mat)
-        cam_pos_bx3.append(cam_pos)
-
-    cam_mat_bx3x3 = np.stack(cam_mat_bx3x3, axis=0)
-    cam_pos_bx3 = np.stack(cam_pos_bx3, axis=0)
-
-    return cam_mat_bx3x3, cam_pos_bx3
diff --git a/lib/dr_utils/dib_renderer_x/utils/sphericalcoord.py b/lib/dr_utils/dib_renderer_x/utils/sphericalcoord.py
deleted file mode 100644
index 0b0b0f4fde709f4a560a50650f217b88658a8d6e..0000000000000000000000000000000000000000
--- a/lib/dr_utils/dib_renderer_x/utils/sphericalcoord.py
+++ /dev/null
@@ -1,110 +0,0 @@
-# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
-
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
-# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-# DEALINGS IN THE SOFTWARE.
-
-import numpy as np
-
-
-##################################################################
-# symmetric over z axis
-def get_spherical_coords_z(X):
-    # X is N x 3
-    rad = np.linalg.norm(X, axis=1)
-    # Inclination
-    theta = np.arccos(X[:, 2] / rad)
-    # Azimuth
-    phi = np.arctan2(X[:, 1], X[:, 0])
-
-    # Normalize both to be between [-1, 1]
-    vv = (theta / np.pi) * 2 - 1
-    uu = ((phi + np.pi) / (2 * np.pi)) * 2 - 1
-    # Return N x 2
-    return np.stack([uu, vv], 1)
-
-
-# symmetric over x axis
-def get_spherical_coords_x(X):
-    # X is N x 3
-    rad = np.linalg.norm(X, axis=1)
-    # Inclination, measured from the +x axis:
-    # x/rad == 1  -> theta = 0
-    # x/rad == -1 -> theta = pi
-    theta = np.arccos(X[:, 0] / rad)
-    # Azimuth
-    phi = np.arctan2(X[:, 2], X[:, 1])
-
-    # Normalize both to be between [-1, 1]
-    uu = (theta / np.pi) * 2 - 1
-    vv = ((phi + np.pi) / (2 * np.pi)) * 2 - 1
-    # Return N x 2
-    return np.stack([uu, vv], 1)
-
-
-# symmetric spherical projection
-def get_symmetric_spherical_tex_coords(vertex_pos, symmetry_axis=1, up_axis=2, front_axis=0):
-    # vertex_pos is N x 3
-    length = np.linalg.norm(vertex_pos, axis=1)
-    # Inclination
-    theta = np.arccos(vertex_pos[:, front_axis] / length)
-    # Azimuth
-    phi = np.abs(np.arctan2(vertex_pos[:, symmetry_axis], vertex_pos[:, up_axis]))
-
-    # Normalize both to be between [-1, 1]
-    uu = (theta / np.pi) * 2 - 1
-    # vv = ((phi + np.pi) / (2 * np.pi)) * 2 - 1
-    vv = (phi / np.pi) * 2 - 1
-    # Return N x 2
-    return np.stack([uu, vv], 1)
-
-
-#########################################################################
-if __name__ == "__main__":
-
-    from utils.utils_mesh import loadobj, savemeshtes
-    import cv2
-
-    p, f = loadobj("2.obj")
-    uv = get_spherical_coords_x(p)
-    uv[:, 0] = -uv[:, 0]
-
-    uv[:, 1] = -uv[:, 1]
-    uv = (uv + 1) / 2
-    savemeshtes(p, uv, f, "./2_x.obj")
-
-    tex = np.zeros(shape=(256, 512, 3), dtype=np.uint8)
-    font = cv2.FONT_HERSHEY_SIMPLEX
-    bottomLeftCornerOfText = (10, 200)
-    fontScale = 5
-    fontColor = (0, 255, 255)
-    thickness = 2  # NOTE: the 7th positional arg of cv2.putText is thickness, not lineType
-
-    cv2.putText(
-        tex,
-        "Hello World!",
-        bottomLeftCornerOfText,
-        font,
-        fontScale,
-        fontColor,
-        thickness,
-    )
-    cv2.imshow("", tex)
-    cv2.waitKey()
-    cv2.imwrite("2_x.png", np.transpose(tex, [1, 0, 2]))
diff --git a/lib/dr_utils/dib_renderer_x/utils/utils.py b/lib/dr_utils/dib_renderer_x/utils/utils.py
deleted file mode 100644
index ecd5f6dec4e3c5893a881b0a544ffefe3620a564..0000000000000000000000000000000000000000
--- a/lib/dr_utils/dib_renderer_x/utils/utils.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
-
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
-# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-# DEALINGS IN THE SOFTWARE.
-
-from __future__ import print_function
-from __future__ import division
-
-import torch
-import torch.nn
-
-eps = 1e-15
-
-
-##################################################
-def datanormalize(data, axis):
-    datalen = torch.sqrt(torch.sum(data**2, dim=axis, keepdim=True))
-    return data / (datalen + eps)
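datanormalize is an eps-stabilized L2 normalization along one axis. A quick sketch with assumed shapes:

    import torch

    normals = torch.randn(1, 1000, 3)      # [b, n, 3] unnormalized vectors
    unit = datanormalize(normals, axis=2)  # each vector now has L2 norm ~= 1
    print(unit.norm(dim=2).mean())         # ~1.0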
diff --git a/lib/dr_utils/dr_utils.py b/lib/dr_utils/dr_utils.py
deleted file mode 100644
index 0a4c2f106efeb0c289839db00eb8cb49c22025f0..0000000000000000000000000000000000000000
--- a/lib/dr_utils/dr_utils.py
+++ /dev/null
@@ -1,241 +0,0 @@
-# differentiable renderer utils
-import cv2
-import numpy as np
-import torch
-import os.path as osp
-from tqdm import tqdm
-import hashlib
-import logging
-import mmcv
-from .rep import TriangleMesh
-from .dib_renderer_x import DIBRenderer
-from core.utils.pose_utils import quat2mat_torch
-from lib.utils.utils import iprint
-from lib.pysixd import misc
-
-
-def load_objs(
-    obj_paths,
-    texture_paths=None,
-    height=480,
-    width=640,
-    tex_resize=True,
-    tex_fmt="CHW",
-    tex_vflip=False,
-):
-    """
-    NOTE: width and height are ignored when tex_resize=False
-    """
-    assert all([".obj" in _path for _path in obj_paths])
-
-    if texture_paths is not None:
-        assert len(obj_paths) == len(texture_paths)
-    models = []
-    for i, obj_path in enumerate(tqdm(obj_paths)):
-        model = {}
-        mesh = TriangleMesh.from_obj(obj_path)
-        vertices = mesh.vertices[:, :3]  # x,y,z
-        colors = mesh.vertices[:, 3:6]  # rgb
-        faces = mesh.faces.int()
-        ###########################
-        # normalize verts ( - center)
-        ###########################
-        vertices_max = vertices.max()
-        vertices_min = vertices.min()
-        vertices_middle = (vertices_max + vertices_min) / 2.0
-        vertices = vertices - vertices_middle
-        model["vertices"] = vertices[None, :, :].cuda()
-        model["colors"] = colors[None, :, :].cuda()
-        model["faces"] = faces[None, :, :].cuda()  # NOTE: -1
-        if texture_paths is not None:
-            uvs = mesh.uvs
-            face_textures = mesh.face_textures  # NOTE: uv indices, already 0-based
-            assert osp.exists(texture_paths[i]), texture_paths[i]
-            if tex_vflip:
-                texture = cv2.imread(texture_paths[i], cv2.IMREAD_COLOR)[::-1, :, ::-1].astype(np.float32) / 255.0
-            else:
-                texture = cv2.imread(texture_paths[i], cv2.IMREAD_COLOR)[:, :, ::-1].astype(np.float32) / 255.0
-            if tex_resize:
-                texture = cv2.resize(texture, (width, height), interpolation=cv2.INTER_AREA)
-            # print('texture map: ', texture.shape)
-            if tex_fmt == "CHW":
-                texture = torch.from_numpy(texture.transpose(2, 0, 1)[None, :, :, :]).cuda()
-            else:  # HWC
-                texture = torch.from_numpy(texture[None, :, :, :]).cuda()
-            model["face_uvs"] = uvs[None, :, :].cuda()
-            model["face_uv_ids"] = face_textures[None, :, :].cuda()
-            model["texture"] = texture.cuda()
-        models.append(model)
-
-    return models
-
-
-def render_dib_vc_batch(
-    ren,
-    Rs,
-    ts,
-    Ks,
-    obj_ids,
-    models,
-    rot_type="quat",
-    H=480,
-    W=640,
-    near=0.01,
-    far=100.0,
-    with_depth=False,
-):
-    """
-    Args:
-        ren: A DIB-renderer
-        models: All models loaded by load_objs
-    """
-    assert ren.mode in ["VertexColorBatch"], ren.mode
-    bs = len(Rs)
-    if len(Ks) == 1:
-        Ks = [Ks[0] for _ in range(bs)]
-    ren.set_camera_parameters_from_RT_K(Rs, ts, Ks, height=H, width=W, near=near, far=far, rot_type=rot_type)
-    colors = [models[_id]["colors"] for _id in obj_ids]  # b x [1, p, 3]
-    points = [[models[_id]["vertices"], models[_id]["faces"][0].long()] for _id in obj_ids]
-
-    # points: list of [vertices, faces]
-    # colors: list of colors
-    predictions, im_probs, _, im_masks = ren.forward(points=points, colors=colors)
-    if with_depth:
-        # transform xyz
-        if not isinstance(Rs, torch.Tensor):
-            Rs = torch.stack(Rs)  # list
-        if rot_type == "quat":
-            R_mats = quat2mat_torch(Rs)
-        else:
-            R_mats = Rs
-        xyzs = [
-            misc.transform_pts_Rt_th(models[obj_id]["vertices"][0], R_mats[_id], ts[_id])[None]
-            for _id, obj_id in enumerate(obj_ids)
-        ]
-        ren_xyzs, _, _, _ = ren.forward(points=points, colors=xyzs)
-        depth = ren_xyzs[:, :, :, 2]  # bhw
-    else:
-        depth = None
-    # bxhxwx3 rgb, bhw1 prob, bhw1 mask, bhw depth
-    return predictions, im_probs, im_masks, depth
-
-
-def render_dib_tex_batch(
-    ren,
-    Rs,
-    ts,
-    Ks,
-    obj_ids,
-    models,
-    rot_type="quat",
-    H=480,
-    W=640,
-    near=0.01,
-    far=100.0,
-    with_depth=False,
-):
-    assert ren.mode in ["TextureBatch"], ren.mode
-    bs = len(Rs)
-    if len(Ks) == 1:
-        Ks = [Ks[0] for _ in range(bs)]
-    ren.set_camera_parameters_from_RT_K(Rs, ts, Ks, height=H, width=W, near=near, far=far, rot_type=rot_type)
-    # points: list of [vertices, faces]
-    points = [[models[_id]["vertices"], models[_id]["faces"][0].long()] for _id in obj_ids]
-    uv_bxpx2 = [models[_id]["face_uvs"] for _id in obj_ids]
-    texture_bx3xthxtw = [models[_id]["texture"] for _id in obj_ids]
-    ft_fx3_list = [models[_id]["face_uv_ids"][0] for _id in obj_ids]
-
-    # points: list of [vertices, faces]
-    # colors: list of colors
-    dib_ren_im, dib_ren_prob, _, dib_ren_mask = ren.forward(
-        points=points,
-        uv_bxpx2=uv_bxpx2,
-        texture_bx3xthxtw=texture_bx3xthxtw,
-        ft_fx3=ft_fx3_list,
-    )
-
-    if with_depth:
-        # transform xyz
-        if not isinstance(Rs, torch.Tensor):
-            Rs = torch.stack(Rs)  # list
-        if rot_type == "quat":
-            R_mats = quat2mat_torch(Rs)
-        else:
-            R_mats = Rs
-        xyzs = [
-            misc.transform_pts_Rt_th(models[obj_id]["vertices"][0], R_mats[_id], ts[_id])[None]
-            for _id, obj_id in enumerate(obj_ids)
-        ]
-        dib_ren_vc_batch = DIBRenderer(height=H, width=W, mode="VertexColorBatch")
-        dib_ren_vc_batch.set_camera_parameters(ren.camera_params)
-        ren_xyzs, _, _, _ = dib_ren_vc_batch.forward(points=points, colors=xyzs)
-        depth = ren_xyzs[:, :, :, 2]  # bhw
-    else:
-        depth = None
-    return (
-        dib_ren_im,
-        dib_ren_prob,
-        dib_ren_mask,
-        depth,
-    )  # bxhxwx3 rgb, bhw1 prob/mask, bhw depth
-
-
-def render_dib_vc_multi(
-    ren,
-    Rs,
-    ts,
-    K,
-    obj_ids,
-    models,
-    rot_type="quat",
-    H=480,
-    W=640,
-    near=0.01,
-    far=100.0,
-):
-    assert ren.mode in ["VertexColorMulti"], ren.mode
-    ren.set_camera_parameters_from_RT_K(Rs, ts, K, height=H, width=W, near=near, far=far, rot_type=rot_type)
-    colors = [models[_id]["colors"] for _id in obj_ids]  # b x [1, p, 3]
-    points = [[models[_id]["vertices"], models[_id]["faces"][0].long()] for _id in obj_ids]
-
-    # points: list of [vertices, faces]
-    # colors: list of colors
-    predictions, im_prob, _, im_mask = ren.forward(points=points, colors=colors)
-    # TODO: add depth
-    return predictions, im_prob, im_mask  # 1xhxwx3 rgb, (1,h,w,1) prob/mask
-
-
-def render_dib_tex_multi(
-    ren,
-    Rs,
-    ts,
-    K,
-    obj_ids,
-    models,
-    rot_type="quat",
-    H=480,
-    W=640,
-    near=0.01,
-    far=100.0,
-):
-    assert ren.mode in ["TextureMulti"], ren.mode
-    ren.set_camera_parameters_from_RT_K(Rs, ts, K, height=H, width=W, near=near, far=far, rot_type=rot_type)
-    # points: list of [vertices, faces]
-    points = [[models[_id]["vertices"], models[_id]["faces"][0].long()] for _id in obj_ids]
-    uv_bxpx2 = [models[_id]["face_uvs"] for _id in obj_ids]
-    texture_bx3xthxtw = [models[_id]["texture"] for _id in obj_ids]
-    ft_fx3_list = [models[_id]["face_uv_ids"][0] for _id in obj_ids]
-
-    dib_ren_im, dib_ren_prob, _, dib_ren_mask = ren.forward(
-        points=points,
-        uv_bxpx2=uv_bxpx2,
-        texture_bx3xthxtw=texture_bx3xthxtw,
-        ts=ts,
-        ft_fx3=ft_fx3_list,
-    )
-    # TODO: add depth
-    return (
-        dib_ren_im,
-        dib_ren_prob,
-        dib_ren_mask,
-    )  # 1xhxwx3 rgb, (1,h,w,1) prob/mask
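An end-to-end sketch wiring load_objs and render_dib_vc_batch together; the object paths, poses, and intrinsics are placeholders, not values from this repository:

    import torch

    models = load_objs(["obj_000001.obj", "obj_000002.obj"])  # hypothetical paths
    ren = DIBRenderer(height=480, width=640, mode="VertexColorBatch")
    Rs = torch.tensor([[1.0, 0.0, 0.0, 0.0]] * 2).cuda()      # identity quaternions
    ts = torch.tensor([[0.0, 0.0, 0.5]] * 2).cuda()           # 0.5 m in front of the camera
    K = torch.tensor([[572.4, 0.0, 325.3], [0.0, 573.6, 242.0], [0.0, 0.0, 1.0]]).cuda()
    color, prob, mask, depth = render_dib_vc_batch(
        ren, Rs, ts, [K], obj_ids=[0, 1], models=models, rot_type="quat", with_depth=True
    )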
diff --git a/lib/dr_utils/nvdr/__init__.py b/lib/dr_utils/nvdr/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/lib/dr_utils/nvdr/renderer_nvdr.py b/lib/dr_utils/nvdr/renderer_nvdr.py
deleted file mode 100644
index 04aae0865985ff0efd057bf55e716e56686458d1..0000000000000000000000000000000000000000
--- a/lib/dr_utils/nvdr/renderer_nvdr.py
+++ /dev/null
@@ -1,1001 +0,0 @@
-import os
-
-import cv2
-import numpy as np
-import nvdiffrast.torch as dr
-import torch
-from pytorch3d.structures import Meshes, list_to_packed, list_to_padded
-from tqdm import tqdm
-
-from core.utils.pose_utils import quat2mat_torch
-from lib.pysixd import inout, misc
-
-
-_GPU_ID = int(os.environ.get("CUDA_VISIBLE_DEVICES", "0").split(",")[0])
-
-
-def _list_to_packed(x):
-    """return packed tensor and ranges."""
-    (
-        x_packed,
-        num_items,
-        item_packed_first_idx,
-        item_packed_to_list_idx,
-    ) = list_to_packed(x)
-    ranges = torch.stack([item_packed_first_idx, num_items], dim=1).to(dtype=torch.int32, device="cpu")
-    return x_packed, ranges
-
-
-def _get_color_depth_xyz_code(mode):
-    all_modes = ["color", "depth", "xyz"]
-    return [int(_m in mode) for _m in all_modes]
-
-
-def _get_depth_xyz_code(mode):
-    all_modes = ["depth", "xyz"]
-    return [int(_m in mode) for _m in all_modes]
-
-
-def load_ply_models(
-    model_paths,
-    texture_paths=None,
-    vertex_scale=0.001,
-    device="cuda",
-    width=512,
-    height=512,
-    tex_resize=False,
-):
-    """
-    NOTE: width and height are ignored when tex_resize=False
-    Args:
-        vertex_scale: default 0.001 is used for bop models!
-        tex_resize: resize the texture to smaller size for GPU memory saving
-    Returns:
-        a list of dicts
-    """
-    ply_models = [inout.load_ply(model_path, vertex_scale=vertex_scale) for model_path in model_paths]
-    models = []
-    for i, ply_model in enumerate(tqdm(ply_models)):
-        vertices = ply_model["pts"]
-        faces = ply_model["faces"]
-        if "colors" in ply_model:
-            colors = ply_model["colors"]
-            if colors.max() > 1.1:
-                colors = colors / 255.0
-        else:
-            colors = np.zeros_like(vertices)
-            colors[:, 0] = 223.0 / 255
-            colors[:, 1] = 214.0 / 255
-            colors[:, 2] = 205.0 / 255
-
-        if texture_paths is not None:
-            texture_path = texture_paths[i]
-            texture = cv2.imread(texture_path, cv2.IMREAD_COLOR)[::-1, :, ::-1].astype(np.float32) / 255.0
-            if tex_resize:
-                texture = cv2.resize(texture, (width, height), interpolation=cv2.INTER_AREA)
-            # print('texture map: ', texture.shape)
-            texture = torch.tensor(texture, device=device, dtype=torch.float32)
-            # uv coordinates for vertices
-            texture_uv = torch.tensor(ply_model["texture_uv"].astype("float32"), device=device)
-        else:
-            texture = None
-            texture_uv = None
-
-        vertices = torch.tensor(np.ascontiguousarray(vertices.astype("float32")), device=device)
-        # for ply, already 0-based
-        faces = torch.tensor(faces.astype("int32"), device=device)
-        colors = torch.tensor(colors.astype("float32"), device=device)
-
-        models.append(
-            {
-                "vertices": vertices,
-                "faces": faces,
-                "colors": colors,
-                "texture": texture,
-                "vertex_uvs": texture_uv,
-            }
-        )
-
-    return models
-
-
-class Renderer_nvdr(object):
-    def __init__(self, output_db=True, glctx_mode="manual", device="cuda", gpu_id=None):
-        """output_db (bool): Compute and output image-space derivates of
-        barycentrics.
-
-        glctx_mode: OpenGL context handling mode. Valid values are 'manual' and 'automatic'.
-        """
-        if glctx_mode == "auto":
-            glctx_mode = "automatic"
-        assert glctx_mode in ["automatic", "manual"], glctx_mode
-        self._glctx_mode = glctx_mode
-
-        self.output_db = output_db
-        self._diff_attrs = "all" if output_db else None
-        self._glctx = dr.RasterizeGLContext(output_db=output_db, mode=glctx_mode, device=gpu_id)
-        if glctx_mode == "manual":
-            self._glctx.set_context()
-        self._device = device
-
-        self._V = self._model_view()  # view matrix, (I_4x4)
-
-    def _transform_pos(self, mtx, pos):
-        """# Transform vertex positions to clip space
-        Args:
-            mtx: transform matrix [4, 4]
-            pos: [n,3] vertices
-        Returns:
-            [1,n,4]
-        """
-        assert pos.shape[1] == 3, pos.shape
-        t_mtx = torch.from_numpy(mtx).to(device=self._device) if isinstance(mtx, np.ndarray) else mtx
-        # (x,y,z) -> (x,y,z,1)
-        posw = torch.cat([pos, torch.ones([pos.shape[0], 1]).to(device=self._device)], dim=1)
-        # (n,4)x(4, 4)-->(1,n,4)
-        return torch.matmul(posw, t_mtx.t())[None, ...]
-
-    def _transform_pos_batch(self, mtx, pos):
-        """# Transform vertex positions to clip space
-        Args:
-            mtx: transform matrix [B, 4, 4]
-            pos: [B,n,3] vertices
-        Returns:
-            [B,n,4]
-        """
-        bs = mtx.shape[0]
-        assert pos.ndim == 3 and pos.shape[-1] == 3, pos.shape
-        t_mtx = torch.from_numpy(mtx).to(device=self._device) if isinstance(mtx, np.ndarray) else mtx
-        # (x,y,z) -> (x,y,z,1)
-        num_pts = pos.shape[1]
-        _ones = torch.ones([bs, num_pts, 1]).to(device=self._device, dtype=pos.dtype)
-        posw = torch.cat([pos, _ones], dim=-1)  # [B,n,4]
-        # (B,n,4)x(B,4,4)-->(B,n,4); Tensor.t() only supports <=2-D tensors,
-        # so transpose the batched matrices explicitly
-        return torch.matmul(posw, t_mtx.transpose(1, 2))
-
-    def _get_poses(self, Rs, ts, rot_type="mat"):
-        assert rot_type in ["mat", "quat"], rot_type
-        if rot_type == "quat":
-            rots = quat2mat_torch(Rs)
-        else:
-            rots = Rs
-        num = rots.shape[0]
-        assert ts.shape[0] == num, ts.shape
-        dtype = rots.dtype
-        poses = torch.cat([rots, ts.view(num, 3, 1)], dim=2)  # [num_objs,3,4]
-        poses_4x4 = torch.eye(4).repeat(num, 1, 1).to(poses)
-        poses_4x4[:, :3, :] = poses
-        return poses_4x4.to(device=self._device, dtype=dtype)
-
-    def _model_view(self):
-        V = np.eye(4)
-        V = np.ascontiguousarray(V, np.float32)
-        return torch.tensor(V, device=self._device)
-
-    def _projection(self, x=0.1, n=0.01, f=50.0):
-        P = np.array(
-            [
-                [n / x, 0, 0, 0],
-                [0, n / -x, 0, 0],
-                [0, 0, -(f + n) / (f - n), -(2 * f * n) / (f - n)],
-                [0, 0, -1, 0],
-            ]
-        ).astype(np.float32)
-        return torch.tensor(np.ascontiguousarray(P, np.float32), device=self._device)
-
-    def _projection_real(self, cam, x0, y0, w, h, nc=0.01, fc=10.0):
-        # this is for the centered view
-        # NOTE: returns the full 4x4 OpenGL projection matrix (built as its
-        # transpose proj_T, then transposed back before returning)
-        q = -(fc + nc) / float(fc - nc)
-        qn = -2 * (fc * nc) / float(fc - nc)
-        fx = cam[0, 0]
-        fy = cam[1, 1]
-        px = cam[0, 2]
-        # HACK: lm: -4, ycbv: -2.5
-        py = cam[1, 2]  # + self.v_offset
-        """
-        # transpose: compensate for the flipped image
-        proj_T = [
-                [2*fx/w,          0,                0,  0],
-                [0,               2*fy/h,           0,  0],
-                [(-2*px+w+2*x0)/w, (2*py-h+2*y0)/h, q,  -1],
-                [0,               0,                qn, 0],
-            ]
-            sometimes: proj_T[1,:] *= -1, proj_T[2,:] *= -1
-            # Third column is standard glPerspective and sets near and far planes
-        """
-        # Draw our images upside down, so that all the pixel-based coordinate systems are the same
-        if isinstance(cam, np.ndarray):
-            proj_T = np.zeros((4, 4), dtype=np.float32)
-        elif isinstance(cam, torch.Tensor):
-            proj_T = torch.zeros(4, 4).to(cam)
-        else:
-            raise TypeError("cam should be ndarray or tensor, got {}".format(type(cam)))
-        proj_T[0, 0] = 2 * fx / w
-        proj_T[1, 0] = -2 * cam[0, 1] / w  # skew term, usually 0
-        proj_T[1, 1] = 2 * fy / h
-        proj_T[2, 0] = (-2 * px + w + 2 * x0) / w
-        proj_T[2, 1] = (+2 * py - h + 2 * y0) / h
-        proj_T[2, 2] = q
-        proj_T[3, 2] = qn
-        proj_T[2, 3] = -1.0
-
-        proj_T[2, :] *= -1
-        if isinstance(cam, np.ndarray):
-            P = proj_T.T
-            return torch.tensor(np.ascontiguousarray(P, np.float32), device=self._device)
-        elif isinstance(cam, torch.Tensor):
-            P = proj_T.t()
-            return P.contiguous().to(device=self._device)
-        else:
-            raise TypeError("cam should be ndarray or tensor, got {}".format(type(cam)))
-
-    def close(self):
-        if self._glctx_mode == "manual":
-            self._glctx.release_context()
-
-    def render_scene(
-        self,
-        Rs,
-        ts,
-        models,
-        *,
-        K,
-        width,
-        height,
-        znear=0.01,
-        zfar=100,
-        rot_type="mat",
-        antialias=True,
-        with_mask=False,
-        with_depth=True,
-    ):
-        """render a scene with m>=1 objects
-        Args:
-            Rs: [m,3,3] or [m,4] tensor
-            ts: [m,3,] tensor
-            models: list of dicts, each stores {"vertices":, "colors":, "faces":, }
-            K: [3,3]
-        Returns:
-            a dict:
-                color: (h,w,3)
-                mask: (h,w) fg mask
-                depth: (h,w)
-        """
-        P = self._projection_real(cam=K, x0=0, y0=0, w=width, h=height, nc=znear, fc=zfar)
-        # Modelview + projection matrix.
-        mvp = torch.matmul(P, self._V)  # [4,4]
-        assert rot_type in ["mat", "quat"], f"Unknown rot_type: {rot_type}"
-        poses_4x4 = self._get_poses(Rs, ts, rot_type=rot_type)  # [m,4,4]
-        mtx = torch.matmul(mvp.view(1, 4, 4), poses_4x4)  # [m,4,4]
-
-        vertices_list = [torch.squeeze(model["vertices"]) for model in models]
-        nvert_list = [_v.shape[0] for _v in vertices_list]
-        vert_offset_list = [0] + np.cumsum(nvert_list).tolist()[:-1]
-
-        model_colors_list = [torch.squeeze(model["colors"]) for model in models]
-        if with_depth:
-            pc_cam_list = [misc.transform_pts_Rt_th(vertices, R, t) for vertices, R, t in zip(vertices_list, Rs, ts)]
-            colors_depths_list = [
-                torch.cat([model_colors, pc_cam[:, 2:3]], dim=1)
-                for model_colors, pc_cam in zip(model_colors_list, pc_cam_list)
-            ]
-            colors_depths_all = torch.cat(colors_depths_list, dim=0)
-        else:
-            # no depth
-            colors_depths_all = torch.cat(model_colors_list, dim=0)
-
-        ####### render ###############
-        # list of [1, n, 4]
-        pos_clip_list = [self._transform_pos(mtx_i, vertices) for mtx_i, vertices in zip(mtx, vertices_list)]
-        pos_clip_all = torch.cat(pos_clip_list, dim=1)
-
-        pos_idx_list = [torch.squeeze(model["faces"].to(torch.int32)) for model in models]
-        pos_idx_list = [(pos_idx + _offset).to(torch.int32) for pos_idx, _offset in zip(pos_idx_list, vert_offset_list)]
-        pos_idx_all = torch.cat(pos_idx_list, dim=0)
-
-        rast_out, _ = dr.rasterize(self._glctx, pos_clip_all, pos_idx_all, resolution=[height, width])
-        color_depth, _ = dr.interpolate(colors_depths_all[None, ...], rast_out, pos_idx_all)
-        if antialias:
-            color_depth = dr.antialias(color_depth, rast_out, pos_clip_all, pos_idx_all)
-
-        color = color_depth[0, :, :, :3]
-        ret = {"color": color}
-        if with_mask:
-            mask = torch.clamp(rast_out[0, :, :, -1], 0, 1)
-            ret["mask"] = mask
-        if with_depth:
-            depth = color_depth[0, :, :, 3]
-            ret["depth"] = depth
-        # color: hw3; mask: hw; depth: hw
-        return ret
-
-    def render_scene_tex(
-        self,
-        Rs,
-        ts,
-        models,
-        *,
-        K,
-        width,
-        height,
-        znear=0.01,
-        zfar=100,
-        rot_type="mat",
-        uv_type="vertex",
-        enable_mip=True,
-        max_mip_level=10,
-        with_mask=False,
-        with_depth=True,
-    ):
-        """render a scene with m>=1 object for textured objects
-        Args:
-            Rs: [m,3,3] or [m,4] tensor
-            ts: [m,3] tensor
-            models: list of dict, each stores
-                vertex uv: {"vertices":, "faces":, "texture":, "vertex_uvs":,}
-                face uv: {"vertices":, "faces":, "texture":, "face_uvs":, "face_uv_ids":,}
-            K: [3,3]
-            uv_type: `vertex` | `face`
-        Returns:
-            dict:
-                color: (h,w,3)
-                mask: (h,w) fg mask (to get instance masks, use batch mode)
-                depth: (h,w)
-        """
-        P = self._projection_real(cam=K, x0=0, y0=0, w=width, h=height, nc=znear, fc=zfar)
-        # Modelview + projection matrix.
-        mvp = torch.matmul(P, self._V)
-        assert rot_type in ["mat", "quat"], f"Unknown rot_type: {rot_type}"
-        poses_4x4 = self._get_poses(Rs, ts, rot_type=rot_type)  # [m,4,4]
-        mtx = torch.matmul(mvp, poses_4x4)  # [m,4,4]
-
-        verts_list = [torch.squeeze(model["vertices"]) for model in models]
-        faces_list = [torch.squeeze(model["faces"].to(torch.int32)) for model in models]
-        meshes = Meshes(verts=verts_list, faces=faces_list)
-        # verts_packed = meshes.verts_packed()  # [sum(Vi),3]
-        faces_packed = meshes.faces_packed().to(dtype=torch.int32)  # [sum(Fi),3]
-        faces_ranges = torch.stack(
-            [
-                meshes.mesh_to_faces_packed_first_idx(),
-                meshes.num_faces_per_mesh(),
-            ],
-            dim=1,
-        ).to(dtype=torch.int32, device="cpu")
-
-        if with_depth:
-            pc_cam_list = [misc.transform_pts_Rt_th(_v, R, t) for _v, R, t in zip(verts_list, Rs, ts)]
-            pc_cam_packed, _ = _list_to_packed(pc_cam_list)  # [sum(Vi),3]
-
-        ####### render ###############
-        # list of [n, 4]
-        pos_clip_list = [self._transform_pos(mtx_i, _v)[0] for mtx_i, _v in zip(mtx, verts_list)]
-        pos_clip_packed, _ = _list_to_packed(pos_clip_list)
-
-        assert uv_type in ["vertex", "face"], uv_type
-        if uv_type == "vertex":
-            uv_list = [torch.squeeze(model["vertex_uvs"]) for model in models]
-            uv_packed, _ = _list_to_packed(uv_list)
-            uv_idx_packed = faces_packed  # faces
-        else:  # face uv
-            uv_list = [torch.squeeze(model["face_uvs"]) for model in models]
-            uv_packed, _ = _list_to_packed(uv_list)
-            uv_idx_list = [torch.squeeze(model["face_uv_ids"]).to(dtype=torch.int32) for model in models]
-            uv_idx_packed, uv_idx_ranges = _list_to_packed(uv_idx_list)
-        # NOTE: must be the same size
-        tex_list = [torch.squeeze(model["texture"]) for model in models]
-        tex_batch = torch.stack(tex_list, dim=0)  # [m,H,W,3]
-
-        # Render as a batch first -----------------------------------------------------
-        rast_out, rast_out_db = dr.rasterize(
-            self._glctx,
-            pos_clip_packed,
-            faces_packed,
-            ranges=faces_ranges,
-            resolution=[height, width],
-        )
-        if enable_mip:
-            texc, texd = dr.interpolate(
-                uv_packed,
-                rast_out,
-                uv_idx_packed,
-                rast_db=rast_out_db,
-                diff_attrs=self._diff_attrs,
-            )
-            color = dr.texture(
-                tex_batch,
-                texc,
-                texd,
-                filter_mode="linear-mipmap-linear",
-                max_mip_level=max_mip_level,
-            )
-        else:
-            texc, _ = dr.interpolate(uv_packed, rast_out, uv_idx_packed)
-            color = dr.texture(tex_batch, texc, filter_mode="linear")
-
-        masks = torch.clamp(rast_out[:, :, :, -1:], 0, 1)  # bhw1
-        color = color * masks  # Mask out background.
-        if with_depth:
-            im_pc_cam, _ = dr.interpolate(pc_cam_packed, rast_out, faces_packed)
-            depth = im_pc_cam[..., 2:3] * masks  # Mask out background.
-        else:
-            depth = None
-        # use the batched results as a scene --------------------------------------------------
-        ret = self._batch_to_scene_color_depth(
-            color,
-            ts=ts,
-            depths=depth,
-            masks=masks,
-            with_mask=with_mask,
-            with_depth=with_depth,
-        )
-        # color: hw3; mask: hw; depth: hw
-        return ret
-
-    def _batch_to_scene_color_depth(
-        self,
-        colors,
-        ts,
-        depths=None,
-        masks=None,
-        with_mask=False,
-        with_depth=True,
-    ):
-        """
-        Args:
-            colors: [b,h,w,3]
-            depths: [b,h,w,1]
-            ts: [b,3]
-            masks: [b,h,w,1] or None
-        Returns:
-            dict:
-                scene_color: hw3
-                scene_mask: hw
-                scene_depth: hw
-        """
-        tz_list = [float(_t[2]) for _t in ts]  # python floats so np.argsort also works with CUDA tensors
-        dist_inds = np.argsort(tz_list)[::-1]  # descending order: render farther objects first
-        if masks is None:
-            assert depths is not None
-            masks = (depths > 0).to(torch.float32)
-        for i, dist_i in enumerate(dist_inds):
-            if i == 0:
-                scene_color = colors[dist_i].clone()  # clone: avoid in-place writes into the batch
-                if with_mask:
-                    scene_mask = masks[dist_i, :, :, 0].clone()
-                if with_depth:
-                    scene_depth = depths[dist_i, :, :, 0].clone()
-            else:
-                cur_mask = torch.clamp(masks[dist_i], 0, 1)
-                mask_inds = torch.where(cur_mask[:, :, 0] > 0.5)
-                scene_color[mask_inds[0], mask_inds[1], :] = colors[dist_i, mask_inds[0], mask_inds[1], :]
-                if with_mask:
-                    scene_mask[mask_inds[0], mask_inds[1]] = masks[dist_i, mask_inds[0], mask_inds[1], 0]
-                if with_depth:
-                    scene_depth[mask_inds[0], mask_inds[1]] = depths[dist_i, mask_inds[0], mask_inds[1], 0]
-        ret = {"color": scene_color}
-        if with_mask:
-            ret["mask"] = scene_mask
-        if with_depth:
-            ret["depth"] = scene_depth
-        return ret
-
-    def render_batch(
-        self,
-        Rs,
-        ts,
-        models,
-        *,
-        Ks,
-        width,
-        height,
-        znear=0.01,
-        zfar=100,
-        rot_type="mat",
-        antialias=True,
-        mode=["color", "depth"],
-    ):
-        """render a batch (vertex color), each contain one object
-        Args:
-            Rs (tensor): [b,3,3] or [b,4]
-            ts (tensor): [b,3,]
-            models (list of dicts): each stores {"vertices":, "colors":, "faces":, }
-            Ks (ndarray): [b,3,3] or [3,3]
-            mode: color, depth, mask, xyz (one or more must be given)
-        Returns:
-            dict:
-                color: bhw3
-                mask: bhw
-                depth: bhw
-                xyz: bhw3
-        """
-        assert len(mode) >= 1, mode
-        bs = Rs.shape[0]
-        if not isinstance(Ks, (tuple, list)) and Ks.ndim == 2:
-            if isinstance(Ks, torch.Tensor):
-                Ks = [Ks.clone() for _ in range(bs)]
-            elif isinstance(Ks, np.ndarray):
-                Ks = [Ks.copy() for _ in range(bs)]
-            else:
-                raise TypeError(f"Unknown type of Ks: {type(Ks)}")
-
-        Ps = [self._projection_real(cam=K, x0=0, y0=0, w=width, h=height, nc=znear, fc=zfar) for K in Ks]
-        Ps = torch.stack(Ps, dim=0)  # [b,4,4]
-        # Modelview + projection matrix.
-        mvp = torch.matmul(Ps, self._V)  # [b,4,4]
-        assert rot_type in ["mat", "quat"], f"Unknown rot_type: {rot_type}"
-        poses_4x4 = self._get_poses(Rs, ts, rot_type=rot_type)  # [b,4,4]
-        mtx = torch.matmul(mvp, poses_4x4)  # [b,4,4]
-
-        verts_list = [torch.squeeze(model["vertices"]) for model in models]
-        faces_list = [torch.squeeze(model["faces"].to(torch.int32)) for model in models]
-        meshes = Meshes(verts=verts_list, faces=faces_list)
-        # verts_packed = meshes.verts_packed()  # [sum(Vi),3]
-        faces_packed = meshes.faces_packed().to(dtype=torch.int32)  # [sum(Fi),3]
-        faces_ranges = torch.stack(
-            [
-                meshes.mesh_to_faces_packed_first_idx(),
-                meshes.num_faces_per_mesh(),
-            ],
-            dim=1,
-        ).to(dtype=torch.int32, device="cpu")
-
-        ####### render the batch --------------------
-        # list of [Vi, 4]
-        pos_clip_list = [self._transform_pos(mtx_i, _v)[0] for mtx_i, _v in zip(mtx, verts_list)]
-        pos_clip_packed, _ = _list_to_packed(pos_clip_list)  # [sum(Vi),4]
-
-        rast_out, _ = dr.rasterize(
-            self._glctx,
-            pos_clip_packed,
-            faces_packed,
-            ranges=faces_ranges,
-            resolution=[height, width],
-        )
-        ret = {}
-        if "mask" in mode:
-            mask = torch.clamp(rast_out[:, :, :, -1], 0, 1)
-            ret["mask"] = mask
-
-        color_depth_xyz_code = _get_color_depth_xyz_code(mode)  # color, depth, xyz
-        if sum(color_depth_xyz_code) > 0:
-            # color, depth, xyz
-            if "color" in mode:
-                model_colors_list = [torch.squeeze(model["colors"]) for model in models]
-            else:
-                model_colors_list = None
-            if "depth" in mode:
-                pc_cam_list = [misc.transform_pts_Rt_th(verts, R, t) for verts, R, t in zip(verts_list, Rs, ts)]
-            else:
-                pc_cam_list = None
-            colors_depths_verts_list = []
-            for mesh_i, verts in enumerate(verts_list):
-                colors_depths_verts_i = []
-                if "color" in mode:
-                    colors_depths_verts_i.append(model_colors_list[mesh_i])
-                if "depth" in mode:
-                    colors_depths_verts_i.append(pc_cam_list[mesh_i][:, 2:3])
-                if "xyz" in mode:  # color,depth,xyz
-                    colors_depths_verts_i.append(verts)
-                colors_depths_verts_list.append(torch.cat(colors_depths_verts_i, dim=1))
-            # [sum(Vi),C], C=1,3,4,or 7
-            colors_depths_verts_packed, _ = _list_to_packed(colors_depths_verts_list)
-            # render
-            color_depth_xyz, _ = dr.interpolate(colors_depths_verts_packed, rast_out, faces_packed)
-            if antialias:
-                color_depth_xyz = dr.antialias(color_depth_xyz, rast_out, pos_clip_packed, faces_packed)
-
-            if color_depth_xyz_code == [0, 0, 1]:  # 1
-                ret["xyz"] = color_depth_xyz[..., :3]
-            elif color_depth_xyz_code == [0, 1, 0]:  # 2
-                ret["depth"] = color_depth_xyz[..., 0]
-            elif color_depth_xyz_code == [0, 1, 1]:  # 3
-                ret["depth"] = color_depth_xyz[..., 0]
-                ret["xyz"] = color_depth_xyz[..., 1:4]
-            elif color_depth_xyz_code == [1, 0, 0]:  # 4
-                ret["color"] = color_depth_xyz[..., :3]
-            elif color_depth_xyz_code == [1, 0, 1]:  # 5
-                ret["color"] = color_depth_xyz[..., :3]
-                ret["xyz"] = color_depth_xyz[..., 3:6]
-            elif color_depth_xyz_code == [1, 1, 0]:  # 6
-                ret["color"] = color_depth_xyz[..., :3]
-                ret["depth"] = color_depth_xyz[..., 3]
-            elif color_depth_xyz_code == [1, 1, 1]:  # 7
-                ret["color"] = color_depth_xyz[..., :3]
-                ret["depth"] = color_depth_xyz[..., 3]
-                ret["xyz"] = color_depth_xyz[..., 4:7]
-
-        # color: bhw3; mask: bhw; depth: bhw; xyz: bhw3
-        return ret
-
-    def render_batch_tex(
-        self,
-        Rs,
-        ts,
-        models,
-        *,
-        Ks,
-        width,
-        height,
-        znear=0.01,
-        zfar=100,
-        rot_type="mat",
-        uv_type="vertex",
-        enable_mip=True,
-        max_mip_level=10,
-        mode=["color", "depth"],
-    ):
-        """render a batch for textured objects
-        Args:
-            Rs: [b,3,3] or [b,4] tensor
-            ts: [b,3] tensor
-            models: list of dict, each stores
-                vertex uv: {"vertices":, "faces":, "texture":, "vertex_uvs":,}
-                face uv: {"vertices":, "faces":, "texture":, "face_uvs":, "face_uv_ids":,}
-            Ks: [b,3,3] or [3,3]
-            uv_type: `vertex` | `face`
-            mode: color, depth, mask, xyz (one or more must be given)
-        Returns:
-            dict:
-                color: bhw3
-                mask: bhw
-                depth: bhw
-                xyz: bhw3
-        """
-        assert len(mode) >= 1, mode
-        bs = Rs.shape[0]
-        if not isinstance(Ks, (tuple, list)) and Ks.ndim == 2:
-            if isinstance(Ks, torch.Tensor):
-                Ks = [Ks.clone() for _ in range(bs)]
-            elif isinstance(Ks, np.ndarray):
-                Ks = [Ks.copy() for _ in range(bs)]
-            else:
-                raise TypeError(f"Unknown type of Ks: {type(Ks)}")
-        Ps = [self._projection_real(cam=K, x0=0, y0=0, w=width, h=height, nc=znear, fc=zfar) for K in Ks]
-        Ps = torch.stack(Ps, dim=0)  # [b,4,4]
-        # Modelview + projection matrix.
-        mvp = torch.matmul(Ps, self._V)  # [b,4,4]
-        assert rot_type in ["mat", "quat"], f"Unknown rot_type: {rot_type}"
-        poses_4x4 = self._get_poses(Rs, ts, rot_type=rot_type)  # [b,4,4]
-        mtx = torch.matmul(mvp, poses_4x4)  # [b,4,4]
-
-        verts_list = [torch.squeeze(model["vertices"]) for model in models]
-        faces_list = [torch.squeeze(model["faces"].to(torch.int32)) for model in models]
-        meshes = Meshes(verts=verts_list, faces=faces_list)
-        # verts_packed = meshes.verts_packed()  # [sum(Vi),3]
-        faces_packed = meshes.faces_packed().to(dtype=torch.int32)  # [sum(Fi),3]
-        faces_ranges = torch.stack(
-            [
-                meshes.mesh_to_faces_packed_first_idx(),
-                meshes.num_faces_per_mesh(),
-            ],
-            dim=1,
-        ).to(dtype=torch.int32, device="cpu")
-
-        ####### render ###############
-        # list of [Vi, 4]
-        pos_clip_list = [self._transform_pos(mtx_i, _v)[0] for mtx_i, _v in zip(mtx, verts_list)]
-        pos_clip_packed, _ = _list_to_packed(pos_clip_list)  # [sum(Vi),4]
-
-        rast_out, rast_out_db = dr.rasterize(
-            self._glctx,
-            pos_clip_packed,
-            faces_packed,
-            ranges=faces_ranges,
-            resolution=[height, width],
-        )
-        mask = torch.clamp(rast_out[..., -1:], 0, 1)
-        ret = {}
-        if "mask" in mode:
-            ret["mask"] = mask.squeeze(-1)
-        if "color" in mode:
-            assert uv_type in ["vertex", "face"], uv_type
-            if uv_type == "vertex":
-                uv_list = [torch.squeeze(model["vertex_uvs"]) for model in models]
-                uv_packed, _ = _list_to_packed(uv_list)
-                uv_idx_packed = faces_packed  # faces
-            else:  # face uv
-                uv_list = [torch.squeeze(model["face_uvs"]) for model in models]
-                uv_packed, _ = _list_to_packed(uv_list)
-                uv_idx_list = [torch.squeeze(model["face_uv_ids"]).to(dtype=torch.int32) for model in models]
-                uv_idx_packed, uv_idx_ranges = _list_to_packed(uv_idx_list)
-            # NOTE: must be the same size
-            tex_list = [torch.squeeze(model["texture"]) for model in models]
-            tex_batch = torch.stack(tex_list, dim=0)  # [b,H,W,3]
-
-            if enable_mip:
-                texc, texd = dr.interpolate(
-                    uv_packed,
-                    rast_out,
-                    uv_idx_packed,
-                    rast_db=rast_out_db,
-                    diff_attrs=self._diff_attrs,
-                )
-                color = dr.texture(
-                    tex_batch,
-                    texc,
-                    texd,
-                    filter_mode="linear-mipmap-linear",
-                    max_mip_level=max_mip_level,
-                )
-            else:
-                texc, _ = dr.interpolate(uv_packed, rast_out, uv_idx_packed)
-                color = dr.texture(tex_batch, texc, filter_mode="linear")
-
-            color = color * mask  # Mask out background.
-            ret["color"] = color
-
-        depth_xyz_code = _get_depth_xyz_code(mode)
-        if sum(depth_xyz_code) > 0:
-            if "depth" in mode:
-                pc_cam_list = [misc.transform_pts_Rt_th(_v, R, t) for _v, R, t in zip(verts_list, Rs, ts)]
-            else:
-                pc_cam_list = None
-            depths_verts_list = []
-            for mesh_i, verts in enumerate(verts_list):
-                depths_verts_i = []
-                if "depth" in mode:
-                    depths_verts_i.append(pc_cam_list[mesh_i][:, 2:3])
-                if "xyz" in mode:
-                    depths_verts_i.append(verts)
-                depths_verts_list.append(torch.cat(depths_verts_i, dim=1))
-            # [sum(Vi),C], C=1,3,or 4
-            depths_verts_packed, _ = _list_to_packed(depths_verts_list)
-
-            depth_xyz, _ = dr.interpolate(depths_verts_packed, rast_out, faces_packed)
-            depth_xyz = depth_xyz * mask  # Mask out background.
-            if depth_xyz_code == [0, 1]:  # 1
-                ret["xyz"] = depth_xyz[..., :3]
-            elif depth_xyz_code == [1, 0]:  # 2
-                ret["depth"] = depth_xyz[..., 0]
-            elif depth_xyz_code == [1, 1]:  # 3
-                ret["depth"] = depth_xyz[..., 0]
-                ret["xyz"] = depth_xyz[..., 1:4]
-        # color: bhw3; mask: bhw; depth: bhw; xyz: bhw3
-        return ret
-
-    def render_batch_single(
-        self,
-        Rs,
-        ts,
-        model,
-        *,
-        Ks,
-        width,
-        height,
-        znear=0.01,
-        zfar=100,
-        rot_type="mat",
-        antialias=True,
-        mode=["color", "depth"],
-    ):
-        """render a batch (vertex color) for the same object
-        Args:
-            Rs (tensor): [b,3,3] or [b,4]
-            ts (tensor): [b,3,]
-            model (dict): stores {"vertices":, "colors":, "faces":, }
-            Ks (ndarray): [b,3,3] or [3,3]
-            mode: color, depth, mask, xyz (one or more must be given)
-        Returns:
-            dict:
-                color: bhw3
-                mask: bhw
-                depth: bhw
-                xyz: bhw3
-        """
-        assert len(mode) >= 1, mode
-        bs = Rs.shape[0]
-        if not isinstance(Ks, (tuple, list)) and Ks.ndim == 2:
-            if isinstance(Ks, torch.Tensor):
-                Ks = [Ks.clone() for _ in range(bs)]
-            elif isinstance(Ks, np.ndarray):
-                Ks = [Ks.copy() for _ in range(bs)]
-            else:
-                raise TypeError(f"Unknown type of Ks: {type(Ks)}")
-        Ps = [self._projection_real(cam=K, x0=0, y0=0, w=width, h=height, nc=znear, fc=zfar) for K in Ks]
-        Ps = torch.stack(Ps, dim=0)  # [b,4,4]
-        # Modelview + projection matrix.
-        mvp = torch.matmul(Ps, self._V)  # [b,4,4]
-        assert rot_type in ["mat", "quat"], f"Unknown rot_type: {rot_type}"
-        poses_4x4 = self._get_poses(Rs, ts, rot_type=rot_type)  # [b,4,4]
-        mtx = torch.matmul(mvp, poses_4x4)  # [b,4,4]
-
-        verts = torch.squeeze(model["vertices"])
-        faces = torch.squeeze(model["faces"].to(torch.int32))
-
-        # color, depth, xyz
-        if "color" in mode:
-            model_colors = torch.squeeze(model["colors"])
-        else:
-            model_colors = None
-        if "depth" in mode:
-            pc_cam_list = [misc.transform_pts_Rt_th(verts, R, t) for R, t in zip(Rs, ts)]
-        else:
-            pc_cam_list = None
-
-        ####### render the batch --------------------
-        # list of [V, 4]
-        pos_clip_list = [self._transform_pos(mtx_i, verts)[0] for mtx_i in mtx]
-        pos_clip_batch = torch.stack(pos_clip_list, dim=0)  # [b,V,4]
-
-        rast_out, _ = dr.rasterize(self._glctx, pos_clip_batch, faces, resolution=[height, width])
-        ret = {}
-        if "mask" in mode:
-            mask = torch.clamp(rast_out[:, :, :, -1], 0, 1)
-            ret["mask"] = mask
-        color_depth_xyz_code = _get_color_depth_xyz_code(mode)
-        if sum(color_depth_xyz_code) > 0:
-            if color_depth_xyz_code == [0, 0, 1]:
-                colors_depths_verts_batch = verts[None]  # [1,V,3]
-            elif color_depth_xyz_code == [1, 0, 0]:
-                colors_depths_verts_batch = model_colors[None]  # [1,V,3]
-            elif color_depth_xyz_code == [1, 0, 1]:
-                colors_depths_verts_batch = torch.cat([model_colors, verts], dim=1)[None]  # [1,V,6]
-            else:
-                # list of [V, C], C=1,4,or 7
-                colors_depths_verts_list = []
-                for b_i in range(bs):
-                    colors_depths_verts_i = []
-                    if "color" in mode:
-                        colors_depths_verts_i.append(model_colors)
-                    if "depth" in mode:
-                        colors_depths_verts_i.append(pc_cam_list[b_i][:, 2:3])
-                    if "xyz" in mode:
-                        colors_depths_verts_i.append(verts)
-                    colors_depths_verts_list.append(torch.cat(colors_depths_verts_i, dim=1))
-                colors_depths_verts_batch = torch.stack(colors_depths_verts_list, dim=0)  # [b,V,C]
-
-            color_depth_xyz, _ = dr.interpolate(colors_depths_verts_batch, rast_out, faces)
-            if antialias:
-                color_depth_xyz = dr.antialias(color_depth_xyz, rast_out, pos_clip_batch, faces)
-
-            if color_depth_xyz_code == [0, 0, 1]:  # 1
-                ret["xyz"] = color_depth_xyz[..., :3]
-            elif color_depth_xyz_code == [0, 1, 0]:  # 2
-                ret["depth"] = color_depth_xyz[..., 0]
-            elif color_depth_xyz_code == [0, 1, 1]:  # 3
-                ret["depth"] = color_depth_xyz[..., 0]
-                ret["xyz"] = color_depth_xyz[..., 1:4]
-            elif color_depth_xyz_code == [1, 0, 0]:  # 4
-                ret["color"] = color_depth_xyz[..., :3]
-            elif color_depth_xyz_code == [1, 0, 1]:  # 5
-                ret["color"] = color_depth_xyz[..., :3]
-                ret["xyz"] = color_depth_xyz[..., 3:6]
-            elif color_depth_xyz_code == [1, 1, 0]:  # 6
-                ret["color"] = color_depth_xyz[..., :3]
-                ret["depth"] = color_depth_xyz[..., 3]
-            elif color_depth_xyz_code == [1, 1, 1]:  # 7
-                ret["color"] = color_depth_xyz[..., :3]
-                ret["depth"] = color_depth_xyz[..., 3]
-                ret["xyz"] = color_depth_xyz[..., 4:7]
-        # color: bhw3; mask: bhw; depth: bhw; xyz: bhw3
-        return ret
-
-    def render_batch_single_tex(
-        self,
-        Rs,
-        ts,
-        model,
-        *,
-        Ks,
-        width,
-        height,
-        znear=0.01,
-        zfar=100,
-        rot_type="mat",
-        uv_type="vertex",
-        enable_mip=True,
-        max_mip_level=10,
-        mode=["color", "depth"],
-    ):
-        """render a batch for a same textured object
-        Args:
-            Rs: [b,3,3] or [b,4] tensor
-            ts: [b,3] tensor
-            model: stores
-                vertex uv: {"vertices":, "faces":, "texture":, "vertex_uvs":,}
-                or face uv: {"vertices":, "faces":, "texture":, "face_uvs":, "face_uv_ids":,}
-            Ks: [b,3,3] or [3,3]
-            uv_type: `vertex` | `face`
-            mode: color, depth, mask, xyz (one or more must be given)
-        Returns:
-            dict:
-                color: bhw3
-                mask: bhw
-                depth: bhw
-        """
-        assert len(mode) >= 1, mode
-        bs = Rs.shape[0]
-        if not isinstance(Ks, (tuple, list)) and Ks.ndim == 2:
-            if isinstance(Ks, torch.Tensor):
-                Ks = [Ks.clone() for _ in range(bs)]
-            elif isinstance(Ks, np.ndarray):
-                Ks = [Ks.copy() for _ in range(bs)]
-            else:
-                raise TypeError(f"Unknown type of Ks: {type(Ks)}")
-        Ps = [self._projection_real(cam=K, x0=0, y0=0, w=width, h=height, nc=znear, fc=zfar) for K in Ks]
-        Ps = torch.stack(Ps, dim=0)  # [b,4,4]
-        # Modelview + projection matrix.
-        mvp = torch.matmul(Ps, self._V)  # [b,4,4]
-        assert rot_type in ["mat", "quat"], f"Unknown rot_type: {rot_type}"
-        poses_4x4 = self._get_poses(Rs, ts, rot_type=rot_type)  # [b,4,4]
-        mtx = torch.matmul(mvp, poses_4x4)  # [b,4,4]
-
-        verts = torch.squeeze(model["vertices"])
-        faces = torch.squeeze(model["faces"].to(torch.int32))
-
-        ####### render ###############
-        pos_clip_list = [self._transform_pos(mtx_i, verts)[0] for mtx_i in mtx]  # list of [V, 4]
-        pos_clip_batch = torch.stack(pos_clip_list, dim=0)  # [b,V,4]
-
-        assert uv_type in ["vertex", "face"], uv_type
-        if uv_type == "vertex":
-            uv = torch.squeeze(model["vertex_uvs"])
-            uv_idx = faces  # faces
-        else:  # face uv
-            uv = torch.squeeze(model["face_uvs"])
-            uv_idx = torch.squeeze(model["face_uv_ids"]).to(dtype=torch.int32)
-
-        tex = torch.squeeze(model["texture"])  # [H,W,3]
-
-        # Render as a batch
-        rast_out, rast_out_db = dr.rasterize(self._glctx, pos_clip_batch, faces, resolution=[height, width])
-        mask = torch.clamp(rast_out[..., -1:], 0, 1)  # bhw1
-        ret = {}
-        if "mask" in mode:
-            ret["mask"] = mask.squeeze(-1)
-        if "color" in mode:
-            if enable_mip and self.output_db:
-                texc, texd = dr.interpolate(
-                    uv[None],
-                    rast_out,
-                    uv_idx,
-                    rast_db=rast_out_db,
-                    diff_attrs=self._diff_attrs,
-                )
-                color = dr.texture(
-                    tex[None],
-                    texc,
-                    uv_da=texd,
-                    filter_mode="linear-mipmap-linear",
-                    max_mip_level=max_mip_level,
-                )
-            else:
-                texc, _ = dr.interpolate(uv[None], rast_out, uv_idx)
-                color = dr.texture(tex[None], texc, filter_mode="linear")
-
-            color = color * mask  # Mask out background.
-            ret["color"] = color
-
-        depth_xyz_code = _get_depth_xyz_code(mode)
-        if sum(depth_xyz_code) > 0:
-            if "depth" in mode:
-                pc_cam_list = [misc.transform_pts_Rt_th(verts, R, t) for R, t in zip(Rs, ts)]
-            else:
-                pc_cam_list = None
-            if depth_xyz_code == [0, 1]:
-                depths_verts_batch = verts[None]  # [1,V,3]
-            else:
-                depths_verts_list = []
-                for b_i in range(bs):
-                    depths_verts_i = []
-                    if "depth" in mode:
-                        depths_verts_i.append(pc_cam_list[b_i][:, 2:3])
-                    if "xyz" in mode:
-                        depths_verts_i.append(verts)
-                    depths_verts_list.append(torch.cat(depths_verts_i, dim=1))
-                # [b,V,C], C=1 or 4
-                depths_verts_batch = torch.stack(depths_verts_list, dim=0)
-
-            depth_xyz, _ = dr.interpolate(depths_verts_batch, rast_out, faces)
-            depth_xyz = depth_xyz * mask  # Mask out background.
-
-            if depth_xyz_code == [0, 1]:  # 1
-                ret["xyz"] = depth_xyz[..., :3]
-            elif depth_xyz_code == [1, 0]:  # 2
-                ret["depth"] = depth_xyz[..., 0]
-            elif depth_xyz_code == [1, 1]:  # 3
-                ret["depth"] = depth_xyz[..., 0]
-                ret["xyz"] = depth_xyz[..., 1:4]
-        # color: bhw3; mask: bhw; depth: bhw; xyz: bhw3
-        return ret
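A hedged sketch of the nvdiffrast-backed renderer above on a two-object scene; model paths, poses, and K are placeholders:

    import torch

    ren = Renderer_nvdr(output_db=False, glctx_mode="manual")
    models = load_ply_models(["obj_01.ply", "obj_02.ply"])  # hypothetical paths
    Rs = torch.eye(3).repeat(2, 1, 1).cuda()                # [2,3,3] rotation matrices
    ts = torch.tensor([[0.0, 0.0, 0.6], [0.05, 0.0, 0.8]]).cuda()
    K = torch.tensor([[572.4, 0.0, 325.3], [0.0, 573.6, 242.0], [0.0, 0.0, 1.0]]).cuda()
    out = ren.render_scene(Rs, ts, models, K=K, width=640, height=480, with_mask=True)
    color, mask, depth = out["color"], out["mask"], out["depth"]  # (h,w,3), (h,w), (h,w)
    ren.close()  # release the manually managed GL context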
diff --git a/lib/dr_utils/rep/Mesh.py b/lib/dr_utils/rep/Mesh.py
deleted file mode 100644
index e3da9cb8725246358c85e2e9b262d6a20a0fc9e0..0000000000000000000000000000000000000000
--- a/lib/dr_utils/rep/Mesh.py
+++ /dev/null
@@ -1,1155 +0,0 @@
-# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-#     http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from typing import Optional
-
-from abc import abstractmethod
-import os
-from PIL import Image
-
-import torch
-import numpy as np
-
-from .helpers import _assert_tensor
-from .helpers import _composedecorator
-
-# import kaolin.cuda.load_textures as load_textures_cuda
-# import kaolin as kal
-
-
-class Mesh:
-    """Abstract class to represent 3D polygon meshes."""
-
-    def __init__(
-        self,
-        vertices: torch.Tensor,
-        faces: torch.Tensor,
-        uvs: torch.Tensor,
-        face_textures: torch.Tensor,
-        textures: torch.Tensor,
-        edges: torch.Tensor,
-        edge2key: dict,
-        vv: torch.Tensor,
-        vv_count: torch.Tensor,
-        vf: torch.Tensor,
-        vf_count: torch.Tensor,
-        ve: torch.Tensor,
-        ve_count: torch.Tensor,
-        ff: torch.Tensor,
-        ff_count: torch.Tensor,
-        ef: torch.Tensor,
-        ef_count: torch.Tensor,
-        ee: torch.Tensor,
-        ee_count: torch.Tensor,
-    ):
-
-        # Vertices of the mesh
-        self.vertices = vertices
-        # Faces of the mesh
-        self.faces = faces
-        # uv coordinates of each vertex
-        self.uvs = uvs
-        # uv indices for each face
-        self.face_textures = face_textures
-        # texture for each face
-        self.textures = textures
-        # Edges of the mesh
-        self.edges = edges
-        # Dictionary that maps an edge (tuple) to an edge idx
-        self.edge2key = edge2key
-        # Vertex-Vertex neighborhood tensor (for each vertex, contains
-        # indices of the vertices neighboring it)
-        self.vv = vv
-        # Number of vertices neighbouring each vertex
-        self.vv_count = vv_count
-        # Vertex-Face neighborhood tensor
-        self.vf = vf
-        # Number of faces neighbouring each vertex
-        self.vf_count = vf_count
-        # Vertex-Edge neighborhood tensor
-        self.ve = ve
-        # Number of edges neighboring each vertex
-        self.ve_count = ve_count
-        # Face-Face neighborhood tensor
-        self.ff = ff
-        # Number of faces neighbouring each face
-        self.ff_count = ff_count
-        # Edge-Face neighbourhood tensor
-        self.ef = ef
-        # Number of edges neighbouring each face
-        self.ef_count = ef_count
-        # Edge-Edge neighbourhood tensor
-        self.ee = ee
-        # Number of edges neighbouring each edge
-        self.ee_count = ee_count
-        # adjacency matrix for verts
-        self.adj = None
-
-        # Initialize device on which tensors reside.
-        self.device = self.vertices.device
-
-    @classmethod
-    def from_tensors(
-        cls,
-        vertices: torch.Tensor,
-        faces: torch.Tensor,
-        uvs: torch.Tensor = None,
-        face_textures: torch.Tensor = None,
-        textures: torch.Tensor = None,
-        enable_adjacency=False,
-    ):
-        r"""Returns mesh with supplied tensor information.
-
-        Args:
-            vertices (torch.Tensor): mesh vertices.
-            faces (torch.Tensor): mesh faces.
-            uvs (torch.Tensor): uv coordinates for the vertices in mesh.
-            face_textures (torch.Tensor): uv indices for each face's vertices.
-            textures (torch.Tensor): texture info for each face.
-            enable_adjacency (bool): whether adjacency information is computed.
-        """
-        vertices = vertices.clone()
-        faces = faces.clone()
-        if enable_adjacency:
-            (
-                edge2key,
-                edges,
-                vv,
-                vv_count,
-                ve,
-                ve_count,
-                vf,
-                vf_count,
-                ff,
-                ff_count,
-                ee,
-                ee_count,
-                ef,
-                ef_count,
-            ) = cls.compute_adjacency_info(vertices, faces)
-            return cls(
-                vertices,
-                faces,
-                uvs,
-                face_textures,
-                textures,
-                edges,
-                edge2key,
-                vv,
-                vv_count,
-                vf,
-                vf_count,
-                ve,
-                ve_count,
-                ff,
-                ff_count,
-                ef,
-                ef_count,
-                ee,
-                ee_count,
-            )
-        else:
-            return cls(vertices, faces, uvs, face_textures, textures, *([None] * 14))
-
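-    # A minimal usage sketch for `from_tensors` (editor's illustration; the
-    # tensor values are hypothetical, not from the original test suite):
-    #   >>> import torch
-    #   >>> verts = torch.tensor([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
-    #   >>> faces = torch.tensor([[0, 1, 2]], dtype=torch.long)
-    #   >>> mesh = Mesh.from_tensors(verts, faces, enable_adjacency=True)
-    #   >>> mesh.edges.shape  # one triangle yields three unique edges
-    #   torch.Size([3, 2])
-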
-    @_composedecorator(classmethod, abstractmethod)
-    def from_obj(
-        self,
-        filename: str,
-        with_vt: bool = False,
-        enable_adjacency: bool = False,
-        texture_res=4,
-    ):
-        r"""Loads object in .obj wavefront format.
-
-        Args:
-            filename (str) : location of file.
-            with_vt (bool): whether to load textures specified by vertex
-                (uv) textures.
-            enable_adjacency (bool): adjacency information is computed.
-            texture_res (int): resolution of loaded face colors.
-
-        Note: the with_vt parameter requires cuda.
-
-        Example:
-            >>> mesh = Mesh.from_obj('model.obj')
-            >>> mesh.vertices.shape
-            torch.Size([482, 3])
-            >>> mesh.faces.shape
-            torch.Size([960, 3])
-
-        """
-
-        # run through obj file and extract obj info
-        vertices = []
-        faces = []
-        face_textures = []
-        uvs = []
-        with open(filename, "r") as mesh:
-            for line in mesh:
-                data = line.split()
-
-                # data = [da for da in data if len(da) > 0]
-                if len(data) == 0:
-                    continue
-                if data[0] == "v":
-                    vertices.append([float(d) for d in data[1:]])
-
-                elif data[0] == "vt":
-                    uvs.append(data[1:])
-
-                elif data[0] == "f":
-                    if "//" in data[1]:
-                        data = [da.split("//") for da in data]
-                        faces.append([int(d[0]) for d in data[1:]])
-                        face_textures.append([int(d[1]) for d in data[1:]])
-                    elif "/" in data[1]:
-                        data = [da.split("/") for da in data]
-                        faces.append([int(d[0]) for d in data[1:]])
-                        face_textures.append([int(d[1]) for d in data[1:]])
-                    else:
-                        faces.append([int(d) for d in data[1:]])
-
-        vertices = torch.FloatTensor(np.array(vertices, dtype=np.float32))
-        faces = torch.LongTensor(faces) - 1
-
-        # compute texture info
-        textures = None
-        if with_vt:
-            with open(filename, "r") as f:
-                for line in f:
-                    if line.startswith("mtllib"):
-                        filename_mtl = os.path.join(os.path.dirname(filename), line.split()[1])
-                        textures = self.load_textures(filename, filename_mtl, texture_res)
-
-        if len(uvs) > 0:
-            uvs = torch.FloatTensor([float(el) for sublist in uvs for el in sublist]).view(-1, 2)
-        else:
-            uvs = None
-        if len(face_textures) > 0:
-            face_textures = torch.LongTensor(face_textures) - 1
-        else:
-            face_textures = None
-
-        if enable_adjacency:
-            (
-                edge2key,
-                edges,
-                vv,
-                vv_count,
-                ve,
-                ve_count,
-                vf,
-                vf_count,
-                ff,
-                ff_count,
-                ee,
-                ee_count,
-                ef,
-                ef_count,
-            ) = self.compute_adjacency_info(vertices, faces)
-        else:
-            (edge2key, edges, vv, vv_count, ve, ve_count, vf, vf_count,
-             ff, ff_count, ee, ee_count, ef, ef_count) = (None,) * 14
-
-        output = self(
-            vertices,
-            faces,
-            uvs,
-            face_textures,
-            textures,
-            edges,
-            edge2key,
-            vv,
-            vv_count,
-            vf,
-            vf_count,
-            ve,
-            ve_count,
-            ff,
-            ff_count,
-            ef,
-            ef_count,
-            ee,
-            ee_count,
-        )
-        return output
-
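-    # The face parser above accepts the three common OBJ face encodings. A
-    # hedged sketch of how each token form is split (OBJ indices are 1-based,
-    # hence the `- 1` when the tensors are built):
-    #   >>> "f 1//4 2//5 3//6".split()[1].split("//")    # v//vn
-    #   ['1', '4']
-    #   >>> "f 1/7/4 2/8/5 3/9/6".split()[1].split("/")  # v/vt/vn
-    #   ['1', '7', '4']
-    #   >>> "f 1 2 3".split()[1:]                        # plain v
-    #   ['1', '2', '3']
-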
-    @classmethod
-    def from_off(self, filename: str, enable_adjacency: Optional[bool] = False):
-        r"""Loads a mesh from a .off file.
-
-        Args:
-            filename (str): Path to the .off file.
-            enable_adjacency (bool): Whether or not to compute adjacency info.
-
-        Returns:
-            (kaolin.rep.Mesh): Mesh object.
-
-        """
-        vertices = []
-        faces = []
-        edges = []
-        num_vertices = 0
-        num_faces = 0
-        num_edges = 0
-        # Counters for the number of vertices, faces, and edges that have
-        # been read so far.
-        read_vertices = 0
-        read_faces = 0
-        read_edges = 0
-        # Flag to indicate whether or not metadata (number of vertices,
-        # number of faces, (optionally) number of edges) has been read.
-        # For .off files, metadata is the first valid line of each file
-        # (neglecting the "OFF" header).
-        metadata_read = False
-        with open(filename, "r") as infile:
-            for line in infile.readlines():
-                # Ignore comments
-                if line.startswith("#"):
-                    continue
-                if line.startswith("OFF"):
-                    continue
-                data = line.strip().split()
-                data = [da for da in data if len(da) > 0]
-                # Ignore blank lines
-                if len(data) == 0:
-                    continue
-                if metadata_read is False:
-                    num_vertices = int(data[0])
-                    num_faces = int(data[1])
-                    if len(data) == 3:
-                        num_edges = int(data[2])
-                    metadata_read = True
-                    continue
-                if read_vertices < num_vertices:
-                    vertices.append([float(d) for d in data])
-                    read_vertices += 1
-                    continue
-                if read_faces < num_faces:
-                    num_face_vertices = int(data[0])
-                    faces.append([int(d) for d in data[1 : 1 + num_face_vertices]])
-                    read_faces += 1
-                    continue
-                if read_edges < num_edges:
-                    edges.append([int(d) for d in data[1:]])
-                    read_edges += 1
-                    continue
-        vertices = torch.FloatTensor(np.array(vertices, dtype=np.float32))
-        faces = torch.LongTensor(np.array(faces, dtype=np.int64))
-
-        if enable_adjacency:
-            (
-                edge2key,
-                edges,
-                vv,
-                vv_count,
-                ve,
-                ve_count,
-                vf,
-                vf_count,
-                ff,
-                ff_count,
-                ee,
-                ee_count,
-                ef,
-                ef_count,
-            ) = self.compute_adjacency_info(vertices, faces)
-        else:
-            (edge2key, edges, vv, vv_count, ve, ve_count, vf, vf_count,
-             ff, ff_count, ee, ee_count, ef, ef_count) = (None,) * 14
-
-        return self(
-            vertices,
-            faces,
-            None,
-            None,
-            None,
-            edges,
-            edge2key,
-            vv,
-            vv_count,
-            vf,
-            vf_count,
-            ve,
-            ve_count,
-            ff,
-            ff_count,
-            ef,
-            ef_count,
-            ee,
-            ee_count,
-        )
-
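-    # For reference, a sketch of the OFF layout `from_off` expects (a single
-    # triangle; the header line and `#` comments are skipped, and the first
-    # data line carries the vertex/face/edge counts):
-    #
-    #   OFF
-    #   3 1 0
-    #   0.0 0.0 0.0
-    #   1.0 0.0 0.0
-    #   0.0 1.0 0.0
-    #   3 0 1 2
-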
-    @staticmethod
-    def _cuda_helper(tensor):
-        if tensor is not None:
-            return tensor.cuda()
-
-    @staticmethod
-    def _cpu_helper(tensor):
-        if tensor is not None:
-            return tensor.cpu()
-
-    @staticmethod
-    def _to_helper(tensor, device):
-        if tensor is not None:
-            return tensor.to(device)
-
-    def cuda(self):
-        r""" "Maps all tensors of the current class to CUDA."""
-
-        self.vertices = self._cuda_helper(self.vertices)
-
-        self.faces = self._cuda_helper(self.faces)
-        self.uvs = self._cuda_helper(self.uvs)
-        self.face_textures = self._cuda_helper(self.face_textures)
-        self.textures = self._cuda_helper(self.textures)
-        self.edges = self._cuda_helper(self.edges)
-        self.vv = self._cuda_helper(self.vv)
-        self.vv_count = self._cuda_helper(self.vv_count)
-        self.vf = self._cuda_helper(self.vf)
-        self.vf_count = self._cuda_helper(self.vf_count)
-        self.ve = self._cuda_helper(self.ve)
-        self.ve_count = self._cuda_helper(self.ve_count)
-        self.ff = self._cuda_helper(self.ff)
-        self.ff_count = self._cuda_helper(self.ff_count)
-        self.ef = self._cuda_helper(self.ef)
-        self.ef_count = self._cuda_helper(self.ef_count)
-        self.ee = self._cuda_helper(self.ee)
-        self.ee_count = self._cuda_helper(self.ee_count)
-
-        self.device = self.vertices.device
-
-    def cpu(self):
-        r""" "Maps all tensors of the current class to CPU."""
-
-        self.vertices = self._cpu_helper(self.vertices)
-
-        self.faces = self._cpu_helper(self.faces)
-        self.uvs = self._cpu_helper(self.uvs)
-        self.face_textures = self._cpu_helper(self.face_textures)
-        self.textures = self._cpu_helper(self.textures)
-        self.edges = self._cpu_helper(self.edges)
-        self.vv = self._cpu_helper(self.vv)
-        self.vv_count = self._cpu_helper(self.vv_count)
-        self.vf = self._cpu_helper(self.vf)
-        self.vf_count = self._cpu_helper(self.vf_count)
-        self.ve = self._cpu_helper(self.ve)
-        self.ve_count = self._cpu_helper(self.ve_count)
-        self.ff = self._cpu_helper(self.ff)
-        self.ff_count = self._cpu_helper(self.ff_count)
-        self.ef = self._cpu_helper(self.ef)
-        self.ef_count = self._cpu_helper(self.ef_count)
-        self.ee = self._cpu_helper(self.ee)
-        self.ee_count = self._cpu_helper(self.ee_count)
-
-        self.device = self.vertices.device
-
-    def to(self, device):
-        r"""Maps all tensors of the current class to the specified device."""
-
-        self.vertices = self._to_helper(self.vertices, device)
-
-        self.faces = self._to_helper(self.faces, device)
-        self.uvs = self._to_helper(self.uvs, device)
-        self.face_textures = self._to_helper(self.face_textures, device)
-        self.textures = self._to_helper(self.textures, device)
-        self.edges = self._to_helper(self.edges, device)
-        self.vv = self._to_helper(self.vv, device)
-        self.vv_count = self._to_helper(self.vv_count, device)
-        self.vf = self._to_helper(self.vf, device)
-        self.vf_count = self._to_helper(self.vf_count, device)
-        self.ve = self._to_helper(self.ve, device)
-        self.ve_count = self._to_helper(self.ve_count, device)
-        self.ff = self._to_helper(self.ff, device)
-        self.ff_count = self._to_helper(self.ff_count, device)
-        self.ef = self._to_helper(self.ef, device)
-        self.ef_count = self._to_helper(self.ef_count, device)
-        self.ee = self._to_helper(self.ee, device)
-        self.ee_count = self._to_helper(self.ee_count, device)
-
-        self.device = self.vertices.device
-
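-    # Device handling is uniform across `cuda`, `cpu`, and `to`: every tensor
-    # attribute is routed through a None-safe helper, so meshes without
-    # adjacency info move cleanly too. A hedged usage sketch (hypothetical
-    # mesh object, assuming a CUDA device is available):
-    #   >>> mesh.cuda()        # all non-None tensors to the default GPU
-    #   >>> mesh.to("cuda:1")  # or any torch device spec
-    #   >>> mesh.cpu()
-    #   >>> mesh.device
-    #   device(type='cpu')
-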
-    @staticmethod
-    def load_mtl(filename_mtl: str):
-        r"""Returns all colours and texture files found in an mtl files.
-
-        Args:
-                filename_mtl (str) : mtl file name
-
-        """
-        texture_filenames = {}
-        colors = {}
-        material_name = ""
-        with open(filename_mtl) as f:
-            for line in f.readlines():
-                if len(line.split()) != 0:
-                    if line.split()[0] == "newmtl":
-                        material_name = line.split()[1]
-                    if line.split()[0] == "map_Kd":
-                        texture_filenames[material_name] = line.split()[1]
-                    if line.split()[0] == "Kd":
-                        colors[material_name] = np.array(list(map(float, line.split()[1:4])))
-        return colors, texture_filenames
-
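-    # A hedged sketch of what `load_mtl` extracts from a typical .mtl file
-    # (material and file names below are illustrative only):
-    #
-    #   newmtl body          -> material_name = "body"
-    #   Kd 0.8 0.1 0.1       -> colors["body"] = array([0.8, 0.1, 0.1])
-    #   map_Kd body_tex.png  -> texture_filenames["body"] = "body_tex.png"
-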
-    @classmethod
-    def load_textures(self, filename_obj: str, filename_mtl: str, texture_res: int):
-        r"""Returns texture for a given obj file, where texture is
-        defined using vertex texture uvs.
-
-        Args:
-            filename_obj (str) : obj file name
-            filename_mtl (str) : mtl file name
-            texture_res  (int) : texture resolution for each face
-
-
-        Returns:
-           textures (torch.Tensor) : texture values for each face
-
-        """
-        assert torch.cuda.is_available()
-        vertices = []
-        with open(filename_obj) as f:
-            lines = f.readlines()
-        for line in lines:
-            if len(line.split()) == 0:
-                continue
-            if line.split()[0] == "vt":
-                vertices.append([float(v) for v in line.split()[1:3]])
-        vertices = np.vstack(vertices).astype(np.float32)
-
-        # load faces for textures
-        faces = []
-        material_names = []
-        material_name = ""
-        for line in lines:
-            if len(line.split()) == 0:
-                continue
-            if line.split()[0] == "f":
-                vs = line.split()[1:]
-                nv = len(vs)
-                if "/" in vs[0] and "//" not in vs[0]:
-                    v0 = int(vs[0].split("/")[1])
-                else:
-                    v0 = 0
-                for i in range(nv - 2):
-                    if "/" in vs[i + 1] and "//" not in vs[i + 1]:
-                        v1 = int(vs[i + 1].split("/")[1])
-                    else:
-                        v1 = 0
-                    if "/" in vs[i + 2] and "//" not in vs[i + 2]:
-                        v2 = int(vs[i + 2].split("/")[1])
-                    else:
-                        v2 = 0
-                    faces.append((v0, v1, v2))
-                    material_names.append(material_name)
-            if line.split()[0] == "usemtl":
-                material_name = line.split()[1]
-        faces = np.vstack(faces).astype(np.int32) - 1
-        faces = vertices[faces]
-        faces = torch.from_numpy(faces).cuda()
-        faces[1 < faces] = faces[1 < faces] % 1
-
-        colors, texture_filenames = self.load_mtl(filename_mtl)
-        textures = torch.ones(faces.shape[0], texture_res**2, 3, dtype=torch.float32)
-        textures = textures.cuda()
-
-        for material_name, color in list(colors.items()):
-            color = torch.from_numpy(color).cuda()
-            for i, material_name_f in enumerate(material_names):
-                if material_name == material_name_f:
-                    textures[i, :, :] = color[None, :]
-
-        for material_name, filename_texture in list(texture_filenames.items()):
-            filename_texture = os.path.join(os.path.dirname(filename_obj), filename_texture)
-            image = np.array(Image.open(filename_texture)).astype(np.float32) / 255.0
-
-            # texture image may have a single channel (greyscale)
-            if len(image.shape) == 2:
-                image = np.stack((image,) * 3, -1)
-            # or an extra alpha channel, which we ignore for now
-            if image.shape[2] == 4:
-                image = image[:, :, :3]
-
-            # flip vertically; PyTorch cannot wrap negative-stride arrays, hence the copy below
-            image = image[::-1, :, :]
-            image = torch.from_numpy(image.copy()).cuda()
-            is_update = (np.array(material_names) == material_name).astype(np.int32)
-            is_update = torch.from_numpy(is_update).cuda()
-            # NOTE: requires the kaolin CUDA extension; its import
-            # (`kaolin.cuda.load_textures`) is commented out at the top of this file.
-            textures = load_textures_cuda.load_textures(image, faces, textures, is_update)
-        return textures
-
-    @staticmethod
-    def get_edges_from_face(f: torch.Tensor):
-        """Returns a list of edges forming the current face.
-
-        Args:
-            f: Face (quadruplet of indices into 'vertices').
-            vertices (torch.Tensor): Vertices (3D points).
-
-        Returns:
-            edge_inds (list): List of tuples (a, b) for each edge (a, b) in
-                faces.
-        """
-        _assert_tensor(f)
-        n = f.numel()
-        edges = []
-        for i in range(n):
-            if f[i] < f[(i + 1) % n]:
-                edges.append((f[i].item(), f[(i + 1) % n].item()))
-            else:
-                edges.append((f[(i + 1) % n].item(), f[i].item()))
-        return edges
-
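-    # Example (editor's illustration): for a triangular face the edges come
-    # back with the smaller vertex index first, so each tuple can serve as a
-    # unique dictionary key:
-    #   >>> import torch
-    #   >>> Mesh.get_edges_from_face(torch.tensor([2, 0, 1]))
-    #   [(0, 2), (0, 1), (1, 2)]
-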
-    @staticmethod
-    def get_edge_order(a: int, b: int):
-        """Returns (a, b) or (b, a), depending on which is smaller. (Smaller
-        element first, for unique keys)
-
-        Args:
-            a (int): Index of first vertex in edge.
-            b (int): Index of second vertex in edge.
-        """
-        return (a, b) if a < b else (b, a)
-
-    @staticmethod
-    def has_common_vertex(e1: torch.Tensor, e2: torch.Tensor):
-        r"""Returns True if the vertices e1, e2 share a common vertex,
-        False otherwise.
-
-        Args:
-            e1 (torch.Tensor): First edge (shape: :math:`2`).
-            e2 (torch.Tensor): Second edge (shape: :math: `2`).
-
-        Returns:
-            (bool): Whether or not e1 and e2 share a common vertex.
-
-        """
-        return (e1[0] in e2) or (e1[1] in e2)
-
-    @staticmethod
-    def get_common_vertex(e1: torch.Tensor, e2: torch.Tensor):
-        r"""Returns the common vertex in edges e1 and e2 (if any).
-
-        Args:
-            e1 (torch.Tensor): First edge (shape: :math:`2`).
-            e2 (torch.Tensor): Second edge (shape: :math:`2`).
-
-        Returns:
-            common_vertex (torch.LongTensor): Index of common vertex
-                    (shape: :math:`1`).
-            first_nbr (torch.LongTensor): Index of one neighbouring
-                    vertex of the common vertex (shape: :math:`1`).
-            second_nbr (torch.LongTensor): Index of the other neighbouring
-                    vertex of the common vertex (shape: :math:`1`).
-
-        """
-        if e1[0] == e2[0]:
-            return e1[0], e1[1], e2[1]
-        if e1[0] == e2[1]:
-            return e1[0], e1[1], e2[0]
-        if e1[1] == e2[0]:
-            return e1[1], e1[0], e2[1]
-        if e1[1] == e2[1]:
-            return e1[1], e1[0], e2[0]
-        return None, None, None
-
-    @staticmethod
-    def list_of_lists_to_matrix(
-        list_of_lists: list,
-        sublist_lengths: torch.Tensor,
-        matrix: torch.Tensor,
-    ):
-        r"""Takes a list of lists (each sub-list of variable size), and maps it
-        to a matrix. Decorated by numba, for efficiency sake.
-
-        Args:
-            list_of_lists (list): A list containing 'sub-'lists (Note: the sub-list
-                    cannont contain lists; needs to contain numbers).
-            sublist_lengths (torch.Tensor): Array containing lengths of each sublist.
-            matrix (torch.Tensor): Matrix in which to `mould` the list
-                    (Note: the matrix must contain as many columns as required to
-                    encapsulate the largest sub-list of `list_of_lists`).
-
-        """
-        for i in range(matrix.shape[0]):
-            l = sublist_lengths[i]
-            if l > 0:
-                matrix[i, 0:l] = list_of_lists[i]
-        return matrix
-
-    @staticmethod
-    def compute_adjacency_info(vertices: torch.Tensor, faces: torch.Tensor):
-        """Build data structures to help speed up connectivity queries. Assumes
-        a homogeneous mesh, i.e., each face has the same number of vertices.
-
-        The outputs have the following format: AA, AA_count
-        AA_count: [count_0, ..., count_n]
-        with AA:
-        [[aa_{0,0}, ..., aa_{0,count_0} (, -1, ..., -1)],
-         [aa_{1,0}, ..., aa_{1,count_1} (, -1, ..., -1)],
-                    ...
-         [aa_{n,0}, ..., aa_{n,count_n} (, -1, ..., -1)]]
-        """
-
-        device = vertices.device
-        facesize = faces.shape[1]
-        nb_vertices = vertices.shape[0]
-        nb_faces = faces.shape[0]
-        edges = torch.cat(
-            [faces[:, i : i + 2] for i in range(facesize - 1)] + [faces[:, [-1, 0]]],
-            dim=0,
-        )
-        # Sort the vertex of edges in increasing order
-        edges = torch.sort(edges, dim=1)[0]
-        # id of corresponding face in edges
-        face_ids = torch.arange(nb_faces, device=device, dtype=torch.long).repeat(facesize)
-        # remove duplicate occurrences and sort by the first vertex;
-        # the edge key / id is fixed from now on as the first-axis position,
-        # and edges_ids gives the key of each edge in the original vector
-        edges, edges_ids = torch.unique(edges, sorted=True, return_inverse=True, dim=0)
-        nb_edges = edges.shape[0]
-
-        # EDGE2FACE
-        sorted_edges_ids, order_edges_ids = torch.sort(edges_ids)
-        sorted_faces_ids = face_ids[order_edges_ids]
-        # indices of first occurrences of each key
-        idx_first = torch.where(
-            torch.nn.functional.pad(sorted_edges_ids[1:] != sorted_edges_ids[:-1], (1, 0), value=1)
-        )[0]
-        nb_faces_per_edge = idx_first[1:] - idx_first[:-1]
-        # compute sub_idx (2nd axis indices to store the faces)
-        offsets = torch.zeros(sorted_edges_ids.shape[0], device=device, dtype=torch.long)
-        offsets[idx_first[1:]] = nb_faces_per_edge
-        sub_idx = torch.arange(sorted_edges_ids.shape[0], device=device, dtype=torch.long) - torch.cumsum(
-            offsets, dim=0
-        )
-        # TODO(cfujitsang): potential way to compute sub_idx differently
-        #                   to test with bigger model
-        # sub_idx = torch.ones(sorted_edges_ids.shape[0], device=device, dtype=torch.long)
-        # sub_idx[0] = 0
-        # sub_idx[idx_first[1:]] = 1 - nb_faces_per_edge
-        # sub_idx = torch.cumsum(sub_idx, dim=0)
-        nb_faces_per_edge = torch.cat(
-            [nb_faces_per_edge, sorted_edges_ids.shape[0] - idx_first[-1:]],
-            dim=0,
-        )
-        max_sub_idx = torch.max(nb_faces_per_edge)
-        ef = torch.zeros((nb_edges, max_sub_idx), device=device, dtype=torch.long) - 1
-        ef[sorted_edges_ids, sub_idx] = sorted_faces_ids
-        # FACE2FACES
-        nb_faces_per_face = (
-            torch.stack(
-                [nb_faces_per_edge[edges_ids[i * nb_faces : (i + 1) * nb_faces]] for i in range(facesize)],
-                dim=1,
-            ).sum(dim=1)
-            - facesize
-        )
-        ff = torch.cat(
-            [ef[edges_ids[i * nb_faces : (i + 1) * nb_faces]] for i in range(facesize)],
-            dim=1,
-        )
-        # remove self-occurrences
-        ff[ff == torch.arange(nb_faces, device=device, dtype=torch.long).view(-1, 1)] = -1
-        ff = torch.sort(ff, dim=-1, descending=True)[0]
-        to_del = (ff[:, 1:] == ff[:, :-1]) & (ff[:, 1:] != -1)
-        ff[:, 1:][to_del] = -1
-        nb_faces_per_face = nb_faces_per_face - torch.sum(to_del, dim=1)
-        max_sub_idx = torch.max(nb_faces_per_face)
-        ff = torch.sort(ff, dim=-1, descending=True)[0][:, :max_sub_idx]
-
-        # VERTEX2VERTICES and VERTEX2EDGES
-        npy_edges = edges.cpu().numpy()
-        edge2key = {tuple(npy_edges[i]): i for i in range(nb_edges)}
-        # _edges and double_edges 2nd axis correspond to the triplet:
-        # [left vertex, right vertex, edge key]
-        _edges = torch.cat([edges, torch.arange(nb_edges, device=device).view(-1, 1)], dim=1)
-        double_edges = torch.cat([_edges, _edges[:, [1, 0, 2]]], dim=0)
-        double_edges = torch.unique(double_edges, sorted=True, dim=0)
-        # TODO(cfujitsang): potential improvement, to test with bigger model:
-        # double_edges0, order_double_edges = torch.sort(double_edges[0])
-        nb_double_edges = double_edges.shape[0]
-        # indices of first occurrences of each key
-        idx_first = torch.where(torch.nn.functional.pad(double_edges[1:, 0] != double_edges[:-1, 0], (1, 0), value=1))[
-            0
-        ]
-        nb_edges_per_vertex = idx_first[1:] - idx_first[:-1]
-        # compute sub_idx (2nd axis indices to store the edges)
-        offsets = torch.zeros(nb_double_edges, device=device, dtype=torch.long)
-        offsets[idx_first[1:]] = nb_edges_per_vertex
-        sub_idx = torch.arange(nb_double_edges, device=device, dtype=torch.long) - torch.cumsum(offsets, dim=0)
-        nb_edges_per_vertex = torch.cat([nb_edges_per_vertex, nb_double_edges - idx_first[-1:]], dim=0)
-        max_sub_idx = torch.max(nb_edges_per_vertex)
-        vv = torch.zeros((nb_vertices, max_sub_idx), device=device, dtype=torch.long) - 1
-        vv[double_edges[:, 0], sub_idx] = double_edges[:, 1]
-        ve = torch.zeros((nb_vertices, max_sub_idx), device=device, dtype=torch.long) - 1
-        ve[double_edges[:, 0], sub_idx] = double_edges[:, 2]
-        # EDGE2EDGES
-        ee = torch.cat([ve[edges[:, 0], :], ve[edges[:, 1], :]], dim=1)
-        nb_edges_per_edge = nb_edges_per_vertex[edges[:, 0]] + nb_edges_per_vertex[edges[:, 1]] - 2
-        max_sub_idx = torch.max(nb_edges_per_edge)
-        # remove self-occurrences
-        ee[ee == torch.arange(nb_edges, device=device, dtype=torch.long).view(-1, 1)] = -1
-        ee = torch.sort(ee, dim=-1, descending=True)[0][:, :max_sub_idx]
-        # VERTEX2FACES
-        vertex_ordered, order_vertex = torch.sort(faces.view(-1))
-        face_ids_in_vertex_order = order_vertex // facesize  # floor division keeps long dtype for indexing
-        # indices of first occurrences of each id
-        idx_first = torch.where(torch.nn.functional.pad(vertex_ordered[1:] != vertex_ordered[:-1], (1, 0), value=1))[0]
-        nb_faces_per_vertex = idx_first[1:] - idx_first[:-1]
-        # compute sub_idx (2nd axis indices to store the faces)
-        offsets = torch.zeros(vertex_ordered.shape[0], device=device, dtype=torch.long)
-        offsets[idx_first[1:]] = nb_faces_per_vertex
-        sub_idx = torch.arange(vertex_ordered.shape[0], device=device, dtype=torch.long) - torch.cumsum(offsets, dim=0)
-        # TODO(cfujitsang): it seems that nb_faces_per_vertex == nb_edges_per_vertex ?
-        nb_faces_per_vertex = torch.cat(
-            [nb_faces_per_vertex, vertex_ordered.shape[0] - idx_first[-1:]],
-            dim=0,
-        )
-        max_sub_idx = torch.max(nb_faces_per_vertex)
-        vf = torch.zeros((nb_vertices, max_sub_idx), device=device, dtype=torch.long) - 1
-        vf[vertex_ordered, sub_idx] = face_ids_in_vertex_order
-
-        return (
-            edge2key,
-            edges,
-            vv,
-            nb_edges_per_vertex,
-            ve,
-            nb_edges_per_vertex,
-            vf,
-            nb_faces_per_vertex,
-            ff,
-            nb_faces_per_face,
-            ee,
-            nb_edges_per_edge,
-            ef,
-            nb_faces_per_edge,
-        )
-
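-    # Reading the padded outputs (editor's note): every neighbourhood tensor
-    # is rectangular, padded with -1 past each row's true count, so the real
-    # neighbours of vertex `i` are recovered by slicing with the count:
-    #   >>> nbrs = vv[i][: vv_count[i]]
-    #   >>> incident_edges = ve[i][: ve_count[i]]
-    # The same nb_edges_per_vertex tensor is returned for both the vv and ve
-    # counts, since each incident edge contributes exactly one neighbouring
-    # vertex.
-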
-    @staticmethod
-    def old_compute_adjacency_info(vertices: torch.Tensor, faces: torch.Tensor):
-        """Build data structures to help speed up connectivity queries.
-
-        Assumes a homogeneous mesh, i.e., each face has the same number
-        of vertices.
-        """
-
-        device = vertices.device
-
-        facesize = faces.shape[1]
-
-        # Dictionary to hash each edge
-        edge2key = dict()
-        # List of edges
-        edges = []
-        # List of neighboring vertices to each vertex
-        vertex_vertex_nbd = [set() for _ in vertices]
-        # List of neighboring edges to each vertex
-        vertex_edge_nbd = [set() for _ in vertices]
-        # List of neighboring faces to each vertex
-        vertex_face_nbd = [set() for _ in vertices]
-        # List of neighboring edges to each edge
-        edge_edge_nbd = []
-        # List of neighboring faces to each edge
-        edge_face_nbd = []
-        # List of neighboring faces to each face
-        face_face_nbd = [set() for _ in faces]
-        # Counter for edges
-        num_edges = 0
-
-        for fid, f in enumerate(faces):
-
-            # Get a list of edges in the current face
-            face_edges = Mesh.get_edges_from_face(f)
-            # Run a pass through the edges, and add any new
-            # edges found, to the list of edges. Also, initialize
-            # corresponding neighborhood info.
-            for idx, edge in enumerate(face_edges):
-                if edge not in edge2key:
-                    edge2key[edge] = num_edges
-                    edges.append(list(edge))
-                    edge_edge_nbd.append([])
-                    edge_face_nbd.append([fid])
-                    vertex_edge_nbd[edge[0]].add(num_edges)
-                    vertex_edge_nbd[edge[1]].add(num_edges)
-                    num_edges += 1
-            # Now, run another pass through the edges, this time to
-            # compute adjacency info.
-            for idx, edge in enumerate(face_edges):
-                k = edge2key[edge]
-                for j in range(1, facesize):
-                    q = edge2key[face_edges[(idx + j) % facesize]]
-                    common_vtx, first_nbr, second_nbr = Mesh.get_common_vertex(edges[k], edges[q])
-                    if common_vtx is not None:  # vertex index 0 is falsy, so test against None explicitly
-                        edge_edge_nbd[k].append(q)
-                        vertex_vertex_nbd[common_vtx].add(first_nbr)
-                        vertex_vertex_nbd[common_vtx].add(second_nbr)
-                        vertex_vertex_nbd[first_nbr].add(common_vtx)
-                        vertex_vertex_nbd[second_nbr].add(common_vtx)
-
-                # q = edge2key[face_edges[(idx+1)%facesize]]
-                # r = edge2key[face_edges[(idx+2)%facesize]]
-                # s = edge2key[face_edges[(idx+3)%facesize]]
-                # if Mesh.has_common_vertex(edges[k], edges[q]):
-                #     edge_edge_nbd[k].append(q)
-                # if Mesh.has_common_vertex(edges[k], edges[r]):
-                #     edge_edge_nbd[k].append(r)
-                # if Mesh.has_common_vertex(edges[k], edges[s]):
-                #     edge_edge_nbd[k].append(s)
-                if fid not in edge_face_nbd[k]:
-                    edge_face_nbd[k].append(fid)
-                vertex_edge_nbd[edge[0]].add(k)
-                vertex_edge_nbd[edge[1]].add(k)
-                vertex_face_nbd[edge[0]].add(fid)
-                vertex_face_nbd[edge[1]].add(fid)
-        # Compute face-face adjacency info
-        for fid, f in enumerate(faces):
-            face_edges = Mesh.get_edges_from_face(f)
-            for idx, edge in enumerate(face_edges):
-                k = edge2key[edge]
-                for nbr in edge_face_nbd[k]:
-                    if nbr == fid:
-                        continue
-                    face_face_nbd[fid].add(nbr)
-
-        # Helper variables
-        N = vertices.shape[0]
-        M = len(edges)
-        P = faces.shape[0]
-
-        # Convert the neighbourhood sets/lists to long tensors on `device`
-        vertex_vertex_nbd = [torch.Tensor(list(l)).long().to(device) for l in vertex_vertex_nbd]
-        vertex_edge_nbd = [torch.Tensor(list(l)).long().to(device) for l in vertex_edge_nbd]
-        vertex_face_nbd = [torch.Tensor(list(l)).long().to(device) for l in vertex_face_nbd]
-        face_face_nbd = [torch.Tensor(list(l)).long().to(device) for l in face_face_nbd]
-        edge_edge_nbd = [torch.Tensor(l).long().to(device) for l in edge_edge_nbd]
-        edge_face_nbd = [torch.Tensor(l).long().to(device) for l in edge_face_nbd]
-
-        # Map vertex_vertex_nbd to a matrix
-        vv_count = torch.Tensor([len(l) for l in vertex_vertex_nbd]).long()
-        vv_max = max(vv_count)
-        vv = -torch.ones((N, vv_max)).long().to(device)
-        vv = Mesh.list_of_lists_to_matrix(vertex_vertex_nbd, vv_count, vv)
-
-        # Map vertex_edge_nbd to a matrix
-        ve_count = torch.Tensor([len(l) for l in vertex_edge_nbd]).long()
-        ve_max = max(ve_count)
-        ve = -torch.ones((N, ve_max)).long().to(device)
-        ve = Mesh.list_of_lists_to_matrix(vertex_edge_nbd, ve_count, ve)
-
-        # Map vertex_face_nbd to a matrix
-        vf_count = torch.Tensor([len(l) for l in vertex_face_nbd]).long()
-        vf_max = max(vf_count)
-        vf = -torch.ones((N, vf_max)).long().to(device)
-        vf = Mesh.list_of_lists_to_matrix(vertex_face_nbd, vf_count, vf)
-
-        # Map edge_edge_nbd to a matrix
-        ee_count = torch.Tensor([len(l) for l in edge_edge_nbd]).long()
-        ee_max = max(ee_count)
-        ee = -torch.ones((M, ee_max)).long().to(device)
-        ee = Mesh.list_of_lists_to_matrix(edge_edge_nbd, ee_count, ee)
-
-        # Map edge_face_nbd to a matrix
-        ef_count = torch.Tensor([len(l) for l in edge_face_nbd]).long()
-        ef_max = max(ef_count)
-        ef = -torch.ones((M, ef_max)).long().to(device)
-        ef = Mesh.list_of_lists_to_matrix(edge_face_nbd, ef_count, ef)
-
-        # Map face_face_nbd to a matrix
-        ff_count = torch.Tensor([len(l) for l in face_face_nbd]).long()
-        ff_max = max(ff_count)
-        ff = -torch.ones((P, ff_max)).long().to(device)
-        ff = Mesh.list_of_lists_to_matrix(face_face_nbd, ff_count, ff)
-
-        # Convert the edge list to a long tensor
-        edges = torch.Tensor(edges).long().to(device)
-
-        return (
-            edge2key,
-            edges,
-            vv,
-            vv_count,
-            ve,
-            ve_count,
-            vf,
-            vf_count,
-            ff,
-            ff_count,
-            ee,
-            ee_count,
-            ef,
-            ef_count,
-        )
-
-    def laplacian_smoothing(self, iterations: int = 1):
-        r"""Applies laplacian smoothing to the mesh.
-
-        Args:
-            iterations (int) : number of iterations to run the algorithm for.
-
-        Example:
-            >>> mesh = Mesh.from_obj('model.obj')
-            >>> mesh.compute_laplacian().abs().mean()
-            tensor(0.0010)
-            >>> mesh.laplacian_smoothing(iterations=3)
-            >>> mesh.compute_laplacian().abs().mean()
-            tensor(9.9956e-05)
-        """
-
-        adj_sparse = self.compute_adjacency_matrix_sparse()
-
-        neighbor_num = torch.sparse.sum(adj_sparse, dim=1).to_dense().view(-1, 1)
-
-        for _ in range(iterations):
-            neighbor_sum = torch.sparse.mm(adj_sparse, self.vertices)
-            self.vertices = neighbor_sum / neighbor_num
-
-    def compute_laplacian(self):
-        r"""Calcualtes the laplcaian of the graph, meaning the average
-            difference between a vertex and its neighbors.
-
-        Returns:
-            (FloatTensor) : laplacian of the mesh.
-
-        Example:
-            >>> mesh = Mesh.from_obj('model.obj')
-            >>> lap = mesh.compute_laplacian()
-
-        """
-
-        adj_sparse = self.compute_adjacency_matrix_sparse()
-
-        neighbor_sum = torch.sparse.mm(adj_sparse, self.vertices) - self.vertices
-        neighbor_num = torch.sparse.sum(adj_sparse, dim=1).to_dense().view(-1, 1) - 1
-        neighbor_num[neighbor_num == 0] = 1
-        neighbor_num = (1.0 / neighbor_num).view(-1, 1)
-
-        neighbor_sum = neighbor_sum * neighbor_num
-        lap = self.vertices - neighbor_sum
-        return lap
-
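-    # A worked sketch of the smoothing step above (editor's note): with A the
-    # sparse vertex adjacency matrix built by compute_adjacency_matrix_sparse
-    # (self-loops included) and D = diag(A @ 1) its row sums, one iteration is
-    #   V <- D^{-1} A V
-    # i.e. each vertex moves to the degree-normalized average of itself and
-    # its neighbours, which is why repeated calls shrink the laplacian:
-    #   >>> mesh = Mesh.from_obj('model.obj')
-    #   >>> before = mesh.compute_laplacian().abs().mean()
-    #   >>> mesh.laplacian_smoothing(iterations=3)
-    #   >>> bool(mesh.compute_laplacian().abs().mean() < before)
-    #   True
-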
-    def show(self):
-        r"""Visuailizes the mesh.
-
-        Example:
-            >>> mesh = Mesh.from_obj('model.obj')
-            >>> mesh.show()
-
-        """
-
-        # NOTE: requires kaolin; the `import kaolin as kal` at the top of
-        # this file is commented out, so this call will fail as-is.
-        kal.visualize.show_mesh(self)
-
-    def save_tensors(self, filename: str):
-        r"""Saves the tensor information of the mesh in a numpy .npz format.
-
-        Args:
-            filename: the file name to save under.
-
-        Example:
-            >>> mesh = Mesh.from_obj('model.obj')
-            >>> mesh.save_tensors()
-
-        """
-        np.savez(
-            filename,
-            vertices=self.vertices.data.cpu().numpy(),
-            faces=self.faces.data.cpu().numpy(),
-        )
-
-    @staticmethod
-    def normalize_zerosafe(matrix: torch.Tensor):
-        """Normalizes each row of a matrix in a 'division by zero'-safe way.
-
-        Args:
-            matrix (torch.tensor): Matrix where each row contains a vector
-                to be normalized.
-        """
-
-        assert matrix.dim() == 2, "Need matrix to contain exactly 2 dimensions"
-        magnitude = torch.sqrt(torch.sum(torch.pow(matrix, 2), dim=1))
-        valid_inds = magnitude > 0
-        matrix[valid_inds] = torch.div(matrix[valid_inds], magnitude[valid_inds].unsqueeze(1))
-        return matrix
-
-    def sample(self, num_points):
-        raise NotImplementedError
-
-    def compute_vertex_normals(self):
-        raise NotImplementedError
-
-    def compute_edge_lengths(self):
-        raise NotImplementedError
-
-    def compute_face_areas(self):
-        raise NotImplementedError
-
-    def compute_interior_angles_per_edge(self):
-        raise NotImplementedError
-
-    def compute_dihedral_angles_per_edge(self):
-        raise NotImplementedError
diff --git a/lib/dr_utils/rep/TriangleMesh.py b/lib/dr_utils/rep/TriangleMesh.py
deleted file mode 100644
index 524897dfbe1cbade9fc341d55e7c210c742ed4cc..0000000000000000000000000000000000000000
--- a/lib/dr_utils/rep/TriangleMesh.py
+++ /dev/null
@@ -1,368 +0,0 @@
-# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from abc import abstractmethod
-
-import torch
-import numpy as np
-import torch.nn.functional as F
-
-
-from .helpers import _composedecorator
-
-# from kaolin.rep.Mesh import Mesh
-from .Mesh import Mesh
-
-
-class TriangleMesh(Mesh):
-    """Abstract class to represent 3D Trianlge meshes."""
-
-    def __init__(
-        self,
-        vertices: torch.Tensor,
-        faces: torch.Tensor,
-        uvs: torch.Tensor,
-        face_textures: torch.Tensor,
-        textures: torch.Tensor,
-        edges: torch.Tensor,
-        edge2key: dict,
-        vv: torch.Tensor,
-        vv_count: torch.Tensor,
-        vf: torch.Tensor,
-        vf_count: torch.Tensor,
-        ve: torch.Tensor,
-        ve_count: torch.Tensor,
-        ff: torch.Tensor,
-        ff_count: torch.Tensor,
-        ef: torch.Tensor,
-        ef_count: torch.Tensor,
-        ee: torch.Tensor,
-        ee_count: torch.Tensor,
-    ):
-
-        # Vertices of the mesh
-        self.vertices = vertices
-        # Faces of the mesh
-        self.faces = faces
-        # uv coordinates of each vertex
-        self.uvs = uvs
-        # uv indices for each face
-        self.face_textures = face_textures
-        # texture for each face
-        self.textures = textures
-        # Edges of the mesh
-        self.edges = edges
-        # Dictionary that maps an edge (tuple) to an edge idx
-        self.edge2key = edge2key
-        # Vertex-Vertex neighborhood tensor (for each vertex, contains
-        # indices of the vertices neighboring it)
-        self.vv = vv
-        # Number of vertices neighbouring each vertex
-        self.vv_count = vv_count
-        # Vertex-Face neighborhood tensor
-        self.vf = vf
-        # Number of faces neighbouring each vertex
-        self.vf_count = vf_count
-        # Vertex-Edge neighborhood tensor
-        self.ve = ve
-        # Number of edges neighboring each vertex
-        self.ve_count = ve_count
-        # Face-Face neighborhood tensor
-        self.ff = ff
-        # Number of faces neighbouring each face
-        self.ff_count = ff_count
-        # Edge-Face neighbourhood tensor
-        self.ef = ef
-        # Number of edges neighbouring each face
-        self.ef_count = ef_count
-        # Edge-Edge neighbourhood tensor
-        self.ee = ee
-        # Number of edges neighbouring each edge
-        self.ee_count = ee_count
-        # adjacency matrix for verts
-        self.adj = None
-
-        # Initialize device on which tensors reside.
-        self.device = self.vertices.device
-
-    @staticmethod
-    def normalize_zerosafe(matrix):
-        """Normalizes each row of a matrix in a 'division by zero'-safe way.
-
-        Args:
-            matrix (torch.tensor): Matrix where each row contains a vector
-                to be normalized
-        """
-
-        assert matrix.dim() == 2, "Need matrix to contain exactly 2 dimensions"
-        magnitude = torch.sqrt(torch.sum(torch.pow(matrix, 2), dim=1))
-        valid_inds = magnitude > 0
-        matrix[valid_inds] = torch.div(matrix[valid_inds], magnitude[valid_inds].unsqueeze(1))
-        return matrix
-
-    def compute_vertex_normals(self):
-        """Compute vertex normals for each mesh vertex."""
-
-        # Let the vertices of each face be denoted a, b, c. For a consistent
-        # order, we vectorize operations, so that a (for example) denotes the
-        # first vertex of each face in the mesh.
-        a = torch.index_select(self.vertices, dim=0, index=self.faces[:, 0].flatten())
-        b = torch.index_select(self.vertices, dim=0, index=self.faces[:, 1].flatten())
-        c = torch.index_select(self.vertices, dim=0, index=self.faces[:, 2].flatten())
-
-        # Compute vertex normals.
-        # Eg. Normals for vertices 'a' are given by (b-a) x (c - a)
-        vn_a = TriangleMesh.normalize_zerosafe(torch.cross(b - a, c - a, dim=1))
-        vn_b = TriangleMesh.normalize_zerosafe(torch.cross(c - b, a - b, dim=1))
-        vn_c = TriangleMesh.normalize_zerosafe(torch.cross(a - c, b - c, dim=1))
-
-        # Using the above, we have duplicate vertex normals (since a vertex is
-        # usually a part of more than one face). We only select the first face
-        # each vertex is a 'neighbor' to, to avoid confusion.
-        face_inds = self.vf[:, 0]
-
-        # Now that we know which face each vertex belongs to, we need to find
-        # the index of the vertex in that selected face (i.e., is the selected
-        # vertex the 'a', the 'b', or the 'c' vertex of the face?).
-        vertex_inds = torch.arange(self.vertices.shape[0]).unsqueeze(1).to(self.vertices.device)
-        # Mask that specifies which index of each face to look at, for the
-        # vertex we wish to find.
-        mask_abc = self.faces[face_inds] == vertex_inds.repeat(1, 3)
-        mask_abc = mask_abc.to(self.vertices.device)  # keep the mask on the mesh's device instead of forcing CUDA
-
-        # Array to hold vertex normals
-        vn = torch.zeros_like(self.vertices)
-
-        inds = torch.nonzero(mask_abc[:, 0])
-        inds = torch.cat((inds, torch.zeros_like(inds)), dim=1)
-        vn[inds] = vn_a[face_inds[inds]]
-        inds = torch.nonzero(mask_abc[:, 1])
-        inds = torch.cat((inds, 1 * torch.ones_like(inds)), dim=1)
-        vn[inds] = vn_b[face_inds[inds]]
-        inds = torch.nonzero(mask_abc[:, 2])
-        inds = torch.cat((inds, 2 * torch.ones_like(inds)), dim=1)
-        vn[inds] = vn_c[face_inds[inds]]
-
-        return vn
-
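-    # Editor's note: the routine above assigns each vertex the normal computed
-    # in its *first* adjacent face (via self.vf[:, 0]), so the mesh must be
-    # loaded with enable_adjacency=True. A hedged usage sketch:
-    #   >>> mesh = TriangleMesh.from_obj('model.obj', enable_adjacency=True)
-    #   >>> vn = mesh.compute_vertex_normals()
-    #   >>> vn.shape == mesh.vertices.shape
-    #   True
-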
-    def compute_face_normals(self):
-        r"""Compute normals for each face in the mesh."""
-
-        # Let each face be denoted (a, b, c). We vectorize operations, so,
-        # we take `a` to mean the "first vertex of every face", and so on.
-        a = torch.index_select(self.vertices, dim=0, index=self.faces[:, 0].flatten())
-        b = torch.index_select(self.vertices, dim=0, index=self.faces[:, 1].flatten())
-        c = torch.index_select(self.vertices, dim=0, index=self.faces[:, 2].flatten())
-
-        # Compute vertex normals (for each face). Note that the same vertex
-        # can have different normals for each face.
-        # Eg. Normals for vertices 'a' are given by (b-a) x (c - a)
-        vn_a = TriangleMesh.normalize_zerosafe(torch.cross(b - a, c - a, dim=1))
-        vn_b = TriangleMesh.normalize_zerosafe(torch.cross(c - b, a - b, dim=1))
-        vn_c = TriangleMesh.normalize_zerosafe(torch.cross(a - c, b - c, dim=1))
-        # Add and normalize the normals (for a more robust estimate)
-        face_normals = vn_a + vn_b + vn_c
-        face_normals_norm = face_normals.norm(dim=1)
-        face_normals = face_normals / torch.where(
-            face_normals_norm > 0, face_normals_norm, torch.ones_like(face_normals_norm)
-        ).view(-1, 1)
-        return face_normals
-
-    def compute_edge_lengths(self):
-        """Compute edge lengths for each edge of the mesh."""
-
-        self.edges = self.edges.to(self.vertices.device)
-        # Let each edge be denoted (a, b). We perform a vectorized select
-        # and then compute the magnitude of the vector b - a.
-        a = torch.index_select(self.vertices, dim=0, index=self.edges[:, 0].flatten())
-        b = torch.index_select(self.vertices, dim=0, index=self.edges[:, 1].flatten())
-        return (b - a).norm(dim=1)
-
-    def compute_face_areas(self):
-        raise NotImplementedError
-
-    def compute_interior_angles_per_edge(self):
-        raise NotImplementedError
-
-    def compute_dihedral_angles_per_edge(self):
-        raise NotImplementedError
-
-    def save_mesh(self, filename: str):
-        r"""Save a mesh to a wavefront .obj file format
-
-        Args:
-            filename (str) : target filename
-
-        """
-
-        with open(filename, "w") as f:
-
-            # write vertices
-            for vert in self.vertices:
-                f.write("v %f %f %f\n" % tuple(vert))
-            # write faces
-            for face in self.faces:
-                f.write("f %d %d %d\n" % tuple(face + 1))
-
-    def sample(self, num_samples: int, eps: float = 1e-10):
-        r"""Uniformly samples the surface of a mesh.
-
-        Args:
-            num_samples (int): number of points to sample
-            eps (float): a small number to prevent division by zero
-                         for small surface areas.
-
-        Returns:
-            (torch.Tensor, torch.Tensor) uniformly sampled points and
-                the face indexes to which each point corresponds.
-
-        Example:
-            >>> points, chosen_faces = mesh.sample(10)
-            >>> points
-            tensor([[ 0.0293,  0.2179,  0.2168],
-                    [ 0.2003, -0.3367,  0.2187],
-                    [ 0.2152, -0.0943,  0.1907],
-                    [-0.1852,  0.1686, -0.0522],
-                    [-0.2167,  0.3171,  0.0737],
-                    [ 0.2219, -0.0289,  0.1531],
-                    [ 0.2217, -0.0115,  0.1247],
-                    [-0.1400,  0.0364, -0.1618],
-                    [ 0.0658, -0.0310, -0.2198],
-                    [ 0.1926, -0.1867, -0.2153]])
-            >>> chosen_faces
-            tensor([ 953,  38,  6, 3480,  563,  393,  395, 3309, 373, 271])
-        """
-
-        if self.vertices.is_cuda:
-            dist_uni = torch.distributions.Uniform(torch.tensor([0.0]).cuda(), torch.tensor([1.0]).cuda())
-        else:
-            dist_uni = torch.distributions.Uniform(torch.tensor([0.0]), torch.tensor([1.0]))
-
-        # calculate area of each face
-        x1, x2, x3 = torch.split(
-            torch.index_select(self.vertices, 0, self.faces[:, 0])
-            - torch.index_select(self.vertices, 0, self.faces[:, 1]),
-            1,
-            dim=1,
-        )
-        y1, y2, y3 = torch.split(
-            torch.index_select(self.vertices, 0, self.faces[:, 1])
-            - torch.index_select(self.vertices, 0, self.faces[:, 2]),
-            1,
-            dim=1,
-        )
-        a = (x2 * y3 - x3 * y2) ** 2
-        b = (x3 * y1 - x1 * y3) ** 2
-        c = (x1 * y2 - x2 * y1) ** 2
-        Areas = torch.sqrt(a + b + c) / 2
-        # percentage of each face w.r.t. full surface area
-        Areas = Areas / (torch.sum(Areas) + eps)
-
-        # define a discrete distribution w.r.t. the face area ratios calculated above
-        cat_dist = torch.distributions.Categorical(Areas.view(-1))
-        face_choices = cat_dist.sample([num_samples])
-
-        # from each face sample a point
-        select_faces = self.faces[face_choices]
-        v0 = torch.index_select(self.vertices, 0, select_faces[:, 0])
-        v1 = torch.index_select(self.vertices, 0, select_faces[:, 1])
-        v2 = torch.index_select(self.vertices, 0, select_faces[:, 2])
-        u = torch.sqrt(dist_uni.sample([num_samples]))
-        v = dist_uni.sample([num_samples])
-        points = (1 - u) * v0 + (u * (1 - v)) * v1 + u * v * v2
-
-        return points, face_choices
-
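-    # Why `u = sqrt(uniform)` (editor's note): with r1, v ~ U(0, 1) and
-    # u = sqrt(r1), the point
-    #   p = (1 - u) * v0 + u * (1 - v) * v1 + u * v * v2
-    # is uniformly distributed over the triangle (v0, v1, v2); the square
-    # root compensates for area growing quadratically with distance from v0.
-    # Combined with the area-weighted categorical draw over faces, the result
-    # is a uniform sample over the whole surface.
-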
-    def compute_adjacency_matrix_full(self):
-        r"""Calcualtes a binary adjacency matrix for a mesh.
-
-        Returns:
-            (torch.Tensor) : binary adjacency matrix
-
-        Example:
-            >>> mesh = TriangleMesh.from_obj('model.obj')
-            >>> adj_info = mesh.compute_adjacency_matrix_full()
-            >>> neighborhood_sum = torch.mm( adj_info, mesh.vertices)
-        """
-
-        adj = torch.zeros((self.vertices.shape[0], self.vertices.shape[0])).to(self.vertices.device)
-        v1 = self.faces[:, 0]
-        v2 = self.faces[:, 1]
-        v3 = self.faces[:, 2]
-        adj[(v1, v1)] = 1
-        adj[(v2, v2)] = 1
-        adj[(v3, v3)] = 1
-        adj[(v1, v2)] = 1
-        adj[(v2, v1)] = 1
-        adj[(v1, v3)] = 1
-        adj[(v3, v1)] = 1
-        adj[(v2, v3)] = 1
-        adj[(v3, v2)] = 1
-
-        return adj
-
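-    # A hedged usage sketch: since the dense adjacency includes self-loops,
-    # one neighbourhood-averaging step can be written directly as
-    #   >>> adj = mesh.compute_adjacency_matrix_full()
-    #   >>> deg = adj.sum(dim=1, keepdim=True)
-    #   >>> smoothed = torch.mm(adj, mesh.vertices) / deg
-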
-    @staticmethod
-    def load_tensors(filename: str, enable_adjacency: bool = False):
-        r"""Loads the tensor information of the mesh from a saved numpy array.
-
-        Args:
-            filename: Path of the file to load from.
-
-        Example:
-            >>> mesh = TriangleMesh.load_tensors('mesh.npy')
-
-        """
-        data = np.load(filename)
-
-        vertices = torch.FloatTensor(data["vertices"])
-        faces = torch.LongTensor(data["faces"].astype(int))
-
-        return TriangleMesh.from_tensors(vertices, faces)
-
-    def compute_adjacency_matrix_sparse(self):
-        r"""Calcualtes a sparse adjacency matrix for a mess
-
-        Returns:
-            (torch.sparse.Tensor) : sparse adjacency matrix
-
-        Example:
-            >>> mesh = Mesh.from_obj('model.obj')
-            >>> adj_info = mesh.compute_adjacency_matrix_sparse()
-            >>> neighborhood_sum = torch.sparse.mm(adj_info, mesh.vertices)
-
-        """
-
-        if self.adj is None:
-
-            v1 = self.faces[:, 0].view(-1, 1)
-            v2 = self.faces[:, 1].view(-1, 1)
-            v3 = self.faces[:, 2].view(-1, 1)
-
-            vert_len = self.vertices.shape[0]
-            identity_indices = torch.arange(vert_len).view(-1, 1).to(v1.device)
-            identity = torch.cat((identity_indices, identity_indices), dim=1).to(v1.device)
-            identity = torch.cat((identity, identity))
-
-            i_1 = torch.cat((v1, v2), dim=1)
-            i_2 = torch.cat((v1, v3), dim=1)
-
-            i_3 = torch.cat((v2, v1), dim=1)
-            i_4 = torch.cat((v2, v3), dim=1)
-
-            i_5 = torch.cat((v3, v2), dim=1)
-            i_6 = torch.cat((v3, v1), dim=1)
-            indices = torch.cat((identity, i_1, i_2, i_3, i_4, i_5, i_6), dim=0).t()
-            values = torch.ones(indices.shape[1]).to(indices.device) * 0.5
-            self.adj = torch.sparse.FloatTensor(indices, values, torch.Size([vert_len, vert_len]))
-        return self.adj.clone()
diff --git a/lib/dr_utils/rep/__init__.py b/lib/dr_utils/rep/__init__.py
deleted file mode 100644
index c411ad30da24c554843699085eceac41a06e4239..0000000000000000000000000000000000000000
--- a/lib/dr_utils/rep/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from .Mesh import *
-from .TriangleMesh import *
diff --git a/lib/dr_utils/rep/helpers.py b/lib/dr_utils/rep/helpers.py
deleted file mode 100644
index 52419393e616733a9793920892f60210ef302979..0000000000000000000000000000000000000000
--- a/lib/dr_utils/rep/helpers.py
+++ /dev/null
@@ -1,207 +0,0 @@
-# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Several helper functions, for internal use in Kaolin."""
-import torch
-import hashlib
-from pathlib import Path
-from typing import Callable
-import numpy as np
-
-
-def _composedecorator(*decs):
-    """Returns a composition of several decorators.
-    Source: https://stackoverflow.com/a/5409569
-    Usage::
-            @composedec(decorator1, decorator2)
-            def func_that_needs_decoration(args):
-                pass
-        is equivalent to::
-            @decorator1
-            @decorator2
-            def func_that_needs_decoration(args):
-                pass
-    """
-
-    def deco(f):
-        for dec in reversed(decs):
-            f = dec(f)
-        return f
-
-    return deco
-
-
-def _normalize_zerosafe(matrix: torch.Tensor):
-    """Normalizes each row of a matrix in a 'division by zero'-safe way.
-
-    Args:
-        matrix (torch.Tensor): Matrix where each row contains a vector
-            to be normalized.
-    """
-
-    assert matrix.dim() == 2, "Need matrix to contain exactly 2 dimensions"
-    magnitude = torch.sqrt(torch.sum(torch.pow(matrix, 2), dim=1))
-    valid_inds = magnitude > 0
-    matrix[valid_inds] = torch.div(matrix[valid_inds], magnitude[valid_inds].unsqueeze(1))
-    return matrix
-
-
-def _assert_tensor(inp):
-    """Asserts that the input is of type torch.Tensor."""
-    if not torch.is_tensor(inp):
-        raise TypeError("Expected input to be of type torch.Tensor." " Got {0} instead".format(type(inp)))
-
-
-def _assert_dim_gt(inp, tgt):
-    """Asserts that the number of dims in inp is greater than the value
-    sepecified in tgt.
-
-    Args:
-        inp (torch.Tensor): Input tensor, whose number of dimensions is
-            to be compared.
-        tgt (int): Value which the number of dims of inp should exceed.
-    """
-    if inp.dim() <= tgt:
-        raise ValueError("Expected input to contain more than {0} dims. " "Got {1} instead.".format(tgt, inp.dim()))
-
-
-def _assert_dim_lt(inp, tgt):
-    """Asserts that the number of dims in inp is less than the value
-    specified in tgt.
-
-    Args:
-        inp (torch.Tensor): Input tensor, whose number of dimensions is
-            to be compared.
-        tgt (int): Value which the number of dims of inp should be less than.
-    """
-    if inp.dim() >= tgt:
-        raise ValueError("Expected input to contain less than {0} dims. " "Got {1} instead.".format(tgt, inp.dim()))
-
-
-def _assert_dim_ge(inp, tgt):
-    """Asserts that the number of dims in inp is greater than or equal to the
-    value sepecified in tgt.
-
-    Args:
-        inp (torch.Tensor): Input tensor, whose number of dimensions is
-            to be compared.
-        tgt (int): Value which the number of dims of inp should exceed.
-    """
-    if inp.dim() < tgt:
-        raise ValueError("Expected input to contain at least {0} dims. " "Got {1} instead.".format(tgt, inp.dim()))
-
-
-def _assert_dim_le(inp, tgt):
-    """Asserts that the number of dims in inp is less than or equal to the
-    value sepecified in tgt.
-
-    Args:
-        inp (torch.Tensor): Input tensor, whose number of dimensions is
-            to be compared.
-        tgt (int): Value which the number of dims of inp should not exceed.
-    """
-    if inp.dim() > tgt:
-        raise ValueError("Expected input to contain at most {0} dims. " "Got {1} instead.".format(tgt, inp.dim()))
-
-
-def _assert_dim_eq(inp, tgt):
-    """Asserts that the number of dims in inp is exactly equal to the value
-    sepecified in tgt.
-
-    Args:
-        inp (torch.Tensor): Input tensor, whose number of dimensions is
-            to be compared.
-        tgt (int): Value which the number of dims of inp should equal.
-    """
-    if inp.dim() != tgt:
-        raise ValueError("Expected input to contain exactly {0} dims. " "Got {1} instead.".format(tgt, inp.dim()))
-
-
-def _assert_shape_eq(inp, tgt_shape, dim=None):
-    """Asserts that the shape of tensor `inp` is equal to the tuple `tgt_shape`
-    along dimension `dim`.
-
-    If `dim` is None, shapes along all dimensions must be equal.
-    """
-    if dim is None:
-        if inp.shape != tgt_shape:
-            raise ValueError(
-                "Size mismatch. Input and target have different " "shapes: {0} vs {1}.".format(inp.shape, tgt_shape)
-            )
-    else:
-        if inp.shape[dim] != tgt_shape[dim]:
-            raise ValueError(
-                "Size mismatch. Input and target have different "
-                "shapes at dimension {2}: {0} vs {1}.".format(inp.shape[dim], tgt_shape[dim], dim)
-            )
-
-
-def _assert_gt(inp, val):
-    """Asserts that all elements in tensor `inp` are greater than value
-    `val`."""
-    if not (inp > val).all():
-        raise ValueError("Each element of input must be greater " "than {0}.".format(val))
-
-
-def _get_hash(x):
-    """Generate a hash from a string, or dictionary."""
-    if isinstance(x, dict):
-        x = tuple(sorted(pair for pair in x.items()))
-
-    return hashlib.md5(bytes(repr(x), "utf-8")).hexdigest()
-
-
-class Cache(object):
-    """Caches the results of a function to disk.
-
-    If already cached, data is returned from disk, otherwise,
-    the function is executed. Output tensors are always on CPU device.
-        Args:
-            transforms (Iterable): List of transforms to compose.
-            cache_dir (str): Directory where objects will be cached. Default
-                             to 'cache'.
-    """
-
-    def __init__(self, func: Callable, cache_dir: Union[str, Path], cache_key: str):
-        self.func = func
-        self.cache_dir = Path(cache_dir) / str(cache_key)
-        self.cache_dir.mkdir(parents=True, exist_ok=True)
-        self.cached_ids = [p.stem for p in self.cache_dir.glob("*")]
-
-    def __call__(self, unique_id: str, *args, **kwargs):
-        """Executes self.func if not yet cached; otherwise reads the data from disk.
-
-        Args:
-            unique_id (str): The unique id with which to name the cached file.
-            *args, **kwargs: The arguments to be passed to self.func.
-
-        Returns:
-            dict of {str: torch.Tensor}: Dictionary of tensors.
-        """
-        fpath = self.cache_dir / f"{unique_id}.p"
-
-        if not fpath.exists():
-            output = self.func(*args, **kwargs)
-            self._write(output, fpath)
-            self.cached_ids.append(unique_id)
-
-        # Read from disk even on a cache miss, so returned tensors are on the CPU.
-        return self._read(fpath)
-
-    def _write(self, x, fpath):
-        torch.save(x, fpath)
-
-    def _read(self, fpath):
-        return torch.load(fpath, map_location="cpu")
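
As a closing note on the removed helpers module: the Cache class memoizes a function's output to disk under cache_dir/cache_key/<unique_id>.p and always returns CPU tensors. Below is a minimal usage sketch under that reading; make_features and the ids shown are hypothetical stand-ins:

    import torch
    # from lib.dr_utils.rep.helpers import Cache  # import path as it stood before this removal

    def make_features(n: int):
        # Stand-in for an expensive preprocessing step.
        return {"features": torch.rand(n, 128)}

    cache = Cache(make_features, cache_dir="cache", cache_key="features_v1")

    out = cache("sample_0001", n=32)  # miss: runs make_features, writes cache/features_v1/sample_0001.p
    out = cache("sample_0001", n=32)  # hit: loaded from disk; tensors on the CPU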