Skip to content
Snippets Groups Projects
Commit 3d1018bf authored by JC's avatar JC
Browse files

Add render groundtruth depth option

parent caa3f56d
No related branches found
No related tags found
No related merge requests found
...@@ -67,6 +67,13 @@ enum class EMeshRenderMode : int { ...@@ -67,6 +67,13 @@ enum class EMeshRenderMode : int {
FaceIDs, FaceIDs,
}; };
// How the ground-truth training view is visualized when overlaid on the render.
enum class EGroundTruthRenderMode : int {
	Shade, // overlay the training image's shaded RGB pixels
	Depth, // overlay the training image's depth map (only used when depth data is present)
	NumRenderModes, // sentinel: number of selectable modes above
};
// ImGui combo-box item list for EGroundTruthRenderMode; entries are
// NUL-separated and the list is terminated by a double NUL. The
// NumRenderModes sentinel is intentionally not listed.
static constexpr const char* GroundTruthRenderModeStr = "Shade\0Depth\0\0";
enum class ERenderMode : int { enum class ERenderMode : int {
AO, AO,
Shade, Shade,
......
...@@ -218,6 +218,17 @@ public: ...@@ -218,6 +218,17 @@ public:
cudaStream_t stream cudaStream_t stream
); );
// Alpha-blends a false-colored visualization of a depth image over the
// buffer's current contents.
//   alpha         blend factor: 1 = overlay only, 0 = keep existing contents
//   depth         device pointer to the depth image (one float per pixel)
//   depth_scale   multiplier applied to each depth value before colormapping
//   resolution    resolution of the depth image
//   fov_axis      axis (0 = x, 1 = y) whose extent is matched to the output
//   zoom          screen-space zoom factor applied about screen_center
//   screen_center zoom center; 0.5,0.5 is the middle of the screen
//   stream        CUDA stream the overlay kernel is launched on
void overlay_depth(
	float alpha,
	const float* __restrict__ depth,
	float depth_scale,
	const Eigen::Vector2i& resolution,
	int fov_axis,
	float zoom,
	const Eigen::Vector2f& screen_center,
	cudaStream_t stream
);
void overlay_false_color(Eigen::Vector2i training_resolution, bool to_srgb, int fov_axis, cudaStream_t stream, const float *error_map, Eigen::Vector2i error_map_resolution, const float *average, float brightness, bool viridis); void overlay_false_color(Eigen::Vector2i training_resolution, bool to_srgb, int fov_axis, cudaStream_t stream, const float *error_map, Eigen::Vector2i error_map_resolution, const float *average, float brightness, bool viridis);
SurfaceProvider& surface_provider() { SurfaceProvider& surface_provider() {
......
...@@ -426,6 +426,8 @@ public: ...@@ -426,6 +426,8 @@ public:
bool m_include_optimizer_state_in_snapshot = false; bool m_include_optimizer_state_in_snapshot = false;
bool m_render_ground_truth = false; bool m_render_ground_truth = false;
EGroundTruthRenderMode m_ground_truth_render_mode = EGroundTruthRenderMode::Shade;
bool m_train = false; bool m_train = false;
bool m_training_data_available = false; bool m_training_data_available = false;
bool m_render = true; bool m_render = true;
......
...@@ -619,7 +619,7 @@ NerfDataset load_nerf(const std::vector<filesystem::path>& jsonpaths, float shar ...@@ -619,7 +619,7 @@ NerfDataset load_nerf(const std::vector<filesystem::path>& jsonpaths, float shar
if (wa != dst.res.x() || ha != dst.res.y()) { if (wa != dst.res.x() || ha != dst.res.y()) {
throw std::runtime_error{std::string{"Depth image has wrong resolution: "} + depthpath.str()}; throw std::runtime_error{std::string{"Depth image has wrong resolution: "} + depthpath.str()};
} }
//tlog::success() << "Depth loaded from " << depthpath; tlog::success() << "Depth loaded from " << depthpath;
} }
} }
......
...@@ -227,6 +227,11 @@ PYBIND11_MODULE(pyngp, m) { ...@@ -227,6 +227,11 @@ PYBIND11_MODULE(pyngp, m) {
.value("Volume", ETestbedMode::Volume) .value("Volume", ETestbedMode::Volume)
.export_values(); .export_values();
py::enum_<EGroundTruthRenderMode>(m, "GroundTruthRenderMode")
.value("Shade", EGroundTruthRenderMode::Shade)
.value("Depth", EGroundTruthRenderMode::Depth)
.export_values();
py::enum_<ERenderMode>(m, "RenderMode") py::enum_<ERenderMode>(m, "RenderMode")
.value("AO", ERenderMode::AO) .value("AO", ERenderMode::AO)
.value("Shade", ERenderMode::Shade) .value("Shade", ERenderMode::Shade)
...@@ -422,6 +427,7 @@ PYBIND11_MODULE(pyngp, m) { ...@@ -422,6 +427,7 @@ PYBIND11_MODULE(pyngp, m) {
.def_readwrite("shall_train_encoding", &Testbed::m_train_encoding) .def_readwrite("shall_train_encoding", &Testbed::m_train_encoding)
.def_readwrite("shall_train_network", &Testbed::m_train_network) .def_readwrite("shall_train_network", &Testbed::m_train_network)
.def_readwrite("render_groundtruth", &Testbed::m_render_ground_truth) .def_readwrite("render_groundtruth", &Testbed::m_render_ground_truth)
.def_readwrite("groundtruth_render_mode", &Testbed::m_ground_truth_render_mode)
.def_readwrite("render_mode", &Testbed::m_render_mode) .def_readwrite("render_mode", &Testbed::m_render_mode)
.def_readwrite("slice_plane_z", &Testbed::m_slice_plane_z) .def_readwrite("slice_plane_z", &Testbed::m_slice_plane_z)
.def_readwrite("dof", &Testbed::m_dof) .def_readwrite("dof", &Testbed::m_dof)
......
...@@ -433,6 +433,55 @@ __device__ Array3f colormap_viridis(float x) { ...@@ -433,6 +433,55 @@ __device__ Array3f colormap_viridis(float x) {
return (c0+x*(c1+x*(c2+x*(c3+x*(c4+x*(c5+x*c6)))))); return (c0+x*(c1+x*(c2+x*(c3+x*(c4+x*(c5+x*c6))))));
} }
// Overlays a turbo-colormapped depth image onto `surface`.
//
// Launched with a 2D grid covering `resolution` (one thread per output
// pixel). Each thread maps its pixel through the zoom/screen-center
// transform into the depth image, colorizes the sampled depth value, and
// alpha-blends the result with the surface's previous contents. Pixels
// that map outside the depth image blend a fully transparent black.
__global__ void overlay_depth_kernel(
	Vector2i resolution,             // output (render buffer) resolution
	float alpha,                     // blend factor: 1 = overlay only, 0 = keep surface
	const float* __restrict__ depth, // source depth image, one float per pixel
	float depth_scale,               // applied to each depth value before colormapping
	Vector2i image_resolution,       // resolution of the depth image
	int fov_axis,                    // axis (0 = x, 1 = y) matched between source and output
	float zoom,                      // screen-space zoom about screen_center
	Eigen::Vector2f screen_center,   // zoom center; 0.5,0.5 = middle of the screen
	cudaSurfaceObject_t surface      // float4 surface that is read-modify-written
) {
	uint32_t x = threadIdx.x + blockDim.x * blockIdx.x;
	uint32_t y = threadIdx.y + blockDim.y * blockIdx.y;

	if (x >= resolution.x() || y >= resolution.y()) {
		return;
	}

	// Ratio between source and output size along the FOV-matched axis.
	float scale = image_resolution[fov_axis] / float(resolution[fov_axis]);

	// Undo the viewer's zoom: start from the pixel center, scale about the
	// screen center, then map into depth-image pixel coordinates.
	float fx = x+0.5f;
	float fy = y+0.5f;

	fx-=resolution.x()*0.5f; fx/=zoom; fx+=screen_center.x() * resolution.x();
	fy-=resolution.y()*0.5f; fy/=zoom; fy+=screen_center.y() * resolution.y();

	float u = (fx-resolution.x()*0.5f) * scale + image_resolution.x()*0.5f;
	float v = (fy-resolution.y()*0.5f) * scale + image_resolution.y()*0.5f;

	int srcx = floorf(u);
	int srcy = floorf(v);

	Array4f color;
	if (srcx >= image_resolution.x() || srcy >= image_resolution.y() || srcx < 0 || srcy < 0) {
		// Outside the depth image: fully transparent, i.e. blending leaves
		// only the attenuated previous surface color.
		color = {0.0f, 0.0f, 0.0f, 0.0f};
	} else {
		// Index the source only after the bounds check so we never form an
		// index from a negative srcx/srcy.
		uint32_t srcidx = srcx + image_resolution.x() * srcy;
		float depth_value = depth[srcidx] * depth_scale;
		Array3f c = colormap_turbo(depth_value);
		color = {c[0], c[1], c[2], 1.0f};
	}

	Array4f prev_color;
	surf2Dread((float4*)&prev_color, surface, x * sizeof(float4), y);
	color = color * alpha + prev_color * (1.f-alpha);
	surf2Dwrite(to_float4(color), surface, x * sizeof(float4), y);
}
__global__ void overlay_false_color_kernel(Vector2i resolution, Vector2i training_resolution, bool to_srgb, int fov_axis, cudaSurfaceObject_t surface, const float *error_map, Vector2i error_map_resolution, const float *average, float brightness, bool viridis) { __global__ void overlay_false_color_kernel(Vector2i resolution, Vector2i training_resolution, bool to_srgb, int fov_axis, cudaSurfaceObject_t surface, const float *error_map, Vector2i error_map_resolution, const float *average, float brightness, bool viridis) {
uint32_t x = threadIdx.x + blockDim.x * blockIdx.x; uint32_t x = threadIdx.x + blockDim.x * blockIdx.x;
uint32_t y = threadIdx.y + blockDim.y * blockIdx.y; uint32_t y = threadIdx.y + blockDim.y * blockIdx.y;
...@@ -631,6 +680,32 @@ void CudaRenderBuffer::overlay_image( ...@@ -631,6 +680,32 @@ void CudaRenderBuffer::overlay_image(
); );
} }
// Blends a turbo-colormapped view of `depth` into this buffer's surface.
// Thin host-side wrapper: sizes a 2D launch to cover the output resolution
// and forwards all parameters to overlay_depth_kernel on `stream`.
void CudaRenderBuffer::overlay_depth(
	float alpha,
	const float* __restrict__ depth,
	float depth_scale,
	const Vector2i& image_resolution,
	int fov_axis,
	float zoom,
	const Eigen::Vector2f& screen_center,
	cudaStream_t stream
) {
	const auto resolution = out_resolution();

	// One thread per output pixel, in 16x8 tiles.
	const dim3 threads = { 16, 8, 1 };
	const dim3 blocks = {
		div_round_up((uint32_t)resolution.x(), threads.x),
		div_round_up((uint32_t)resolution.y(), threads.y),
		1,
	};

	overlay_depth_kernel<<<blocks, threads, 0, stream>>>(
		resolution,
		alpha,
		depth,
		depth_scale,
		image_resolution,
		fov_axis,
		zoom,
		screen_center,
		surface()
	);
}
void CudaRenderBuffer::overlay_false_color(Vector2i training_resolution, bool to_srgb, int fov_axis, cudaStream_t stream, const float* error_map, Vector2i error_map_resolution, const float* average, float brightness, bool viridis) { void CudaRenderBuffer::overlay_false_color(Vector2i training_resolution, bool to_srgb, int fov_axis, cudaStream_t stream, const float* error_map, Vector2i error_map_resolution, const float* average, float brightness, bool viridis) {
auto res = out_resolution(); auto res = out_resolution();
const dim3 threads = { 16, 8, 1 }; const dim3 threads = { 16, 8, 1 };
......
...@@ -594,6 +594,9 @@ void Testbed::imgui() { ...@@ -594,6 +594,9 @@ void Testbed::imgui() {
accum_reset |= ImGui::SliderInt("training image latent code for inference", (int*)&m_nerf.extra_dim_idx_for_inference, 0, m_nerf.training.dataset.n_images-1); accum_reset |= ImGui::SliderInt("training image latent code for inference", (int*)&m_nerf.extra_dim_idx_for_inference, 0, m_nerf.training.dataset.n_images-1);
} }
accum_reset |= ImGui::Combo("Render mode", (int*)&m_render_mode, RenderModeStr); accum_reset |= ImGui::Combo("Render mode", (int*)&m_render_mode, RenderModeStr);
if (m_testbed_mode == ETestbedMode::Nerf) {
accum_reset |= ImGui::Combo("Groundtruth Render mode", (int*)&m_ground_truth_render_mode, GroundTruthRenderModeStr);
}
accum_reset |= ImGui::Combo("Color space", (int*)&m_color_space, ColorSpaceStr); accum_reset |= ImGui::Combo("Color space", (int*)&m_color_space, ColorSpaceStr);
accum_reset |= ImGui::Combo("Tonemap curve", (int*)&m_tonemap_curve, TonemapCurveStr); accum_reset |= ImGui::Combo("Tonemap curve", (int*)&m_tonemap_curve, TonemapCurveStr);
accum_reset |= ImGui::ColorEdit4("Background", &m_background_color[0]); accum_reset |= ImGui::ColorEdit4("Background", &m_background_color[0]);
...@@ -2659,19 +2662,33 @@ void Testbed::render_frame(const Matrix<float, 3, 4>& camera_matrix0, const Matr ...@@ -2659,19 +2662,33 @@ void Testbed::render_frame(const Matrix<float, 3, 4>& camera_matrix0, const Matr
if (m_render_ground_truth) { if (m_render_ground_truth) {
float alpha=1.f; float alpha=1.f;
auto const& metadata = m_nerf.training.dataset.metadata[m_nerf.training.view]; auto const& metadata = m_nerf.training.dataset.metadata[m_nerf.training.view];
render_buffer.overlay_image( if(m_ground_truth_render_mode == EGroundTruthRenderMode::Shade) {
alpha, render_buffer.overlay_image(
Array3f::Constant(m_exposure) + m_nerf.training.cam_exposure[m_nerf.training.view].variable(), alpha,
m_background_color, Array3f::Constant(m_exposure) + m_nerf.training.cam_exposure[m_nerf.training.view].variable(),
to_srgb ? EColorSpace::SRGB : EColorSpace::Linear, m_background_color,
metadata.pixels, to_srgb ? EColorSpace::SRGB : EColorSpace::Linear,
metadata.image_data_type, metadata.pixels,
metadata.resolution, metadata.image_data_type,
m_fov_axis, metadata.resolution,
m_zoom, m_fov_axis,
Vector2f::Constant(0.5f), m_zoom,
m_inference_stream Vector2f::Constant(0.5f),
); m_inference_stream
);
}
else if(m_ground_truth_render_mode == EGroundTruthRenderMode::Depth && metadata.depth) {
render_buffer.overlay_depth(
alpha,
metadata.depth,
1.0f/m_nerf.training.dataset.scale,
metadata.resolution,
m_fov_axis,
m_zoom,
Vector2f::Constant(0.5f),
m_inference_stream
);
}
} }
// Visualize the accumulated error map if requested // Visualize the accumulated error map if requested
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment