diff --git a/src/testbed_nerf.cu b/src/testbed_nerf.cu
index df70273b9f31c12200cf58778476c7288afda934..b0adcfd3e4f2891f78d365d8ec5de9d7ea8939e1 100644
--- a/src/testbed_nerf.cu
+++ b/src/testbed_nerf.cu
@@ -68,7 +68,7 @@ static constexpr uint32_t MAX_STEPS_INBETWEEN_COMPACTION = 8;
 Testbed::NetworkDims Testbed::network_dims_nerf() const {
 	NetworkDims dims;
 	dims.n_input = sizeof(NerfCoordinate) / sizeof(float);
-	dims.n_output = 4;
+	dims.n_output = 4; // TODO: likely RGB + density (cf. network_to_rgb / network_to_density), not RGBA — confirm
 	dims.n_pos = sizeof(NerfPosition) / sizeof(float);
 	return dims;
 }
@@ -91,6 +91,12 @@ inline __host__ __device__ float calc_dt(float t, float cone_angle) {
 }
 
 struct LossAndGradient {
+	// TODO:
+	// It seems to be Vector3f for positions, and Array3f for RGB
+	// Accordingly, it might just be a case of converting
+	// the colourspace from Array3f to float
+	// Alternatively, it could just be a case of requiring all the
+	// RGB components to be identical - actually, we just ignore 0, 1 and 2
 	Eigen::Array3f loss;
 	Eigen::Array3f gradient;
 
@@ -115,7 +121,7 @@ inline __device__ LossAndGradient l2_loss(const Array3f& target, const Array3f&
 	Array3f difference = prediction - target;
 	return {
 		difference * difference,
-		2.0f * difference
+		2.0f * difference // autodiff
 	};
 }
 
@@ -218,15 +224,16 @@ __device__ float network_to_rgb(float val, ENerfActivation activation) {
 	return 0.0f;
 }
 
+// RGB activation derivative is forced to zero below, so no gradient reaches the RGB outputs
 __device__ float network_to_rgb_derivative(float val, ENerfActivation activation) {
-	switch (activation) {
-		case ENerfActivation::None: return 1.0f;
-		case ENerfActivation::ReLU: return val > 0.0f ? 1.0f : 0.0f;
-		case ENerfActivation::Logistic: { float density = tcnn::logistic(val); return density * (1 - density); };
-		case ENerfActivation::Exponential: return __expf(tcnn::clamp(val, -10.0f, 10.0f));
-		default: assert(false);
-	}
 	return 0.0f;
+	// switch (activation) {
+	// 	case ENerfActivation::None: return 1.0f;
+	// 	case ENerfActivation::ReLU: return val > 0.0f ? 1.0f : 0.0f;
+	// 	case ENerfActivation::Logistic: { float density = tcnn::logistic(val); return density * (1 - density); };
+	// 	case ENerfActivation::Exponential: return __expf(tcnn::clamp(val, -10.0f, 10.0f));
+	// 	default: assert(false);
+	// }
 }
 
 __device__ float network_to_density(float val, ENerfActivation activation) {
@@ -251,11 +258,12 @@ __device__ float network_to_density_derivative(float val, ENerfActivation activa
 	return 0.0f;
 }
 
+// Ignore neurons 0, 1 and 2!
 __device__ Array3f network_to_rgb(const tcnn::vector_t<tcnn::network_precision_t, 4>& local_network_output, ENerfActivation activation) {
 	return {
-		network_to_rgb(float(local_network_output[0]), activation),
-		network_to_rgb(float(local_network_output[1]), activation),
-		network_to_rgb(float(local_network_output[2]), activation)
+		network_to_rgb(0.0f, activation),
+		network_to_rgb(0.0f, activation),
+		network_to_rgb(0.0f, activation)
 	};
 }
 
@@ -3446,6 +3454,7 @@ void Testbed::compute_mesh_vertex_colors() {
 		GPUMatrix<float> color_matrix(mlp_out.data(), 4, n_verts);
 		linear_kernel(generate_nerf_network_inputs_from_positions, 0, m_stream.get(), n_verts, m_aabb, m_mesh.verts.data(), PitchedPtr<NerfCoordinate>((NerfCoordinate*)coords.data(), 1, 0, extra_stride), extra_dims_gpu);
 		m_network->inference(m_stream.get(), positions_matrix, color_matrix);
+		// TODO: magic number 3?  Colours?
 		linear_kernel(extract_srgb_with_activation, 0, m_stream.get(), n_verts * 3, 3, mlp_out.data(), (float*)m_mesh.vert_colors.data(), m_nerf.rgb_activation, m_nerf.training.linear_colors);
 	}
 }