diff --git a/docs/assets/cloud.mp4 b/docs/assets/cloud.mp4
new file mode 100644
index 0000000000000000000000000000000000000000..a200b9564e16213b7a456957fd802aa484f51526
Binary files /dev/null and b/docs/assets/cloud.mp4 differ
diff --git a/docs/assets/cloud_training.mp4 b/docs/assets/cloud_training.mp4
new file mode 100644
index 0000000000000000000000000000000000000000..1fe34e8e7f9c3cbc4b598b728b3f56b8ab75ad94
Binary files /dev/null and b/docs/assets/cloud_training.mp4 differ
diff --git a/docs/assets/fox.mp4 b/docs/assets/fox.mp4
new file mode 100644
index 0000000000000000000000000000000000000000..31cf702d4b6b0efb9122e974b645d97b84a7e16d
Binary files /dev/null and b/docs/assets/fox.mp4 differ
diff --git a/docs/assets/gargoyle_loop_base.mp4 b/docs/assets/gargoyle_loop_base.mp4
new file mode 100644
index 0000000000000000000000000000000000000000..da28e04b239fb0330d577e3ec8ab987472582519
Binary files /dev/null and b/docs/assets/gargoyle_loop_base.mp4 differ
diff --git a/docs/assets/gargoyle_loop_base_14.mp4 b/docs/assets/gargoyle_loop_base_14.mp4
new file mode 100644
index 0000000000000000000000000000000000000000..3e649eb16fc334e56858a851bf168a614ac47593
Binary files /dev/null and b/docs/assets/gargoyle_loop_base_14.mp4 differ
diff --git a/docs/assets/gargoyle_loop_densegrid.mp4 b/docs/assets/gargoyle_loop_densegrid.mp4
new file mode 100644
index 0000000000000000000000000000000000000000..e360bdfd663b0ff4aee8657e2777af0813d74efd
Binary files /dev/null and b/docs/assets/gargoyle_loop_densegrid.mp4 differ
diff --git a/docs/assets/gargoyle_loop_densegrid_1res.mp4 b/docs/assets/gargoyle_loop_densegrid_1res.mp4
new file mode 100644
index 0000000000000000000000000000000000000000..7cfaaba188aaadff895c3d51b2c596f699f49f86
Binary files /dev/null and b/docs/assets/gargoyle_loop_densegrid_1res.mp4 differ
diff --git a/docs/assets/gargoyle_loop_frequency.mp4 b/docs/assets/gargoyle_loop_frequency.mp4
new file mode 100644
index 0000000000000000000000000000000000000000..0b6fdc9da6d803b3b0c37de47eaf438b6a7fdc62
Binary files /dev/null and b/docs/assets/gargoyle_loop_frequency.mp4 differ
diff --git a/docs/assets/gargoyle_loop_none.mp4 b/docs/assets/gargoyle_loop_none.mp4
new file mode 100644
index 0000000000000000000000000000000000000000..8409fae9eaa50df5ddc06ead1cf75f94ee18bc6c
Binary files /dev/null and b/docs/assets/gargoyle_loop_none.mp4 differ
diff --git a/docs/assets/lucy.mp4 b/docs/assets/lucy.mp4
new file mode 100644
index 0000000000000000000000000000000000000000..1094724be62d038879d76cb2ce54e72a0aee74f6
Binary files /dev/null and b/docs/assets/lucy.mp4 differ
diff --git a/docs/assets/modsynth.mp4 b/docs/assets/modsynth.mp4
new file mode 100644
index 0000000000000000000000000000000000000000..d189ddcfb8e9aec09b1c09cf03b8ef519d9f310f
Binary files /dev/null and b/docs/assets/modsynth.mp4 differ
diff --git a/docs/assets/nerf_grid_lq.mp4 b/docs/assets/nerf_grid_lq.mp4
new file mode 100644
index 0000000000000000000000000000000000000000..505cae8e4eb080803c8d611f0527e8039704e1d5
Binary files /dev/null and b/docs/assets/nerf_grid_lq.mp4 differ
diff --git a/docs/assets/nrc.mp4 b/docs/assets/nrc.mp4
new file mode 100644
index 0000000000000000000000000000000000000000..72986ef7167446b5277b16138976656e29bdde37
Binary files /dev/null and b/docs/assets/nrc.mp4 differ
diff --git a/docs/assets/nrc_hashgrid.mp4 b/docs/assets/nrc_hashgrid.mp4
new file mode 100644
index 0000000000000000000000000000000000000000..86700d17b9ea2c8891f5d5d5bfff9341be01f796
Binary files /dev/null and b/docs/assets/nrc_hashgrid.mp4 differ
diff --git a/docs/assets/nrc_new_vs_old.mp4 b/docs/assets/nrc_new_vs_old.mp4
new file mode 100644
index 0000000000000000000000000000000000000000..240c87a8b9fa2d072132c69c55835d765f00f16f
Binary files /dev/null and b/docs/assets/nrc_new_vs_old.mp4 differ
diff --git a/docs/assets/nrc_trianglewave.mp4 b/docs/assets/nrc_trianglewave.mp4
new file mode 100644
index 0000000000000000000000000000000000000000..80cd4a3e22238c68c70108a9ca33d3b5ce0ae224
Binary files /dev/null and b/docs/assets/nrc_trianglewave.mp4 differ
diff --git a/docs/assets/paper-thumbnail.png b/docs/assets/paper-thumbnail.png
new file mode 100644
index 0000000000000000000000000000000000000000..46f034065b0f8df0d07baf9730c195cdefba179e
Binary files /dev/null and b/docs/assets/paper-thumbnail.png differ
diff --git a/docs/assets/pearl.mp4 b/docs/assets/pearl.mp4
new file mode 100644
index 0000000000000000000000000000000000000000..50606db9091d40fe2f0a2d646483a1c751492ab6
Binary files /dev/null and b/docs/assets/pearl.mp4 differ
diff --git a/docs/assets/pearl2.mp4 b/docs/assets/pearl2.mp4
new file mode 100644
index 0000000000000000000000000000000000000000..bfe78a5339034e50014a60585012d570007ecb75
Binary files /dev/null and b/docs/assets/pearl2.mp4 differ
diff --git a/docs/assets/robot.mp4 b/docs/assets/robot.mp4
new file mode 100644
index 0000000000000000000000000000000000000000..126cd20e3908a0acda58fe593ede6b2d4177fcc6
Binary files /dev/null and b/docs/assets/robot.mp4 differ
diff --git a/docs/assets/sdf_grid_lq.mp4 b/docs/assets/sdf_grid_lq.mp4
new file mode 100644
index 0000000000000000000000000000000000000000..269521d9d9e8a254c7c8894f70e97024c929981d
Binary files /dev/null and b/docs/assets/sdf_grid_lq.mp4 differ
diff --git a/docs/assets/teaser.mp4 b/docs/assets/teaser.mp4
new file mode 100644
index 0000000000000000000000000000000000000000..de4d00b733dd56473c3bd2c6f32fbf0e270f3bcf
Binary files /dev/null and b/docs/assets/teaser.mp4 differ
diff --git a/docs/assets/teaser_small.jpg b/docs/assets/teaser_small.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..4a79689d0650db550366e38d7277f83f67a4c042
Binary files /dev/null and b/docs/assets/teaser_small.jpg differ
diff --git a/docs/assets/tokyo_online_training.mp4 b/docs/assets/tokyo_online_training.mp4
new file mode 100644
index 0000000000000000000000000000000000000000..71c8e87f05d40c1b475b9f080c5fffce89d994ae
Binary files /dev/null and b/docs/assets/tokyo_online_training.mp4 differ
diff --git a/docs/assets/tokyo_online_training_counter.mp4 b/docs/assets/tokyo_online_training_counter.mp4
new file mode 100644
index 0000000000000000000000000000000000000000..19814c1ac6ca220dbd419328418d4ca0cd98aaf9
Binary files /dev/null and b/docs/assets/tokyo_online_training_counter.mp4 differ
diff --git a/docs/assets/twitter.jpg b/docs/assets/twitter.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..d30b13b2f8ae37dc2a7ddcce262bbd0ae95f8cb3
Binary files /dev/null and b/docs/assets/twitter.jpg differ
diff --git a/docs/index.html b/docs/index.html
new file mode 100644
index 0000000000000000000000000000000000000000..9a13b31c524e1aad262a8c94d1e58fb68abed15c
--- /dev/null
+++ b/docs/index.html
@@ -0,0 +1,516 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<meta charset="utf-8">
+
+<html>
+
+<script src="http://www.google.com/jsapi" type="text/javascript"></script>
+<script type="text/javascript">google.load("jquery", "1.3.2");</script>
+
+<style type="text/css">
+body {
+	font-family: "Titillium Web", "HelveticaNeue-Light", "Helvetica Neue Light", "Helvetica Neue", Helvetica, Arial, "Lucida Grande", sans-serif;
+	font-weight: 300;
+	font-size: 17px;
+	margin-left: auto;
+	margin-right: auto;
+	width: 980px;
+}
+h1 {
+	font-weight:300;
+	line-height: 1.15em;
+}
+
+h2 {
+	font-size: 1.75em;
+}
+a:link,a:visited {
+	color: #1367a7;
+	text-decoration: none;
+}
+a:hover {
+	color: #208799;
+}
+h1, h2, h3 {
+	text-align: center;
+}
+h1 {
+	font-size: 40px;
+	font-weight: 500;
+}
+h2, h3 {
+	font-weight: 400;
+	margin: 16px 0px 4px 0px;
+}
+.paper-title {
+	padding: 16px 0px 16px 0px;
+}
+section {
+	margin: 32px 0px 32px 0px;
+	text-align: justify;
+	clear: both;
+}
+.col-5 {
+	 width: 20%;
+	 float: left;
+}
+.col-4 {
+	 width: 25%;
+	 float: left;
+}
+.col-2 {
+	 width: 50%;
+	 float: left;
+}
+.row, .author-row, .affil-row {
+	 overflow: auto;
+}
+.author-row, .affil-row {
+	font-size: 20px;
+}
+.row {
+	margin: 16px 0px 16px 0px;
+}
+.authors {
+	font-size: 18px;
+}
+.affil-row {
+	margin-top: 16px;
+}
+.teaser {
+	max-width: 100%;
+}
+.text-center {
+	text-align: center;
+}
+.screenshot {
+	width: 256px;
+	border: 1px solid #ddd;
+}
+.screenshot-el {
+	margin-bottom: 16px;
+}
+hr {
+	height: 1px;
+	border: 0;
+	border-top: 1px solid #ddd;
+	margin: 0;
+}
+.material-icons {
+	vertical-align: -6px;
+}
+p {
+	line-height: 1.25em;
+}
+.caption_justify {
+	font-size: 16px;
+	/*font-style: italic;*/
+	color: #666;
+	text-align: justify;
+	margin-top: 0px;
+	margin-bottom: 64px;
+}
+.caption {
+	font-size: 16px;
+	/*font-style: italic;*/
+	color: #666;
+	text-align: center;
+	margin-top: 8px;
+	margin-bottom: 64px;
+}
+.caption_inline {
+	font-size: 16px;
+	/*font-style: italic;*/
+	color: #666;
+	text-align: center;
+	margin-top: 8px;
+	margin-bottom: 0px;
+}
+.caption_bold {
+	font-size: 16px;
+	/*font-style: italic;*/
+	color: #666;
+	text-align: center;
+	margin-top: 0px;
+	margin-bottom: 0px;
+	font-weight: bold;
+}
+video {
+	display: block;
+	margin: auto;
+}
+figure {
+	display: block;
+	margin: auto;
+	margin-top: 10px;
+	margin-bottom: 10px;
+}
+#bibtex pre {
+	font-size: 14px;
+	background-color: #eee;
+	padding: 16px;
+}
+.blue {
+	color: #2c82c9;
+	font-weight: bold;
+}
+.orange {
+	color: #d35400;
+	font-weight: bold;
+}
+.flex-row {
+	display: flex;
+	flex-flow: row wrap;
+	justify-content: space-around;
+	padding: 0;
+	margin: 0;
+	list-style: none;
+}
+.paper-btn {
+  position: relative;
+  text-align: center;
+
+  display: inline-block;
+  margin: 8px;
+  padding: 8px 8px;
+
+  border-width: 0;
+  outline: none;
+  border-radius: 2px;
+
+  background-color: #1367a7;
+  color: #ecf0f1 !important;
+  font-size: 20px;
+  width: 100px;
+  font-weight: 600;
+}
+.paper-btn-parent {
+	display: flex;
+	justify-content: center;
+	margin: 16px 0px;
+}
+.paper-btn:hover {
+	opacity: 0.85;
+}
+.container {
+	margin-left: auto;
+	margin-right: auto;
+	padding-left: 16px;
+	padding-right: 16px;
+}
+.venue {
+	color: #1367a7;
+}
+
+</style>
+<!--<script type="text/javascript" src="../js/hidebib.js"></script>-->
+	<link href='https://fonts.googleapis.com/css?family=Titillium+Web:400,600,400italic,600italic,300,300italic' rel='stylesheet' type='text/css'>
+	<head>
+		<title>Instant Neural Graphics Primitives with a Multiresolution Hash Encoding</title>
+		<meta property="og:description" content="Instant Neural Graphics Primitives with a Multiresolution Hash Encoding"/>
+		<link href="https://fonts.googleapis.com/css2?family=Material+Icons" rel="stylesheet">
+
+		<meta name="twitter:card" content="summary_large_image">
+		<meta name="twitter:creator" content="@mmalex">
+		<meta name="twitter:title" content="Instant Neural Graphics Primitives with a Multiresolution Hash Encoding">
+		<meta name="twitter:description" content="A new paper from NVIDIA Research which presents a method for instant training & rendering of high-quality neural graphics primitives.">
+		<meta name="twitter:image" content="https://nvlabs.github.io/instant-ngp/assets/twitter.jpg">
+	</head>
+
+ <body>
+<div class="container">
+	<div class="paper-title">
+	  <h1>Instant Neural Graphics Primitives with a Multiresolution Hash Encoding</h1>
+	</div>
+
+	<div id="authors">
+		<div class="author-row">
+			<div class="col-4 text-center"><a href="https://tom94.net">Thomas Müller</a></div>
+			<div class="col-4 text-center"><a href="https://research.nvidia.com/person/alex-evans">Alex Evans</a></div>
+			<div class="col-4 text-center"><a href="https://research.nvidia.com/person/christoph-schied">Christoph Schied</a></div>
+			<div class="col-4 text-center"><a href="https://research.nvidia.com/person/alex-keller">Alexander Keller</a></div>
+		</div>
+
+		<div class="affil-row">
+			<div class="col-1 text-center">NVIDIA</div>
+		</div>
+		<!-- <div class="affil-row">
+			<div class="venue text-center"><b>arXiv</b></div>
+		</div> -->
+
+		<div style="clear: both">
+			<div class="paper-btn-parent">
+			<a class="paper-btn" href="assets/mueller2022instant.pdf">
+				<span class="material-icons"> description </span>
+				 Paper
+			</a>
+			<a class="paper-btn" href="assets/mueller2022instant.mp4">
+				<span class="material-icons"> videocam </span>
+				 Video
+			</a>
+			<a class="paper-btn" href="https://github.com/NVlabs/instant-ngp">
+				<span class="material-icons"> code </span>
+				 Code
+			</a>
+		</div></div>
+	</div>
+
+	<section id="teaser-videos">
+		<figure style="width: 25%; float: left">
+			<p class="caption_bold">
+				Neural gigapixel images
+			</p>
+		</figure>
+
+		<figure style="width: 25%; float: left">
+			<p class="caption_bold">
+				Neural SDF
+			</p>
+		</figure>
+
+		<figure style="width: 25%; float: left">
+			<p class="caption_bold">
+				Neural radiance field
+			</p>
+		</figure>
+
+		<figure style="width: 25%; float: left">
+			<p class="caption_bold">
+				Neural volume
+			</p>
+		</figure>
+		<figure style="width: 100%; float: left">
+			<video class="centered" width="100%" autoplay muted loop playsinline>
+				<source src="assets/teaser.mp4" type="video/mp4">
+				Your browser does not support the video tag.
+			</video>
+		</figure>
+
+
+		<figure style="width: 100%; float: left">
+			<p class="caption_justify">
+				We demonstrate near-instant training of neural graphics primitives on a single GPU for multiple tasks. In <b>Gigapixel image</b> we represent an image by a neural network. <b>SDF</b> learns a signed distance function in 3D space whose zero level-set represents a 2D surface.
+				<!--<b>Neural radiance caching</b> (NRC) <a href="https://research.nvidia.com/publication/2021-06_Real-time-Neural-Radiance">[Müller et al. 2021]</a> employs a neural network that is trained in real-time to cache costly lighting calculations-->
+				<b>NeRF</b> <a href="https://www.matthewtancik.com/nerf">[Mildenhall et al. 2020]</a> uses 2D images and their camera poses to reconstruct a volumetric radiance-and-density field that is visualized using ray marching.
+				Lastly, <b>Neural volume</b> learns a denoised radiance and density field directly from a volumetric path tracer.
+				In all tasks, our encoding and its efficient implementation provide clear benefits: near-instant training, high quality, and simplicity. Our encoding is task-agnostic: we use the same implementation and hyperparameters across all tasks and only vary the hash table size which trades off quality and performance.
+			</p>
+		</figure>
+	</section>
+
+
+	<section id="news">
+		<h2>News</h2>
+		<hr>
+		<div class="row">
+			<div><span class="material-icons"> integration_instructions </span> [Jan 2022] Code released on <a href="https://github.com/NVlabs/instant-ngp">GitHub</a>.</div>
+			<div><span class="material-icons"> description </span> [Jan 2022] Paper released on <a href="https://arxiv.org/abs/XXX">arXiv</a>.</div>
+		</div>
+	</section>
+
+	<section id="abstract">
+		<h2>Abstract</h2>
+		<hr>
+		<p>
+			Neural graphics primitives, parameterized by fully connected neural networks, can be costly to train and evaluate. We reduce this cost with a versatile new input encoding that permits the use of a smaller network without sacrificing quality, thus significantly reducing the number of floating point and memory access operations. A small neural network is augmented by a multiresolution hash table of trainable feature vectors whose values are optimized through stochastic gradient descent. The multiresolution structure allows the network to disambiguate hash collisions, making for a simple architecture that is trivial to parallelize on modern GPUs. We leverage this parallelism by implementing the whole system using fully-fused CUDA kernels with a focus on minimizing wasted bandwidth and compute operations. We achieve a combined speedup of several orders of magnitude, enabling training of high-quality neural graphics primitives in a matter of seconds, and rendering in tens of milliseconds at a resolution of 1920x1080.
+		</p>
+	</section>
+
+	<section id="results">
+		<h2>Results</h2>
+		<hr>
+
+		<h3>Gigapixel Image</h3>
+		<hr>
+
+		<figure>
+			<video class="centered" width="100%" autoplay muted loop playsinline>
+				<source src="assets/tokyo_online_training_counter.mp4" type="video/mp4">
+				Your browser does not support the video tag.
+			</video>
+			<p class="caption">
+				Real-time training progress on the image task where the MLP learns the mapping from 2D coordinates to RGB colors of a high-resolution image. Note that in this video, the network is trained from scratch, but converges so quickly you may miss it if you blink!</p>
+		</figure>
+
+		<h3>Neural Radiance Fields</h3>
+		<hr>
+		<figure style="width: 20.0%; float: left">
+			<video class="centered" width="100%" autoplay muted loop playsinline>
+				<source src="assets/gargoyle_loop_none.mp4" type="video/mp4">
+				Your browser does not support the video tag.
+			</video>
+			<p class="caption_bold">(a) None</p>
+			<p class="caption_inline">411k parameters<br/>10:45 (mm:ss)</p>
+		</figure>
+		<!--
+		<figure style="width: 20.0%; float: left">
+			<p class="caption_bold">Dense grid </p>
+			<video class="centered" width="100%" autoplay muted loop playsinline>
+				<source src="assets/gargoyle_loop_densegrid_1res.mp4" type="video/mp4">
+				Your browser does not support the video tag.
+			</video>
+		</figure>
+		-->
+		<figure style="width: 20.0%; float: left">
+			<video class="centered" width="100%" autoplay muted loop playsinline>
+				<source src="assets/gargoyle_loop_densegrid.mp4" type="video/mp4">
+				Your browser does not support the video tag.
+			</video>
+			<p class="caption_bold">(b) Multiresolution grid</p>
+			<p class="caption_inline">10k + 16.3M parameters<br/>1:26 (mm:ss)</p>
+		</figure>
+		<figure style="width: 20.0%; float: left">
+			<video class="centered" width="100%" autoplay muted loop playsinline>
+				<source src="assets/gargoyle_loop_frequency.mp4" type="video/mp4">
+				Your browser does not support the video tag.
+			</video>
+			<p class="caption_bold">(c) Frequency</p>
+			<p class="caption_inline">438k + 0 parameters<br/>13:53 (mm:ss)</p>
+		</figure>
+		<figure style="width: 20.0%; float: left">
+			<video class="centered" width="100%" autoplay muted loop playsinline>
+				<source src="assets/gargoyle_loop_base_14.mp4" type="video/mp4">
+				Your browser does not support the video tag.
+			</video>
+			<p class="caption_bold">(d) Hashtable (T=2<sup>14</sup>)</p>
+			<p class="caption_inline">10k + 494k parameters<br/>1:40 (mm:ss)</p>
+		</figure>
+		<figure style="width: 20.0%; float: left">
+			<video class="centered" width="100%" autoplay muted loop playsinline>
+				<source src="assets/gargoyle_loop_base.mp4" type="video/mp4">
+				Your browser does not support the video tag.
+			</video>
+			<p class="caption_bold">(e) Hashtable (T=2<sup>19</sup>)</p>
+			<p class="caption_inline">10k + 12.6M parameters<br/>1:45 (mm:ss)</p>
+		</figure>
+		<p class="caption_justify">
+			A demonstration of the reconstruction quality of different encodings and parametric data structures for storing trainable feature embeddings. Each configuration was trained for 11000 steps using our fast NeRF implementation, varying only the input encoding. The number of trainable parameters (MLP weights + encoding parameters) and training time are shown below each image. Our encoding (d), with a similar total number of trainable parameters as the frequency encoding (c), trains over 8 times faster, thanks to the sparsity of parameter updates and the smaller MLP. Increasing the number of parameters (e) further improves approximation quality without significantly increasing training time.
+		</p>
+		<figure>
+			<video class="centered" width="100%" autoplay muted loop playsinline>
+				<source src="assets/nerf_grid_lq.mp4" type="video/mp4">
+				Your browser does not support the video tag.
+			</video>
+			<p class="caption">
+				Real-time training progress on eight synthetic NeRF datasets.
+			</p>
+		</figure>
+
+		<figure style="width: 50.0%; float: left">
+			<video class="centered" width="100.0%" autoplay muted loop playsinline>
+				<source src="assets/robot.mp4" type="video/mp4">
+				Your browser does not support the video tag.
+			</video>
+			<p class="caption">
+				Fly-through in a trained Neural Radiance Field. Large, natural 360 scenes are well supported.
+			</p>
+		</figure>
+
+		<figure style="width: 50.0%; float: left">
+			<video class="centered" width="100.0%" autoplay muted loop playsinline>
+				<source src="assets/modsynth.mp4" type="video/mp4">
+				Your browser does not support the video tag.
+			</video>
+			<p class="caption">
+				Fly-through in a trained Neural Radiance Field.
+				Despite being trained from just 34 photos, this complex scene with many disocclusions and specular surfaces is well reconstructed.
+			</p>
+		</figure>
+
+		<figure style="width: 50.0%;">
+			<video class="centered" width="100.0%" autoplay muted loop playsinline>
+				<source src="assets/cloud_training.mp4" type="video/mp4">
+				Your browser does not support the video tag.
+			</video>
+			<p class="caption">
+				We train NeRF-like radiance fields from the noisy output of a volumetric path tracer. Rays are fed to the network in real time during training, and the network learns a denoised radiance field.
+			</p>
+		</figure>
+
+		<h3>Signed Distance Function</h3>
+		<hr>
+		<figure style="width: 50.0%;">
+			<video class="centered" width="100.0%" autoplay muted loop playsinline>
+				<source src="assets/sdf_grid_lq.mp4" type="video/mp4">
+				Your browser does not support the video tag.
+			</video>
+			<p class="caption">
+				Real-time training progress on various SDF datasets. Training data is generated on the fly from the ground-truth mesh using the NVIDIA OptiX ray tracing framework.
+			</p>
+		</figure>
+
+		<h3>Neural Radiance Cache</h3>
+		<hr>
+		<figure>
+			<video class="centered" width="100%" autoplay muted loop playsinline>
+				<source src="assets/nrc_new_vs_old.mp4" type="video/mp4">
+				Your browser does not support the video tag.
+			</video>
+			<p class="caption">
+				Direct visualization of a Neural Radiance Cache, in which the network predicts outgoing radiance at the first non-specular vertex of each pixel's path and is trained online from rays generated by a real-time path tracer. On the left, we show results using the triangle wave encoding of [Müller et al. 2021]; on the right, the new Multiresolution Hash Encoding allows the network to learn much sharper details, for example in the shadow regions.
+			</p>
+		</figure>
+
+	</section>
+
+	<section id="paper">
+		<h2>Paper</h2>
+		<hr>
+		<div class="flex-row">
+			<div style="box-sizing: border-box; padding: 16px; margin: auto;">
+				<a href="assets/mueller2022instant.pdf"><img class="screenshot" src="assets/paper-thumbnail.png"></a>
+			</div>
+			<div style="width: 60%">
+				<p><b>Instant Neural Graphics Primitives with a Multiresolution Hash Encoding</b></p>
+				<p>Thomas Müller, Alex Evans, Christoph Schied, Alexander Keller</p>
+
+				<div><span class="material-icons"> description </span><a href="assets/mueller2022instant.pdf"> Paper preprint (PDF, 6.2 MB)</a></div>
+				<!-- <div><span class="material-icons"> description </span><a href="https://arxiv.org/abs/xxxx.xxxxx"> arXiv version</a></div> -->
+				<div><span class="material-icons"> insert_comment </span><a href="assets/mueller2022instant.bib"> BibTeX</a></div>
+				<div><span class="material-icons"> integration_instructions </span><a href="https://github.com/NVlabs/instant-ngp"> Code</a></div>
+
+				<p>Please send feedback and questions to <a href="https://tom94.net">Thomas Müller</a>.</p>
+			</div>
+		</div>
+	</section>
+
+	<section id="bibtex">
+		<h2>Citation</h2>
+		<hr>
+		<pre><code>@article{mueller2022instant,
+	title = {Instant Neural Graphics Primitives with a Multiresolution Hash Encoding},
+	author = {Thomas M\"uller and Alex Evans and Christoph Schied and Alexander Keller},
+	journal = {arXiv:XXX},
+	year = {2022},
+	month = jan
+}
+</code></pre>
+	</section>
+
+	<section id="acknowledgements">
+		<h2>Acknowledgements</h2>
+		<hr>
+		<div class="row">
+			<p>
+			We would like to thank
+			<a href="https://tovacinni.github.io">Towaki Takikawa</a>,
+			<a href="https://luebke.us/">David Luebke</a>,
+			<a href="https://luminohope.org/">Koki Nagano</a> and
+			<a href="https://research.nvidia.com/person/nikolaus-binder">Nikolaus Binder</a> for profound discussions, and
+			<a href="https://anjulpatney.com/">Anjul Patney</a>,
+			<a href="https://research.nvidia.com/person/jacob-munkberg">Jacob Munkberg</a>,
+			<a href="http://granskog.xyz/">Jonathan Granskog</a>,
+			<a href="https://research.nvidia.com/person/marco-salvi">Marco Salvi</a>,
+			<a href="https://www.cs.toronto.edu/~jlucas/">James Lucas</a> and
+			<a href="https://tovacinni.github.io">Towaki Takikawa</a>
+			for proof-reading and feedback.
+			We also thank <a href="https://tovacinni.github.io">Towaki Takikawa</a> for providing us with the framework for this website.
+			<br/>
+			<em>Girl With a Pearl Earring</em> renovation by Koorosh Orooj <a href="http://profoundism.com/free_licenses.html">(CC BY-SA 4.0 License)</a>
+			<br/>
+			<em>Lucy</em> model from the <a href="http://graphics.stanford.edu/data/3Dscanrep/">Stanford 3D scan repository</a>
+			</p>
+		</div>
+	</section>
+</div>
+</body>
+</html>
diff --git a/src/testbed_volume.cu b/src/testbed_volume.cu
index 88824da9c8eaffdda475faf63bc8f3bb6a5f7c4a..f39e7c369b16e2e20df122ee921d22afaba50938 100644
--- a/src/testbed_volume.cu
+++ b/src/testbed_volume.cu
@@ -55,8 +55,9 @@ __device__ Array4f proc_envmap_render(const Vector3f& dir, const Vector3f& up_di
 	// actual sunsky model that we trained from.
 	Array4f result = Array4f::Zero();
 
-	// result.head<3>() = proc_envmap(dir, up_dir, sun_dir, skycol);
-	// result.w() = 1.0f;
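+	// Evaluate the trained sunsky model for the background instead of a constant color, and set full alpha.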
+	result.head<3>() = proc_envmap(dir, up_dir, sun_dir, skycol);
+	result.w() = 1.0f;
 
 	return result;
 }