diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 10a68fad4e515422df8c25fcd9192aa5291fc0a1..292180bbe0cbc4647db439fed9c2c5f4c885c7f3 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -144,4 +144,4 @@ jobs:
         uses: actions/upload-artifact@v3
         with:
           name: Windows binaries for ${{ matrix.recommended_gpus }}
-          path: ${{ env.build_dir }}/testbed.exe
+          path: ${{ env.build_dir }}/instant-ngp.exe
diff --git a/CMakeLists.txt b/CMakeLists.txt
index cb503c8df9ff8a33d0f8702747966aa8d53707b1..57f6840b2044f10585874525f8ce79a475e1d5d6 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -10,7 +10,7 @@ cmake_minimum_required(VERSION 3.18)
 
 project(instant-ngp
 	VERSION 1.0
-	DESCRIPTION "Instant neural graphics primitives"
+	DESCRIPTION "Instant Neural Graphics Primitives"
 	LANGUAGES C CXX CUDA
 )
 set(NGP_VERSION "${CMAKE_PROJECT_VERSION}")
@@ -116,8 +116,8 @@ if (NGP_BUILD_WITH_GUI)
 		set(NGP_VULKAN OFF)
 		if (NGP_BUILD_WITH_VULKAN)
 			message(WARNING
-				"Vulkan was not found. Neural graphics primitives will still compile "
-				"and run correctly, but DLSS will not be supported."
+				"Vulkan was not found. Instant neural graphics primitives will still "
+				"compile and run correctly, but DLSS will not be supported."
 			)
 		endif()
 	endif()
@@ -279,8 +279,8 @@ target_include_directories(ngp PUBLIC ${NGP_INCLUDE_DIRECTORIES})
 target_link_directories(ngp PUBLIC ${NGP_LINK_DIRECTORIES})
 target_link_libraries(ngp PUBLIC ${NGP_LIBRARIES} tiny-cuda-nn)
 
-add_executable(testbed src/main.cu)
-target_link_libraries(testbed PRIVATE ngp)
+add_executable(instant-ngp src/main.cu)
+target_link_libraries(instant-ngp PRIVATE ngp)
 
 # Copy DLSS shared libraries
 if (NGP_VULKAN)
@@ -291,14 +291,14 @@ if (NGP_VULKAN)
 	endif()
 
 	if (MSVC)
-		add_custom_command(TARGET testbed POST_BUILD
-			COMMAND ${CMAKE_COMMAND} -E copy "${CMAKE_CURRENT_SOURCE_DIR}/dependencies/dlss/lib/Windows_x86_64/${NGX_BUILD_DIR}/nvngx_dlss.dll" $<TARGET_FILE_DIR:testbed>
+		add_custom_command(TARGET instant-ngp POST_BUILD
+			COMMAND ${CMAKE_COMMAND} -E copy "${CMAKE_CURRENT_SOURCE_DIR}/dependencies/dlss/lib/Windows_x86_64/${NGX_BUILD_DIR}/nvngx_dlss.dll" $<TARGET_FILE_DIR:instant-ngp>
 			COMMAND_EXPAND_LISTS
 		)
 	else()
 		file(GLOB DLSS_SOS "${CMAKE_CURRENT_SOURCE_DIR}/dependencies/dlss/lib/Linux_x86_64/${NGX_BUILD_DIR}/libnvidia-ngx-dlss.so.*")
-		add_custom_command(TARGET testbed POST_BUILD
-			COMMAND ${CMAKE_COMMAND} -E copy ${DLSS_SOS} $<TARGET_FILE_DIR:testbed>
+		add_custom_command(TARGET instant-ngp POST_BUILD
+			COMMAND ${CMAKE_COMMAND} -E copy ${DLSS_SOS} $<TARGET_FILE_DIR:instant-ngp>
 			COMMAND_EXPAND_LISTS
 		)
 	endif()
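The CMake hunks above rename the `testbed` executable target to `instant-ngp` and update the DLSS post-build copy steps to match. A quick local sanity check of the rename could look like the following — a hedged sketch, not part of this diff; it assumes an already-configured `build/` tree and a single-config generator that places the binary directly in `build/`:

```python
import glob
import subprocess

# Build only the renamed executable target.
subprocess.run(["cmake", "--build", "build", "--target", "instant-ngp", "-j"], check=True)

# The POST_BUILD commands above copy the DLSS runtime next to the binary:
# nvngx_dlss.dll on Windows, libnvidia-ngx-dlss.so.* on Linux (only when NGP_VULKAN is on).
dlss = glob.glob("build/nvngx_dlss.dll") + glob.glob("build/libnvidia-ngx-dlss.so.*")
print("DLSS runtime next to binary:", dlss or "not found (NGP_VULKAN may be off)")
```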
diff --git a/README.md b/README.md
index e8ffc1cccf98b9448990376986c77c8f22e7116e..d8d52ab40d584f07072533f61db21225c05c4fa7 100644
--- a/README.md
+++ b/README.md
@@ -70,7 +70,7 @@ instant-ngp$ cmake --build build --config RelWithDebInfo -j
 
 If the build fails, please consult [this list of possible fixes](https://github.com/NVlabs/instant-ngp#troubleshooting-compile-errors) before opening an issue.
 
-If the build succeeds, you can now run the code via the `build/testbed` executable or the `scripts/run.py` script described below.
+If the build succeeds, you can now run the code via the `build/instant-ngp` executable or the `scripts/run.py` script described below.
 
 If automatic GPU architecture detection fails (as can happen if you have multiple GPUs installed), set the `TCNN_CUDA_ARCHITECTURES` environment variable for the GPU you would like to use. The following table lists the values for common GPUs. If your GPU is not listed, consult [this exhaustive list](https://developer.nvidia.com/cuda-gpus).
 
@@ -84,7 +84,7 @@ If automatic GPU architecture detection fails, (as can happen if you have multip
 
 <img src="docs/assets_readme/testbed.png" width="100%"/>
 
-This codebase comes with an interactive testbed that includes many features beyond our academic publication:
+This codebase comes with an interactive GUI that includes many features beyond our academic publication:
 - Additional training features, such as extrinsics and intrinsics optimization.
 - Marching cubes for `NeRF->Mesh` and `SDF->Mesh` conversion.
 - A spline-based camera path editor to create videos.
@@ -92,20 +92,20 @@ This codebase comes with an interactive testbed that includes many features beyo
 - And many more task-specific settings.
 - See also our [one minute demonstration video of the tool](https://nvlabs.github.io/instant-ngp/assets/mueller2022instant.mp4).
 
-Let's start using the testbed; more information about the GUI and other scripts follow these test scenes.
+Let's start using __instant-ngp__; more information about the GUI and other scripts follows these test scenes.
 
 ### NeRF fox
 
 One test scene is provided in this repository, using a small number of frames from a casually captured phone video:
 
 ```sh
-instant-ngp$ ./build/testbed --scene data/nerf/fox
+instant-ngp$ ./build/instant-ngp --scene data/nerf/fox
 ```
 
 On Windows you need to reverse the slashes here (and below), i.e.:
 
 ```sh
-instant-ngp> .\build\testbed --scene data\nerf\fox
+instant-ngp> .\build\instant-ngp --scene data\nerf\fox
 ```
 
 <img src="docs/assets_readme/fox.png"/>
@@ -114,7 +114,7 @@ Alternatively, download any NeRF-compatible scene (e.g. from the [NeRF authors'
 Now you can run:
 
 ```sh
-instant-ngp$ ./build/testbed --scene data/nerf_synthetic/lego/transforms_train.json
+instant-ngp$ ./build/instant-ngp --scene data/nerf_synthetic/lego/transforms_train.json
 ```
 
 **[To prepare your own dataset for use with our NeRF implementation, click here.](docs/nerf_dataset_tips.md)** See also [this video](https://www.youtube.com/watch?v=8GbENSmdVeE) for a guided walkthrough.
@@ -122,7 +122,7 @@ instant-ngp$ ./build/testbed --scene data/nerf_synthetic/lego/transforms_train.j
 ### SDF armadillo
 
 ```sh
-instant-ngp$ ./build/testbed --scene data/sdf/armadillo.obj
+instant-ngp$ ./build/instant-ngp --scene data/sdf/armadillo.obj
 ```
 
 <img src="docs/assets_readme/armadillo.png"/>
@@ -130,7 +130,7 @@ instant-ngp$ ./build/testbed --scene data/sdf/armadillo.obj
 ### Image of Einstein
 
 ```sh
-instant-ngp$ ./build/testbed --scene data/image/albert.exr
+instant-ngp$ ./build/instant-ngp --scene data/image/albert.exr
 ```
 
 <img src="docs/assets_readme/albert.png"/>
@@ -138,7 +138,7 @@ instant-ngp$ ./build/testbed --scene data/image/albert.exr
 To reproduce the gigapixel results, download, for example, [the Tokyo image](https://www.flickr.com/photos/trevor_dobson_inefekt69/29314390837) and convert it to `.bin` using the `scripts/convert_image.py` script. This custom format improves compatibility and loading speed when resolution is high. Now you can run:
 
 ```sh
-instant-ngp$ ./build/testbed --scene data/image/tokyo.bin
+instant-ngp$ ./build/instant-ngp --scene data/image/tokyo.bin
 ```
 
 
@@ -147,14 +147,14 @@ instant-ngp$ ./build/testbed --scene data/image/tokyo.bin
 Download the [nanovdb volume for the Disney cloud](https://drive.google.com/drive/folders/1SuycSAOSG64k2KLV7oWgyNWyCvZAkafK?usp=sharing), which is derived [from here](https://disneyanimation.com/data-sets/?drawer=/resources/clouds/) ([CC BY-SA 3.0](https://media.disneyanimation.com/uploads/production/data_set_asset/6/asset/License_Cloud.pdf)).
 
 ```sh
-instant-ngp$ ./build/testbed --mode volume --scene data/volume/wdas_cloud_quarter.nvdb
+instant-ngp$ ./build/instant-ngp --mode volume --scene data/volume/wdas_cloud_quarter.nvdb
 ```
 <img src="docs/assets_readme/cloud.png"/>
 
 
-### Testbed controls
+### GUI controls
 
-Here are the main keyboard controls for the testbed application.
+Here are the main keyboard controls for the __instant-ngp__ application.
 
 | Key             | Meaning       |
 | :-------------: | ------------- |
@@ -171,7 +171,7 @@ Here are the main keyboard controls for the testbed application.
 | , / .           | Shows the previous / next visualized layer; hit M to escape. |
 | 1-8             | Switches among various render modes, with 2 being the standard one. You can see the list of render mode names in the control interface. |
 
-There are many controls in the __instant-ngp__ GUI when the testbed program is run.
+There are many controls in the __instant-ngp__ GUI.
 First, note that this GUI can be moved and resized, as can the "Camera path" GUI (which first must be expanded to be used).
 
 Some popular user controls in __instant-ngp__ are:
@@ -185,10 +185,10 @@ The "Camera path" GUI lets you set frames along a path. "Add from cam" is the ma
 
 ## Python bindings
 
-To conduct controlled experiments in an automated fashion, all features from the interactive testbed (and more!) have Python bindings that can be easily instrumented.
-For an example of how the `./build/testbed` application can be implemented and extended from within Python, see `./scripts/run.py`, which supports a superset of the command line arguments that `./build/testbed` does.
+To conduct controlled experiments in an automated fashion, all features from the interactive GUI (and more!) have Python bindings that can be easily instrumented.
+For an example of how the `./build/instant-ngp` application can be reimplemented and extended from within Python, see `./scripts/run.py`, which supports a superset of the command line arguments that `./build/instant-ngp` does.
 
-Here is a typical command line using `scripts/run.py` to generate a 5-second flythrough of the fox dataset to the (default) file `video.mp4`, after using the testbed to save a (default) NeRF snapshot `base.msgpack` and a set of camera key frames: (see [this video](https://www.youtube.com/watch?v=8GbENSmdVeE) for a guided walkthrough)
+Here is a typical command line using `scripts/run.py` to generate a 5-second flythrough of the fox dataset to the (default) file `video.mp4`, after using the GUI to save a (default) NeRF snapshot `base.msgpack` and a set of camera key frames (see [this video](https://www.youtube.com/watch?v=8GbENSmdVeE) for a guided walkthrough):
 
 ```sh
 instant-ngp$ python scripts/run.py --mode nerf --scene data/nerf/fox --load_snapshot data/nerf/fox/base.msgpack --video_camera_path data/nerf/fox/base_cam.json --video_n_seconds 5 --video_fps 60 --width 1920 --height 1080
@@ -203,7 +203,7 @@ Happy hacking!
 
 __Q:__ How can I run __instant-ngp__ in headless mode?
 
-__A:__ Use `./build/testbed --no-gui` or `python scripts/run.py`. You can also compile without GUI via `cmake -DNGP_BUILD_WITH_GUI=off ...`
+__A:__ Use `./build/instant-ngp --no-gui` or `python scripts/run.py`. You can also compile without the GUI via `cmake -DNGP_BUILD_WITH_GUI=off ...`
 
 ##
 __Q:__ Does this codebase run on [Google Colab](https://colab.research.google.com/)?
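The Python-bindings paragraph above defers to `scripts/run.py`; for orientation, here is a minimal sketch of the headless training loop that script builds on. The names (`ngp.Testbed`, `TestbedMode.Nerf`, `load_training_data`, `shall_train`, `frame`, `training_step`, `save_snapshot`) are assumed from `scripts/run.py` as of this revision, not from a documented API:

```python
import sys
sys.path.append("build")  # pyngp is built into build/ next to the executable
import pyngp as ngp

# Train a NeRF on the fox scene headlessly and save a snapshot, as run.py does.
testbed = ngp.Testbed(ngp.TestbedMode.Nerf)
testbed.load_training_data("data/nerf/fox")
testbed.shall_train = True

n_steps = 2000
while testbed.frame():  # each call advances training (and rendering, if a window is open)
    if testbed.training_step >= n_steps:
        break

# Second argument: whether to also serialize optimizer state into the snapshot.
testbed.save_snapshot("data/nerf/fox/base.msgpack", False)
```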
diff --git a/docs/nerf_dataset_tips.md b/docs/nerf_dataset_tips.md
index acedc7f9bd20c8a0689e55e06c71476fb3c5f1f0..47de07acc1b8e0ed0d733034e9ca7aacc5730272 100644
--- a/docs/nerf_dataset_tips.md
+++ b/docs/nerf_dataset_tips.md
@@ -84,7 +84,7 @@ The `aabb_scale` parameter is the most important `instant-ngp` specific paramete
 Assuming success, you can now train your NeRF model as follows, starting in the `instant-ngp` folder:
 
 ```sh
-instant-ngp$ ./build/testbed --mode nerf --scene [path to training data folder containing transforms.json]
+instant-ngp$ ./build/instant-ngp --mode nerf --scene [path to training data folder containing transforms.json]
 ```
 
 ### Record3D
@@ -102,7 +102,7 @@ With an >=iPhone 12 Pro, one can use [Record3D](https://record3d.app/) to collec
 
 5. Launch Instant-NGP training:
 	```
-	./build/testbed --scene path/to/data
+	./build/instant-ngp --scene path/to/data
 	```
 
 ## Tips for NeRF training data
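Since the hunk header above flags `aabb_scale` as the most important `instant-ngp`-specific parameter, here is a hedged sketch of adjusting it in a dataset's `transforms.json` before training. The `aabb_scale` and `frames` fields come from the docs; the dataset path is hypothetical:

```python
import json

path = "data/myscene/transforms.json"  # hypothetical dataset, e.g. from scripts/colmap2nerf.py
with open(path) as f:
    transforms = json.load(f)

# aabb_scale must be a power of two (capped at 16 at this point in the codebase);
# larger values enlarge the region in which the model may place geometry,
# which helps natural scenes with visible background far from the subject.
transforms["aabb_scale"] = 4
print(f"{len(transforms['frames'])} frames, aabb_scale={transforms['aabb_scale']}")

with open(path, "w") as f:
    json.dump(transforms, f, indent=2)
```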
diff --git a/notebooks/instant_ngp.ipynb b/notebooks/instant_ngp.ipynb
index 9bf7093b5c40713f88121dc12b658d0928dc0df3..55590117cf7e4be1bc7582c8a6cb00e59ba54ad1 100644
--- a/notebooks/instant_ngp.ipynb
+++ b/notebooks/instant_ngp.ipynb
@@ -2,6 +2,9 @@
   "cells": [
     {
       "cell_type": "markdown",
+      "metadata": {
+        "id": "-fAjMk8jDvp5"
+      },
       "source": [
         "# Instant-ngp \n",
         "\n",
@@ -12,28 +15,25 @@
         "It has been tested on a GTX 1050ti in the local machine and an assigned Tesla T4 in the remote one.\n",
         "\n",
         "Based on this [notebook](https://colab.research.google.com/drive/10TgQ4gyVejlHiinrmm5XOvQQmgVziK3i?usp=sharing) by [@myagues](https://github.com/NVlabs/instant-ngp/issues/6#issuecomment-1016397579), the main differences being the addition of steps 3 and 4 to ensure compatibility between the local machine and the models trained in the remote machine, of step 10 to render a video from the scene, and a more guided approach."
-      ],
-      "metadata": {
-        "id": "-fAjMk8jDvp5"
-      }
+      ]
     },
     {
       "cell_type": "markdown",
-      "source": [
-        "## 1.Connect to a GPU runtime"
-      ],
       "metadata": {
         "id": "SxsmRf03DFYe"
-      }
+      },
+      "source": [
+        "## 1.Connect to a GPU runtime"
+      ]
     },
     {
       "cell_type": "markdown",
-      "source": [
-        "Connect your colab session to a GPU runtime and check that you have been assigned a GPU. It should have a minimum of 8GB of available memory."
-      ],
       "metadata": {
         "id": "NyjFWI3WDPBr"
-      }
+      },
+      "source": [
+        "Connect your colab session to a GPU runtime and check that you have been assigned a GPU. It should have a minimum of 8GB of available memory."
+      ]
     },
     {
       "cell_type": "code",
@@ -47,8 +47,8 @@
       },
       "outputs": [
         {
-          "output_type": "stream",
           "name": "stdout",
+          "output_type": "stream",
           "text": [
             "Fri Jul 29 20:15:33 2022       \n",
             "+-----------------------------------------------------------------------------+\n",
@@ -79,12 +79,12 @@
     },
     {
       "cell_type": "markdown",
-      "source": [
-        "## 2. Install dependencies and clone the instant-ngp repo"
-      ],
       "metadata": {
         "id": "Da9sDpM-DXps"
-      }
+      },
+      "source": [
+        "## 2. Install dependencies and clone the instant-ngp repo"
+      ]
     },
     {
       "cell_type": "code",
@@ -98,8 +98,8 @@
       },
       "outputs": [
         {
-          "output_type": "stream",
           "name": "stdout",
+          "output_type": "stream",
           "text": [
             "\u001b[33m\r0% [Working]\u001b[0m\r            \rGet:1 http://security.ubuntu.com/ubuntu bionic-security InRelease [88.7 kB]\n",
             "\u001b[33m\r0% [Connecting to archive.ubuntu.com (185.125.190.39)] [1 InRelease 14.2 kB/88.\u001b[0m\r                                                                               \rGet:2 https://cloud.r-project.org/bin/linux/ubuntu bionic-cran40/ InRelease [3,626 B]\n",
@@ -417,8 +417,8 @@
       },
       "outputs": [
         {
-          "output_type": "stream",
           "name": "stdout",
+          "output_type": "stream",
           "text": [
             "Cloning into 'instant-ngp'...\n",
             "remote: Enumerating objects: 2473, done.\u001b[K\n",
@@ -531,23 +531,20 @@
     },
     {
       "cell_type": "markdown",
+      "metadata": {
+        "id": "SF4CRM2-rqn1"
+      },
       "source": [
         "## 3. Set compute capability\n",
         "Find the compute capability of the GPU in your **local** machine in the following link:\n",
         "https://developer.nvidia.com/cuda-gpus\n",
         "\n",
         "You need this to be able to open your trained models in `testbed` inside your local machine later on, so you can explore them or trace a camera path in order to generate a video from your scene."
-      ],
-      "metadata": {
-        "id": "SF4CRM2-rqn1"
-      }
+      ]
     },
     {
       "cell_type": "code",
-      "source": [
-        "compute_capability = \"61\" #@param [50, 52, 60, 61, 70, 72, 75, 80, 86, 87]\n",
-        "%env TCNN_CUDA_ARCHITECTURES=$compute_capability\n"
-      ],
+      "execution_count": null,
       "metadata": {
         "colab": {
           "base_uri": "https://localhost:8080/"
@@ -555,37 +552,35 @@
         "id": "Yf9H-wO0o1Ax",
         "outputId": "e6332360-f07d-487b-abda-9ce76ed379fc"
       },
-      "execution_count": null,
       "outputs": [
         {
-          "output_type": "stream",
           "name": "stdout",
+          "output_type": "stream",
           "text": [
             "env: TCNN_CUDA_ARCHITECTURES=61\n"
           ]
         }
+      ],
+      "source": [
+        "compute_capability = \"61\" #@param [50, 52, 60, 61, 70, 72, 75, 80, 86, 87]\n",
+        "%env TCNN_CUDA_ARCHITECTURES=$compute_capability\n"
       ]
     },
     {
       "cell_type": "markdown",
+      "metadata": {
+        "id": "X6PLafxjtoc1"
+      },
       "source": [
         "## 4. Set the right network configuration\n",
         "For compatibility between the model trained here and the local machine, a network with FP32 or FP16 is chosen.\n",
         "\n",
         "https://docs.nvidia.com/deeplearning/tensorrt/support-matrix/index.html#hardware-precision-matrix "
-      ],
-      "metadata": {
-        "id": "X6PLafxjtoc1"
-      }
+      ]
     },
     {
       "cell_type": "code",
-      "source": [
-        "network_type = \"FullyFusedMLP\" if int(compute_capability) >= 70 else \"CutlassMLP\"\n",
-        "print(f\"Using {network_type}\")\n",
-        "%env NN_CONFIG_PATH = ./configs/nerf/base.json\n",
-        "!jq '.network.otype = \"CutlassMLP\" | .rgb_network.otype = \"CutlassMLP\"' $NN_CONFIG_PATH | sponge $NN_CONFIG_PATH"
-      ],
+      "execution_count": null,
       "metadata": {
         "colab": {
           "base_uri": "https://localhost:8080/"
@@ -593,26 +588,31 @@
         "id": "KFZAbFtUqQkc",
         "outputId": "92cd4308-62ef-465f-d996-1ed301f1c4f4"
       },
-      "execution_count": null,
       "outputs": [
         {
-          "output_type": "stream",
           "name": "stdout",
+          "output_type": "stream",
           "text": [
             "Using CutlassMLP\n",
             "env: NN_CONFIG_PATH=./configs/nerf/base.json\n"
           ]
         }
+      ],
+      "source": [
+        "network_type = \"FullyFusedMLP\" if int(compute_capability) >= 70 else \"CutlassMLP\"\n",
+        "print(f\"Using {network_type}\")\n",
+        "%env NN_CONFIG_PATH = ./configs/nerf/base.json\n",
+        "!jq '.network.otype = \"CutlassMLP\" | .rgb_network.otype = \"CutlassMLP\"' $NN_CONFIG_PATH | sponge $NN_CONFIG_PATH"
       ]
     },
     {
       "cell_type": "markdown",
-      "source": [
-        "## 5. Build the project and install python requirements"
-      ],
       "metadata": {
         "id": "P9XhY2souWum"
-      }
+      },
+      "source": [
+        "## 5. Build the project and install python requirements"
+      ]
     },
     {
       "cell_type": "code",
@@ -626,8 +626,8 @@
       },
       "outputs": [
         {
-          "output_type": "stream",
           "name": "stdout",
+          "output_type": "stream",
           "text": [
             "-- Obtained target architecture from environment variable TCNN_CUDA_ARCHITECTURES=61\n",
             "-- Targeting GPU architectures: 61\n",
@@ -682,8 +682,8 @@
       },
       "outputs": [
         {
-          "output_type": "stream",
           "name": "stdout",
+          "output_type": "stream",
           "text": [
             "\u001b[35m\u001b[1mConsolidate compiler generated dependencies of target fmt\u001b[0m\n",
             "[  8%] Built target fmt\n",
@@ -5644,8 +5644,8 @@
       },
       "outputs": [
         {
-          "output_type": "stream",
           "name": "stdout",
+          "output_type": "stream",
           "text": [
             "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n",
             "Collecting commentjson~=0.9.0\n",
@@ -5701,7 +5701,6 @@
           ]
         },
         {
-          "output_type": "display_data",
           "data": {
             "application/vnd.colab-display-data+json": {
               "pip_warning": {
@@ -5712,7 +5711,8 @@
               }
             }
           },
-          "metadata": {}
+          "metadata": {},
+          "output_type": "display_data"
         }
       ],
       "source": [
@@ -5721,6 +5721,9 @@
     },
     {
       "cell_type": "markdown",
+      "metadata": {
+        "id": "cHLYSiD05EnL"
+      },
       "source": [
         "## 6. [LOCAL MACHINE] Run COLMAP on your scene\n",
         "COLMAP doesn't work on machines without a GUI.\n",
@@ -5728,43 +5731,43 @@
         "Go to your local machine and follow the [instructions](https://github.com/NVlabs/instant-ngp/blob/master/docs/nerf_dataset_tips.md#preparing-new-nerf-datasets) to run COLMAP from a video or a set of images to generate camera positions from your scene.\n",
         "\n",
         "After this, you should have an images folder, with the images of your scene, and a `transforms.json` file with the camera information extracted by COLMAP."
-      ],
-      "metadata": {
-        "id": "cHLYSiD05EnL"
-      }
+      ]
     },
     {
       "cell_type": "markdown",
-      "source": [
-        "## 7. Upload your scene"
-      ],
       "metadata": {
         "id": "ZQP4PAyru3KA"
-      }
+      },
+      "source": [
+        "## 7. Upload your scene"
+      ]
     },
     {
       "cell_type": "markdown",
-      "source": [
-        "Mount your google drive"
-      ],
       "metadata": {
         "id": "h3Tl_nNpzfPR"
-      }
+      },
+      "source": [
+        "Mount your google drive"
+      ]
     },
     {
       "cell_type": "code",
-      "source": [
-        "from google.colab import drive\n",
-        "drive.mount('/content/drive')"
-      ],
+      "execution_count": null,
       "metadata": {
         "id": "a-CfnpVUze1G"
       },
-      "execution_count": null,
-      "outputs": []
+      "outputs": [],
+      "source": [
+        "from google.colab import drive\n",
+        "drive.mount('/content/drive')"
+      ]
     },
     {
       "cell_type": "markdown",
+      "metadata": {
+        "id": "WOx86Jz5xOQP"
+      },
       "source": [
         "Then upload the `images` folder and the output of COLMAP, `transforms.json`, to your drive. The structure should be similar to the following:\n",
         "```\n",
@@ -5776,62 +5779,54 @@
         "    └── transforms.json\n",
         "```\n",
         "\n"
-      ],
-      "metadata": {
-        "id": "WOx86Jz5xOQP"
-      }
+      ]
     },
     {
       "cell_type": "markdown",
-      "source": [
-        "Enter the path to your scene"
-      ],
       "metadata": {
         "id": "iNhWLCgH20-g"
-      }
+      },
+      "source": [
+        "Enter the path to your scene"
+      ]
     },
     {
       "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "ayZ2gWkTz3sU"
+      },
+      "outputs": [],
       "source": [
         "import os\n",
         "scene_path = \"/content/drive/MyDrive/nerf_scenes/fox\" #@param {type:\"string\"}\n",
         "if not os.path.isdir(scene_path):\n",
         "  raise NotADirectoryError(scene_path)"
-      ],
-      "metadata": {
-        "id": "ayZ2gWkTz3sU"
-      },
-      "execution_count": null,
-      "outputs": []
+      ]
     },
     {
       "cell_type": "markdown",
-      "source": [
-        "## 8. Train a model on your scene!"
-      ],
       "metadata": {
         "id": "YPr9nJ-w2_0J"
-      }
+      },
+      "source": [
+        "## 8. Train a model on your scene!"
+      ]
     },
     {
       "cell_type": "code",
-      "source": [
-        "train_steps = 2000  #@param {type:\"integer\"}\n",
-        "snapshot_path = os.path.join(scene_path, f\"{train_steps}.msgpack\")\n",
-        "!python ./scripts/run.py --scene {scene_path} --mode nerf --n_steps {train_steps} --save_snapshot {snapshot_path}"
-      ],
+      "execution_count": null,
       "metadata": {
-        "id": "aijHZB0zJwWB",
         "colab": {
           "base_uri": "https://localhost:8080/"
         },
+        "id": "aijHZB0zJwWB",
         "outputId": "b947a4a1-e158-47a5-a4b9-a2e7d4db9290"
       },
-      "execution_count": null,
       "outputs": [
         {
-          "output_type": "stream",
           "name": "stdout",
+          "output_type": "stream",
           "text": [
             "\u001b[0m22:27:10 \u001b[0;36mINFO     \u001b[0mLoading NeRF dataset from\u001b[K\u001b[0m\n",
             "22:27:10 \u001b[0;36mINFO     \u001b[0m  /content/drive/MyDrive/nerf_scenes/fox/transforms.json\u001b[K\u001b[0m\n",
@@ -5848,10 +5843,19 @@
             "\u001b[0m"
           ]
         }
+      ],
+      "source": [
+        "train_steps = 2000  #@param {type:\"integer\"}\n",
+        "snapshot_path = os.path.join(scene_path, f\"{train_steps}.msgpack\")\n",
+        "!python ./scripts/run.py --scene {scene_path} --mode nerf --n_steps {train_steps} --save_snapshot {snapshot_path}"
       ]
     },
     {
+      "attachments": {},
       "cell_type": "markdown",
+      "metadata": {
+        "id": "RWuAHgOw8M4s"
+      },
       "source": [
         "## 9. [LOCAL MACHINE] Generate a camera path\n",
         "\n",
@@ -5859,67 +5863,55 @@
         "\n",
         "Example command:\n",
         "```\n",
-        "./build/testbed --scene data/nerf/fox --no-train --snapshot /data/nerf/fox/2000.msgpack\n",
+        "./build/instant-ngp --scene data/nerf/fox --no-train --snapshot /data/nerf/fox/2000.msgpack\n",
         "```\n",
         "\n",
         "After you're done, **upload `base_cam.json` to the root folder of your scene.**"
-      ],
-      "metadata": {
-        "id": "RWuAHgOw8M4s"
-      }
+      ]
     },
     {
       "cell_type": "markdown",
-      "source": [
-        "## 10. Render video"
-      ],
       "metadata": {
         "id": "L5XVO_oi-riY"
-      }
+      },
+      "source": [
+        "## 10. Render video"
+      ]
     },
     {
       "cell_type": "markdown",
-      "source": [
-        "Make sure `base_cam.json` exists:"
-      ],
       "metadata": {
         "id": "x_2t4NHHAvn5"
-      }
+      },
+      "source": [
+        "Make sure `base_cam.json` exists:"
+      ]
     },
     {
       "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "04Yt6prm_FJI"
+      },
+      "outputs": [],
       "source": [
         "video_camera_path = os.path.join(scene_path, \"base_cam.json\")\n",
         "if not os.path.isfile(video_camera_path):\n",
         "  raise FileNotFoundError(video_camera_path)"
-      ],
-      "metadata": {
-        "id": "04Yt6prm_FJI"
-      },
-      "execution_count": null,
-      "outputs": []
+      ]
     },
     {
       "cell_type": "markdown",
-      "source": [
-        "Render the video"
-      ],
       "metadata": {
         "id": "xdFuUotyA0HV"
-      }
+      },
+      "source": [
+        "Render the video"
+      ]
     },
     {
       "cell_type": "code",
-      "source": [
-        "video_n_seconds = 5 #@param {type:\"integer\"}\n",
-        "video_fps = 25 #@param {type:\"integer\"}\n",
-        "width = 720 #@param {type:\"integer\"}\n",
-        "height = 720 #@param {type:\"integer\"}\n",
-        "output_video_path = os.path.join(scene_path, \"output_video.mp4\")\n",
-        "\n",
-        "!python scripts/run.py --mode nerf --scene {scene_path} --load_snapshot {snapshot_path} --video_camera_path {video_camera_path} --video_n_seconds 2 --video_fps 25 --width 720 --height 720 --video_output {output_video_path}\n",
-        "print(f\"Generated video saved to:\\n{output_video_path}\")"
-      ],
+      "execution_count": null,
       "metadata": {
         "colab": {
           "base_uri": "https://localhost:8080/"
@@ -5927,11 +5919,10 @@
         "id": "d3XJeTeF1yJE",
         "outputId": "e493bdd8-903e-4c8e-ab01-8c8baed8b5dc"
       },
-      "execution_count": null,
       "outputs": [
         {
-          "output_type": "stream",
           "name": "stdout",
+          "output_type": "stream",
           "text": [
             "\u001b[0m22:32:31 \u001b[0;36mINFO     \u001b[0mLoading NeRF dataset from\u001b[K\u001b[0m\n",
             "22:32:31 \u001b[0;36mINFO     \u001b[0m  /content/drive/MyDrive/nerf_scenes/fox/transforms.json\u001b[K\u001b[0m\n",
@@ -6000,10 +5991,21 @@
             "/content/drive/MyDrive/nerf_scenes/fox/output_video.mp4\n"
           ]
         }
+      ],
+      "source": [
+        "video_n_seconds = 5 #@param {type:\"integer\"}\n",
+        "video_fps = 25 #@param {type:\"integer\"}\n",
+        "width = 720 #@param {type:\"integer\"}\n",
+        "height = 720 #@param {type:\"integer\"}\n",
+        "output_video_path = os.path.join(scene_path, \"output_video.mp4\")\n",
+        "\n",
+        "!python scripts/run.py --mode nerf --scene {scene_path} --load_snapshot {snapshot_path} --video_camera_path {video_camera_path} --video_n_seconds 2 --video_fps 25 --width 720 --height 720 --video_output {output_video_path}\n",
+        "print(f\"Generated video saved to:\\n{output_video_path}\")"
       ]
     }
   ],
   "metadata": {
+    "accelerator": "GPU",
     "colab": {
       "collapsed_sections": [],
       "name": "instant_ngp.ipynb",
@@ -6011,13 +6013,19 @@
     },
     "kernelspec": {
       "display_name": "Python 3",
+      "language": "python",
       "name": "python3"
     },
     "language_info": {
-      "name": "python"
+      "name": "python",
+      "version": "3.10.5 (tags/v3.10.5:f377153, Jun  6 2022, 16:14:13) [MSC v.1929 64 bit (AMD64)]"
     },
-    "accelerator": "GPU"
+    "vscode": {
+      "interpreter": {
+        "hash": "80f0ca567e8a8332be8d0227e77114b80c729e82298f4777b19db59a6217bb0d"
+      }
+    }
   },
   "nbformat": 4,
   "nbformat_minor": 0
-}
\ No newline at end of file
+}
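Step 3 of the notebook above has you look up the local GPU's compute capability by hand at developer.nvidia.com. The same value can be queried programmatically — a sketch that assumes PyTorch is installed (the notebook itself does not depend on it):

```python
import torch

# Compute capability of GPU 0, e.g. (6, 1) for a GTX 1050 Ti -> "61".
major, minor = torch.cuda.get_device_capability(0)
compute_capability = f"{major}{minor}"
print(f"TCNN_CUDA_ARCHITECTURES={compute_capability}")

# Mirrors the notebook's step 4: FullyFusedMLP needs tensor cores (capability >= 70).
network_type = "FullyFusedMLP" if int(compute_capability) >= 70 else "CutlassMLP"
print(f"Using {network_type}")
```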
diff --git a/scripts/run.py b/scripts/run.py
index b4689f8f87bfc10d769e817e9ea8aa68635f42b9..56636dbc435df7521e373aaf3ae4893a4a7c3e06 100644
--- a/scripts/run.py
+++ b/scripts/run.py
@@ -25,7 +25,7 @@ from tqdm import tqdm
 import pyngp as ngp # noqa
 
 def parse_args():
-	parser = argparse.ArgumentParser(description="Run neural graphics primitives testbed with additional configuration & output options")
+	parser = argparse.ArgumentParser(description="Run instant neural graphics primitives with additional configuration & output options")
 
 	parser.add_argument("--scene", "--training_data", default="", help="The scene to load. Can be the scene's name or a full path to the training data.")
 	parser.add_argument("--mode", default="", const="nerf", nargs="?", choices=["nerf", "sdf", "image", "volume"], help="Mode can be 'nerf', 'sdf', 'image' or 'volume'. Inferred from the scene if unspecified.")
diff --git a/src/main.cu b/src/main.cu
index 445fdd2a2bf7c6939dfd007983d0eaaffba9e74d..480d3afec0040b5b1261a6a24fefec1d47eea94c 100644
--- a/src/main.cu
+++ b/src/main.cu
@@ -28,8 +28,8 @@ namespace fs = ::filesystem;
 
 int main(int argc, char** argv) {
 	ArgumentParser parser{
-		"neural graphics primitives\n"
-		"version " NGP_VERSION,
+		"Instant Neural Graphics Primitives\n"
+		"Version " NGP_VERSION,
 		"",
 	};
 
@@ -99,7 +99,7 @@ int main(int argc, char** argv) {
 	Flag version_flag{
 		parser,
 		"VERSION",
-		"Display the version of neural graphics primitives.",
+		"Display the version of instant neural graphics primitives.",
 		{'v', "version"},
 	};
 
@@ -121,7 +121,7 @@ int main(int argc, char** argv) {
 	}
 
 	if (version_flag) {
-		tlog::none() << "neural graphics primitives version " NGP_VERSION;
+		tlog::none() << "Instant Neural Graphics Primitives v" NGP_VERSION;
 		return 0;
 	}
 
diff --git a/src/testbed.cu b/src/testbed.cu
index b7bff31029fd531cbbc1b2f5eb3bc4f68a6d8d6e..6daccd30c002a576d297af0ff864158ed1700600 100644
--- a/src/testbed.cu
+++ b/src/testbed.cu
@@ -1905,7 +1905,7 @@ void Testbed::init_window(int resw, int resh, bool hidden, bool second_window) {
 	glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
 	glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GLFW_TRUE);
 	glfwWindowHint(GLFW_VISIBLE, hidden ? GLFW_FALSE : GLFW_TRUE);
-	std::string title = "Neural graphics primitives (";
+	std::string title = "Instant Neural Graphics Primitives v" NGP_VERSION " (";
 	switch (m_testbed_mode) {
 		case ETestbedMode::Image: title += "Image"; break;
 		case ETestbedMode::Sdf: title += "SDF"; break;