diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..d645695673349e3947e8e5ae42332d0ac3164cd7
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/config/config.rviz b/config/config.rviz
new file mode 100644
index 0000000000000000000000000000000000000000..08bd9f18b55f5cd1dd305bd8b205c5fea2647ade
--- /dev/null
+++ b/config/config.rviz
@@ -0,0 +1,164 @@
+Panels:
+  - Class: rviz_common/Displays
+    Help Height: 78
+    Name: Displays
+    Property Tree Widget:
+      Expanded:
+        - /Global Options1
+        - /Status1
+        - /Image1
+      Splitter Ratio: 0.5
+    Tree Height: 94
+  - Class: rviz_common/Selection
+    Name: Selection
+  - Class: rviz_common/Tool Properties
+    Expanded:
+      - /2D Goal Pose1
+      - /Publish Point1
+    Name: Tool Properties
+    Splitter Ratio: 0.5886790156364441
+  - Class: rviz_common/Views
+    Expanded:
+      - /Current View1
+    Name: Views
+    Splitter Ratio: 0.5
+  - Class: rviz_common/Time
+    Experimental: false
+    Name: Time
+    SyncMode: 0
+    SyncSource: ""
+Visualization Manager:
+  Class: ""
+  Displays:
+    - Alpha: 0.5
+      Cell Size: 1
+      Class: rviz_default_plugins/Grid
+      Color: 160; 160; 164
+      Enabled: true
+      Line Style:
+        Line Width: 0.029999999329447746
+        Value: Lines
+      Name: Grid
+      Normal Cell Count: 0
+      Offset:
+        X: 0
+        Y: 0
+        Z: 0
+      Plane: XY
+      Plane Cell Count: 10
+      Reference Frame: <Fixed Frame>
+      Value: true
+    - Class: rviz_default_plugins/Image
+      Enabled: true
+      Max Value: 1
+      Median window: 5
+      Min Value: 0
+      Name: Image
+      Normalize Range: true
+      Topic:
+        Depth: 5
+        Durability Policy: Volatile
+        History Policy: Keep Last
+        Reliability Policy: Reliable
+        Value: /camera/depth
+      Value: true
+    - Class: rviz_default_plugins/Image
+      Enabled: true
+      Max Value: 1
+      Median window: 5
+      Min Value: 0
+      Name: Image
+      Normalize Range: true
+      Topic:
+        Depth: 5
+        Durability Policy: Volatile
+        History Policy: Keep Last
+        Reliability Policy: Reliable
+        Value: /camera/rgb
+      Value: true
+  Enabled: true
+  Global Options:
+    Background Color: 48; 48; 48
+    Fixed Frame: map
+    Frame Rate: 30
+  Name: root
+  Tools:
+    - Class: rviz_default_plugins/Interact
+      Hide Inactive Objects: true
+    - Class: rviz_default_plugins/MoveCamera
+    - Class: rviz_default_plugins/Select
+    - Class: rviz_default_plugins/FocusCamera
+    - Class: rviz_default_plugins/Measure
+      Line color: 128; 128; 0
+    - Class: rviz_default_plugins/SetInitialPose
+      Covariance x: 0.25
+      Covariance y: 0.25
+      Covariance yaw: 0.06853891909122467
+      Topic:
+        Depth: 5
+        Durability Policy: Volatile
+        History Policy: Keep Last
+        Reliability Policy: Reliable
+        Value: /initialpose
+    - Class: rviz_default_plugins/SetGoal
+      Topic:
+        Depth: 5
+        Durability Policy: Volatile
+        History Policy: Keep Last
+        Reliability Policy: Reliable
+        Value: /goal_pose
+    - Class: rviz_default_plugins/PublishPoint
+      Single click: true
+      Topic:
+        Depth: 5
+        Durability Policy: Volatile
+        History Policy: Keep Last
+        Reliability Policy: Reliable
+        Value: /clicked_point
+  Transformation:
+    Current:
+      Class: rviz_default_plugins/TF
+  Value: true
+  Views:
+    Current:
+      Class: rviz_default_plugins/Orbit
+      Distance: 10
+      Enable Stereo Rendering:
+        Stereo Eye Separation: 0.05999999865889549
+        Stereo Focal Distance: 1
+        Swap Stereo Eyes: false
+        Value: false
+      Focal Point:
+        X: 0
+        Y: 0
+        Z: 0
+      Focal Shape Fixed Size: true
+      Focal Shape Size: 0.05000000074505806
+      Invert Z Axis: false
+      Name: Current View
+      Near Clip Distance: 0.009999999776482582
+      Pitch: 0.785398006439209
+      Target Frame: <Fixed Frame>
+      Value: Orbit (rviz)
+      Yaw: 0.785398006439209
+    Saved: ~
+Window Geometry:
+  Displays:
+    collapsed: false
+  Height: 846
+  Hide Left Dock: false
+  Hide Right Dock: false
+  Image:
+    collapsed: false
+  QMainWindow State: 000000ff00000000fd000000040000000000000156000002b4fc020000000afb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003b000000e7000000c700fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000000a0049006d0061006700650100000128000000dd0000002800fffffffb0000000a0049006d006100670065010000020b000000e40000002800ffffff000000010000010f000002b4fc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073010000003b000002b4000000a000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000004b00000003efc0100000002fb0000000800540069006d00650100000000000004b00000025300fffffffb0000000800540069006d006501000000000000045000000000000000000000023f000002b400000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
+  Selection:
+    collapsed: false
+  Time:
+    collapsed: false
+  Tool Properties:
+    collapsed: false
+  Views:
+    collapsed: false
+  Width: 1200
+  X: 67
+  Y: 60
diff --git a/generated_materials/camera_path.json b/generated_materials/camera_path.json
new file mode 100644
index 0000000000000000000000000000000000000000..1c48c13729853e5b83723ef1147f06c12ddee4d3
--- /dev/null
+++ b/generated_materials/camera_path.json
@@ -0,0 +1,42 @@
+{
+    "camera_type": "perspective",
+    "render_height": 480,
+    "render_width": 640,
+    "camera_path": [
+        {
+            "fov": 70,
+            "aspect": 1.33333333333,
+            "camera_to_world": [
+                [
+                    -0.7712574369763269,
+                    0.1513242754712088,
+                    -0.6182741540464238,
+                    0.37396875264637797
+                ],
+                [
+                    -0.6360692717618674,
+                    -0.21990707861025355,
+                    0.7396328537169178,
+                    0.009873900166690274
+                ],
+                [
+                    -0.024038457293159843,
+                    0.9637125299746899,
+                    0.26585769152075983,
+                    -0.07500160249766082
+                ],
+                [
+                    0.0,
+                    0.0,
+                    0.0,
+                    1.0
+                ]
+            ]
+        }
+    ],
+    "fps": 24,
+    "seconds": 4,
+    "smoothness_value": 0.5,
+    "is_cycle": "false",
+    "crop": null
+}
\ No newline at end of file
diff --git a/generated_materials/nerf_image.png b/generated_materials/nerf_image.png
new file mode 100644
index 0000000000000000000000000000000000000000..f50e30ecbed3176c59367a8e4b1be2091df740b5
Binary files /dev/null and b/generated_materials/nerf_image.png differ
diff --git a/launch/__pycache__/basic_sim_example_launch.cpython-310.pyc b/launch/__pycache__/basic_sim_example_launch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c13c126399684f7c1289017b9ac5f90d9305862a
Binary files /dev/null and b/launch/__pycache__/basic_sim_example_launch.cpython-310.pyc differ
diff --git a/launch/basic_sim_example_launch.py b/launch/basic_sim_example_launch.py
new file mode 100644
index 0000000000000000000000000000000000000000..5e3b991f3e52518fa52b90dd566475b02d8cdd9c
--- /dev/null
+++ b/launch/basic_sim_example_launch.py
@@ -0,0 +1,41 @@
+from launch import LaunchDescription
+from launch_ros.actions import Node
+
+import os
+from ament_index_python.packages import get_package_share_directory
+
+
def generate_launch_description():
    """Assemble the nerf_sim launch description.

    Starts the four simulator nodes (depth, RGB, pose, point cloud) and an
    RViz2 instance preloaded with the package's config.rviz.

    Returns:
        LaunchDescription containing all five nodes.

    Raises:
        FileNotFoundError: if config.rviz is missing from the installed
            package share directory (e.g. the package was not rebuilt after
            adding the config).
    """
    rviz_config_path = os.path.join(
        get_package_share_directory('nerf_sim'), 'config', 'config.rviz')
    # Fail fast with a clear error instead of `assert`, which is stripped
    # when Python runs with optimizations (-O).
    if not os.path.exists(rviz_config_path):
        raise FileNotFoundError(f'RViz config not found: {rviz_config_path}')

    return LaunchDescription([
        Node(
            package='nerf_sim',
            executable='sim_depth_generator',
            name='sim_depth_generator'
        ),
        Node(
            package='nerf_sim',
            executable='sim_rgb_generator',
            name='sim_rgb_generator'
        ),
        Node(
            package='nerf_sim',
            executable='pose_generator',
            name='pose_generator'
        ),
        Node(
            package='nerf_sim',
            executable='pcd_generator',
            name='pcd_generator'
        ),
        Node(
            package='rviz2',
            executable='rviz2',
            name='rviz2',
            arguments=['-d', rviz_config_path]
        )
    ])
\ No newline at end of file
diff --git a/nerf_sim/__init__.py b/nerf_sim/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/nerf_sim/__pycache__/__init__.cpython-310.pyc b/nerf_sim/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..346949c37e9dcda88499b936a9c41f218e848f34
Binary files /dev/null and b/nerf_sim/__pycache__/__init__.cpython-310.pyc differ
diff --git a/nerf_sim/__pycache__/image_generator.cpython-310.pyc b/nerf_sim/__pycache__/image_generator.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..036ec4d4f09a06f22845bcb19aac8fe184590c82
Binary files /dev/null and b/nerf_sim/__pycache__/image_generator.cpython-310.pyc differ
diff --git a/nerf_sim/__pycache__/image_listener.cpython-310.pyc b/nerf_sim/__pycache__/image_listener.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6bbf18aba9b7b18a2b12c55d72c90da61959165f
Binary files /dev/null and b/nerf_sim/__pycache__/image_listener.cpython-310.pyc differ
diff --git a/nerf_sim/__pycache__/nerf_render.cpython-310.pyc b/nerf_sim/__pycache__/nerf_render.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..67c8120d4b5521aa01e2737672e9544f2e1be06d
Binary files /dev/null and b/nerf_sim/__pycache__/nerf_render.cpython-310.pyc differ
diff --git a/nerf_sim/__pycache__/pcd_publisher_node.cpython-310.pyc b/nerf_sim/__pycache__/pcd_publisher_node.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4cea66041ff744a63bec567f794f74aae6112038
Binary files /dev/null and b/nerf_sim/__pycache__/pcd_publisher_node.cpython-310.pyc differ
diff --git a/nerf_sim/__pycache__/pose_generator.cpython-310.pyc b/nerf_sim/__pycache__/pose_generator.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..67e6559c5b41dae210b955b205be963e4f746e07
Binary files /dev/null and b/nerf_sim/__pycache__/pose_generator.cpython-310.pyc differ
diff --git a/nerf_sim/__pycache__/sim_depth_generator.cpython-310.pyc b/nerf_sim/__pycache__/sim_depth_generator.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b2cda0000a9ab877dc1bc2beee547aa3b5f2a29b
Binary files /dev/null and b/nerf_sim/__pycache__/sim_depth_generator.cpython-310.pyc differ
diff --git a/nerf_sim/__pycache__/sim_image_generator.cpython-310.pyc b/nerf_sim/__pycache__/sim_image_generator.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e87c11bef31dc2f3e512a076113dae882515765e
Binary files /dev/null and b/nerf_sim/__pycache__/sim_image_generator.cpython-310.pyc differ
diff --git a/nerf_sim/__pycache__/sim_rgb_generator.cpython-310.pyc b/nerf_sim/__pycache__/sim_rgb_generator.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ac3c028df3e5426c0c50f9664ec583fa1895d07a
Binary files /dev/null and b/nerf_sim/__pycache__/sim_rgb_generator.cpython-310.pyc differ
diff --git a/nerf_sim/__pycache__/simple_image_generator.cpython-310.pyc b/nerf_sim/__pycache__/simple_image_generator.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1b5833e4225968ef4339fb50dda41c68a268e4dc
Binary files /dev/null and b/nerf_sim/__pycache__/simple_image_generator.cpython-310.pyc differ
diff --git a/nerf_sim/nerf_render.py b/nerf_sim/nerf_render.py
new file mode 100644
index 0000000000000000000000000000000000000000..5a4f746ffda7b0775e9a2199d11c94a3bedf722b
--- /dev/null
+++ b/nerf_sim/nerf_render.py
@@ -0,0 +1,134 @@
+import json
+import argparse
+import mediapy as media
+import numpy as np
+import torch
+
+# Typing
+from pathlib import Path
+from typing import Any, Dict, List, Literal, Optional, Union
+
+from rich.progress import (
+    track
+)
+
+from nerfstudio.cameras.camera_paths import (
+    get_path_from_json # TODO: remove for ros
+)
+
+from nerfstudio.cameras.cameras import Cameras
+from nerfstudio.pipelines.base_pipeline import Pipeline
+
+from nerfstudio.utils import colormaps
+from nerfstudio.utils.eval_utils import eval_setup
+
def _render_trajectory_video(
    pipeline: Pipeline,
    cameras: Cameras,
    output_filename: Path,
    rendered_output_names: List[str],
    crop_data=None,
    rendered_resolution_scaling_factor: float = 1.0,
    seconds: float = 5.0,
    output_format: Literal["images", "video"] = "video",
    image_format: Literal["jpeg", "png"] = "png",
    jpeg_quality: int = 100,
    colormap_options: colormaps.ColormapOptions = colormaps.ColormapOptions(),
) -> Optional[np.ndarray]:
    """Render model outputs along a camera trajectory.

    NOTE(review): despite the name (inherited from the upstream nerfstudio
    render script), this simulator variant returns the FIRST rendered frame
    as a numpy array when ``output_format == "images"`` and does nothing at
    all for ``"video"`` — it never writes files or videos.

    Args:
        pipeline: Pipeline to evaluate with.
        cameras: Cameras to render.
        output_filename: Output path; its stem names the image directory.
        rendered_output_names: Model outputs to visualise (e.g. "rgb", "depth").
        crop_data: Unused here; kept for signature compatibility with upstream.
        rendered_resolution_scaling_factor: Scaling factor applied to the
            camera image resolution before rendering.
        seconds: Nominal length of the output video (unused in this variant).
        output_format: Only "images" produces a result.
        image_format: Unused here; kept for upstream compatibility.
        jpeg_quality: Unused here; kept for upstream compatibility.
        colormap_options: Options for colormap applied to each output.

    Returns:
        The first rendered frame, with the requested outputs concatenated
        horizontally, when ``output_format == "images"``; otherwise None.
    """
    cameras.rescale_output_resolution(rendered_resolution_scaling_factor)
    cameras = cameras.to(pipeline.device)

    if output_format != "images":
        return None

    output_image_dir = output_filename.parent / output_filename.stem
    output_image_dir.mkdir(parents=True, exist_ok=True)

    for camera_idx in track(range(cameras.size), description=""):
        camera_ray_bundle = cameras.generate_rays(camera_indices=camera_idx, aabb_box=None)
        with torch.no_grad():
            outputs = pipeline.model.get_outputs_for_camera_ray_bundle(camera_ray_bundle)

        # Colormap each requested output and stitch them side by side.
        render_image = []
        for rendered_output_name in rendered_output_names:
            output_image = (
                colormaps.apply_colormap(
                    image=outputs[rendered_output_name],
                    colormap_options=colormap_options,
                )
                .cpu()
                .numpy()
            )
            render_image.append(output_image)
        # Return after the first camera: the simulator needs one frame per call.
        return np.concatenate(render_image, axis=1)
    return None
+                        
class RenderCameraPath():
    """Load a trained nerfstudio pipeline and render frames along a camera path.

    Attribute defaults mirror the upstream nerfstudio render command so the
    class stays drop-in compatible with it.
    """

    image_format: Literal["jpeg", "png"] = "jpeg"
    jpeg_quality: int = 100
    downscale_factor: float = 1.0
    eval_num_rays_per_chunk: Optional[int] = None
    colormap_options: colormaps.ColormapOptions = colormaps.ColormapOptions()
    load_config: Path
    output_path: Path = Path("renders/output.mp4")
    output_format: Literal["images", "video"] = "images"
    camera_path_filename: Path = Path("camera_path.json")
    # Set per-instance in __init__ (a class-level `= []` would be a shared
    # mutable default across all instances).
    rendered_output_names: List[str]

    def __init__(
        self,
        load_config: Optional[Path] = None,
        camera_path_filename: Optional[Path] = None,
    ):
        """Build the inference pipeline from a trained model config.

        Args:
            load_config: Path to the trained model's config.yml. Defaults to
                the original hard-coded checkpoint (TODO: expose as a ROS
                parameter instead of a baked-in path).
            camera_path_filename: Path to the camera_path.json describing the
                trajectory to render. Defaults to the original hard-coded path.
        """
        self.load_config = Path(load_config) if load_config is not None else Path(
            '/vol/research/K9/test-colmap/65904bca-e8fd-11ed-a05b-0242ac120003/nerfacto/65904bca-e8fd-11ed-a05b-0242ac120003/config.yml')
        self.camera_path_filename = Path(camera_path_filename) if camera_path_filename is not None else Path(
            '/workspace/ros2_iron/src/nerf_sim/generated_materials/camera_path.json')
        self.rendered_output_names = []
        # eval_setup returns (config, pipeline, checkpoint_path, step); only
        # the pipeline is needed for inference.
        _, self.pipeline, _, _ = eval_setup(
            self.load_config,
            eval_num_rays_per_chunk=self.eval_num_rays_per_chunk,
            test_mode="inference",
        )

    def run(self, render_output_names: Optional[List[str]] = None):
        """Render the first frame of the configured camera path.

        Args:
            render_output_names: Model outputs to render (e.g. ["rgb"] or
                ["depth"]). Defaults to an empty list, matching the original
                behaviour.

        Returns:
            The rendered frame produced by _render_trajectory_video.
        """
        # None-default instead of `= []` to avoid a shared mutable default.
        self.rendered_output_names = [] if render_output_names is None else render_output_names

        with open(self.camera_path_filename, "r", encoding="utf-8") as f:
            camera_path = json.load(f)
        seconds = camera_path["seconds"]
        cameras = get_path_from_json(camera_path)

        return _render_trajectory_video(
            self.pipeline,
            cameras,
            output_filename=self.output_path,
            rendered_output_names=self.rendered_output_names,
            rendered_resolution_scaling_factor=1.0 / self.downscale_factor,
            crop_data=None,  # we're not cropping the image
            seconds=seconds,
            output_format=self.output_format,
            image_format=self.image_format,
            jpeg_quality=self.jpeg_quality,
            colormap_options=self.colormap_options,
        )
+
diff --git a/nerf_sim/pcd_publisher_node.py b/nerf_sim/pcd_publisher_node.py
new file mode 100644
index 0000000000000000000000000000000000000000..711fceb739e94500db9b146d8519a762cf84e31a
--- /dev/null
+++ b/nerf_sim/pcd_publisher_node.py
@@ -0,0 +1,114 @@
+
+import sys
+import os
+
+import rclpy 
+from rclpy.node import Node
+import sensor_msgs.msg as sensor_msgs
+import std_msgs.msg as std_msgs
+
+import numpy as np
+import open3d as o3d
+
class PCDPublisher(Node):
    """Publishes a PLY point cloud on the 'pcd' topic at ~30 Hz."""

    def __init__(self):
        super().__init__('pcd_publisher_node')

        # NOTE(review): hard-coded to a specific workspace layout.
        pcd_path = '/workspace/ros2_iron/src/nerf_sim/test_materials/point_cloud.ply'

        # Read the point cloud with Open3D and keep it as an Nx3 numpy array.
        pcd = o3d.io.read_point_cloud(pcd_path)
        self.points = np.asarray(pcd.points)
        print(self.points.shape)

        # Publish sensor_msgs.PointCloud2 on 'pcd'; 10 is the QoS history
        # depth (comparable to the ROS 1 queue size).
        self.pcd_publisher = self.create_publisher(sensor_msgs.PointCloud2, 'pcd', 10)
        timer_period = 1 / 30.0
        self.timer = self.create_timer(timer_period, self.timer_callback)

        # Rotation matrix kept for optional spin visualization. It is NOT
        # applied in timer_callback — the original code contained only a
        # no-op self-assignment, so the published cloud is static.
        self.R = o3d.geometry.get_rotation_matrix_from_xyz([0, 0, np.pi / 48])

    def timer_callback(self):
        # Convert the numpy array to a PointCloud2 expressed in the fixed
        # 'map' frame (RViz's default fixed frame) and publish it.
        self.pcd = point_cloud(self.points, 'map')
        self.pcd_publisher.publish(self.pcd)
+
def point_cloud(points, parent_frame):
    """Build a sensor_msgs/PointCloud2 from an Nx3 array of xyz points.

    Args:
        points: Nx3 array of xyz positions.
        parent_frame: frame in which the point cloud is defined.

    Returns:
        sensor_msgs/PointCloud2 message.

    Code source:
        https://gist.github.com/pgorczak/5c717baa44479fa064eb8d33ea4587e0

    References:
        http://docs.ros.org/melodic/api/sensor_msgs/html/msg/PointCloud2.html
        http://docs.ros.org/melodic/api/sensor_msgs/html/msg/PointField.html
        http://docs.ros.org/melodic/api/std_msgs/html/msg/Header.html
    """
    # A PointCloud2 stores the cloud as one flat byte buffer; the field
    # descriptors below describe how each point is laid out inside it.
    ros_dtype = sensor_msgs.PointField.FLOAT32
    np_dtype = np.float32
    float_bytes = np.dtype(np_dtype).itemsize  # a float32 occupies 4 bytes

    payload = points.astype(np_dtype).tobytes()

    # One field per axis: x at offset 0, y at offset 4, z at offset 8.
    fields = []
    for index, axis in enumerate('xyz'):
        fields.append(sensor_msgs.PointField(
            name=axis, offset=index * float_bytes, datatype=ros_dtype, count=1))

    # The header records which coordinate frame the cloud is expressed in.
    header = std_msgs.Header(frame_id=parent_frame)

    point_bytes = 3 * float_bytes  # every point is three float32 values
    return sensor_msgs.PointCloud2(
        header=header,
        height=1,
        width=points.shape[0],
        is_dense=False,
        is_bigendian=False,
        fields=fields,
        point_step=point_bytes,
        row_step=point_bytes * points.shape[0],
        data=payload
    )
+
def main(args=None):
    """Entry point: spin the point-cloud publisher node until shutdown."""
    rclpy.init(args=args)
    node = PCDPublisher()
    rclpy.spin(node)
    # Explicit destruction is optional — the garbage collector would
    # otherwise clean the node up when the object is collected.
    node.destroy_node()
    rclpy.shutdown()


if __name__ == '__main__':
    main()
diff --git a/nerf_sim/pose_generator.py b/nerf_sim/pose_generator.py
new file mode 100644
index 0000000000000000000000000000000000000000..5daeadc9636736e864b960204652246f2e54ee68
--- /dev/null
+++ b/nerf_sim/pose_generator.py
@@ -0,0 +1,59 @@
+import rclpy
+from rclpy.node import Node
+import json
+import numpy as np
+
+from geometry_msgs.msg import Pose
+from scipy.spatial.transform import Rotation as R
+
+# Pose Generation is for Testing Purposes Only - Takes pose from file, converts to msg format and publishes it to topic
+
class MinimalPublisher(Node):
    """Reads a fixed camera pose from a JSON file and republishes it periodically.

    Testing-only helper: converts the file's 4x4 camera-to-world matrix into
    a geometry_msgs/Pose and publishes it on the 'pose' topic.
    """

    def __init__(self):
        super().__init__('minimal_publisher')
        self.publisher_ = self.create_publisher(Pose, 'pose', 1)
        timer_period = 10  # seconds
        self.timer = self.create_timer(timer_period, self.timer_callback)
        self.i = 0

        # Use a context manager so the file handle is closed promptly
        # (the original leaked it).
        # NOTE(review): path is hard-coded to a specific workspace layout.
        with open('/workspace/ros2_iron/src/nerf_sim/test_materials/left_fisheye.json', 'r') as orig:
            orig_json = json.load(orig)

        # testing using a single pose only
        self.pose = np.matrix(orig_json['camera_path'][0]['camera_to_world'])

    def timer_callback(self):
        msg = Pose()
        # Translation comes from the matrix's last column; the 0.5 offset on
        # x is a test perturbation.
        msg.position.x = self.pose[0, 3] + 0.5
        msg.position.y = self.pose[1, 3]
        msg.position.z = self.pose[2, 3]

        # Rotation: convert the 3x3 block to a quaternion (x, y, z, w order).
        rot_mat = self.pose[0:3, 0:3]
        r = R.from_matrix(rot_mat)
        quat = r.as_quat()

        msg.orientation.x = quat[0]
        msg.orientation.y = quat[1]
        msg.orientation.z = quat[2]
        msg.orientation.w = quat[3]
        self.publisher_.publish(msg)
        self.get_logger().info('Publishing: "%s"' % msg)
+
+
def main(args=None):
    """Entry point: spin the pose publisher node until shutdown."""
    rclpy.init(args=args)
    node = MinimalPublisher()
    rclpy.spin(node)
    # Explicit destruction is optional — garbage collection would
    # otherwise take care of the node object.
    node.destroy_node()
    rclpy.shutdown()


if __name__ == '__main__':
    main()
\ No newline at end of file
diff --git a/nerf_sim/sim_depth_generator.py b/nerf_sim/sim_depth_generator.py
new file mode 100644
index 0000000000000000000000000000000000000000..30e067e780556fc08cb19b56cc665f1f757cc345
--- /dev/null
+++ b/nerf_sim/sim_depth_generator.py
@@ -0,0 +1,106 @@
+import rclpy
+from rclpy.node import Node
+
+from cv_bridge import CvBridge
+from sensor_msgs.msg import Image
+from geometry_msgs.msg import Pose
+from scipy.spatial.transform import Rotation as R
+from nerf_sim.nerf_render import RenderCameraPath
+
+import cv2
+import json
+import numpy as np
+import mediapy as media
+
+
+# subscribe to pose coming in from topic
+# write out config file for nerf
+# run nerf to get image
+# publish image to topic
+
class MinimalPublisher(Node):
    """Renders a NeRF depth image for each incoming pose and publishes it.

    Pipeline: subscribe to 'pose' -> write a single-pose camera-path JSON
    for the renderer -> render a depth image -> publish on 'camera/depth'.
    """

    def __init__(self):
        super().__init__('minimal_publisher')
        # subscribe to pose coming in from topic
        self.subscription = self.create_subscription(Pose, 'pose', self.listener_callback, 10)
        self.subscription  # prevent unused-variable warning

        self.publisher_rgb = self.create_publisher(Image, 'camera/depth', 10)
        timer_period = 10  # seconds
        self.timer = self.create_timer(timer_period, self.timer_callback)
        self.i = 0
        self.bridge = CvBridge()
        self.img_depth = None  # latest rendered frame; None once published

        self.render = RenderCameraPath()

    def listener_callback(self, data):
        # Build the 4x4 camera-to-world matrix from the Pose message.
        matrix = np.matrix(np.zeros((4, 4)))
        matrix[0, 3] = data.position.x
        matrix[1, 3] = data.position.y
        matrix[2, 3] = data.position.z

        r = R.from_quat([data.orientation.x, data.orientation.y, data.orientation.z, data.orientation.w])
        matrix[0:3, 0:3] = r.as_matrix()
        matrix[3, 3] = 1  # homogeneous row (originally assigned twice)

        # Camera-path config consumed by the NeRF renderer.
        nerf_info = {
            "camera_type": "perspective",
            "render_height": 480,
            "render_width": 640,
            "camera_path": [
                {
                    "fov": 70,
                    "aspect": 1.33333333333
                }
            ],
            "fps": 24,
            "seconds": 4,
            "smoothness_value": 0.5,
            "is_cycle": "false",
            "crop": None
        }
        nerf_info['camera_path'][0]['camera_to_world'] = matrix.tolist()

        # NOTE(review): output path is hard-coded to a specific workspace.
        with open("/workspace/ros2_iron/src/nerf_sim/generated_materials/camera_path.json", "w") as outfile:
            outfile.write(json.dumps(nerf_info, indent=4))

        # Render, scale to 0-255 (assumes the renderer returns values in
        # [0, 1] — TODO confirm) and convert to BGR for cv_bridge.
        self.img_depth = self.render.run(['depth'])
        self.img_depth *= 255
        self.img_depth = cv2.cvtColor(self.img_depth, cv2.COLOR_RGB2BGR)

    def timer_callback(self):
        if self.img_depth is None:
            return

        # Bug fix: use unsigned uint8, not signed 'i1' — with int8 any
        # pixel value in 128-255 would wrap to a negative number.
        frame = np.array(self.img_depth, dtype=np.uint8)
        msg_rgb = self.bridge.cv2_to_imgmsg(frame)
        self.publisher_rgb.publish(msg_rgb)
        self.img_depth = None  # publish each rendered frame only once
+
+
def main(args=None):
    """Entry point: spin the depth-image publisher node until shutdown."""
    rclpy.init(args=args)
    node = MinimalPublisher()
    rclpy.spin(node)
    # Explicit destruction is optional — garbage collection would
    # otherwise take care of the node object.
    node.destroy_node()
    rclpy.shutdown()


if __name__ == '__main__':
    main()
\ No newline at end of file
diff --git a/nerf_sim/sim_rgb_generator.py b/nerf_sim/sim_rgb_generator.py
new file mode 100644
index 0000000000000000000000000000000000000000..b90c2ed76561dd1134d89dc583b8249b91bcac3c
--- /dev/null
+++ b/nerf_sim/sim_rgb_generator.py
@@ -0,0 +1,106 @@
+import rclpy
+from rclpy.node import Node
+
+from cv_bridge import CvBridge
+from sensor_msgs.msg import Image
+from geometry_msgs.msg import Pose
+from scipy.spatial.transform import Rotation as R
+from nerf_sim.nerf_render import RenderCameraPath
+
+import cv2
+import json
+import numpy as np
+import mediapy as media
+
+
+# subscribe to pose coming in from topic
+# write out config file for nerf
+# run nerf to get image
+# publish image to topic
+
class MinimalPublisher(Node):
    """Renders a NeRF RGB image for each incoming pose and publishes it.

    Pipeline: subscribe to 'pose' -> write a single-pose camera-path JSON
    for the renderer -> render an RGB image -> publish on 'camera/rgb'.
    """

    def __init__(self):
        super().__init__('minimal_publisher')
        # subscribe to pose coming in from topic
        self.subscription = self.create_subscription(Pose, 'pose', self.listener_callback, 10)
        self.subscription  # prevent unused-variable warning

        self.publisher_rgb = self.create_publisher(Image, 'camera/rgb', 10)
        timer_period = 10  # seconds
        self.timer = self.create_timer(timer_period, self.timer_callback)
        self.i = 0
        self.bridge = CvBridge()
        self.img_rgb = None  # latest rendered frame; None once published

        self.render = RenderCameraPath()

    def listener_callback(self, data):
        # Build the 4x4 camera-to-world matrix from the Pose message.
        matrix = np.matrix(np.zeros((4, 4)))
        matrix[0, 3] = data.position.x
        matrix[1, 3] = data.position.y
        matrix[2, 3] = data.position.z

        r = R.from_quat([data.orientation.x, data.orientation.y, data.orientation.z, data.orientation.w])
        matrix[0:3, 0:3] = r.as_matrix()
        matrix[3, 3] = 1  # homogeneous row (originally assigned twice)

        # Camera-path config consumed by the NeRF renderer.
        nerf_info = {
            "camera_type": "perspective",
            "render_height": 480,
            "render_width": 640,
            "camera_path": [
                {
                    "fov": 70,
                    "aspect": 1.33333333333
                }
            ],
            "fps": 24,
            "seconds": 4,
            "smoothness_value": 0.5,
            "is_cycle": "false",
            "crop": None
        }
        nerf_info['camera_path'][0]['camera_to_world'] = matrix.tolist()

        # NOTE(review): output path is hard-coded to a specific workspace.
        with open("/workspace/ros2_iron/src/nerf_sim/generated_materials/camera_path.json", "w") as outfile:
            outfile.write(json.dumps(nerf_info, indent=4))

        # Render, scale to 0-255 (assumes the renderer returns values in
        # [0, 1] — TODO confirm) and convert to BGR for cv_bridge.
        self.img_rgb = self.render.run(['rgb'])
        self.img_rgb *= 255
        self.img_rgb = cv2.cvtColor(self.img_rgb, cv2.COLOR_RGB2BGR)

    def timer_callback(self):
        if self.img_rgb is None:
            return

        # Bug fix: use unsigned uint8, not signed 'i1' — with int8 any
        # pixel value in 128-255 would wrap to a negative number.
        frame = np.array(self.img_rgb, dtype=np.uint8)
        msg_rgb = self.bridge.cv2_to_imgmsg(frame)
        self.publisher_rgb.publish(msg_rgb)
        self.img_rgb = None  # publish each rendered frame only once
+
+
def main(args=None):
    """Entry point: spin the RGB-image publisher node until shutdown."""
    rclpy.init(args=args)
    node = MinimalPublisher()
    rclpy.spin(node)
    # Explicit destruction is optional — garbage collection would
    # otherwise take care of the node object.
    node.destroy_node()
    rclpy.shutdown()


if __name__ == '__main__':
    main()
\ No newline at end of file
diff --git a/nerf_sim/simple_image_generator.py b/nerf_sim/simple_image_generator.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4046ef67ac9534ebfd43d94d2890ca4600011e3
--- /dev/null
+++ b/nerf_sim/simple_image_generator.py
@@ -0,0 +1,46 @@
+import rclpy
+from rclpy.node import Node
+
+from cv_bridge import CvBridge
+from sensor_msgs.msg import Image
+import cv2
+import numpy as np
+
+# simple example of publishing an image of a single frame saved in a file
class MinimalPublisher(Node):
    """Repeatedly publishes a single image file on the 'simple_camera' topic."""

    def __init__(self):
        super().__init__('minimal_publisher')
        self.publisher_ = self.create_publisher(Image, 'simple_camera', 10)
        timer_period = 0.5  # seconds
        self.timer = self.create_timer(timer_period, self.timer_callback)
        self.i = 0

        # NOTE(review): hard-coded path; cv2.imread returns None when the
        # file is missing, which would crash the first timer tick.
        self.cv_image1 = cv2.imread('/workspace/ros2_iron/src/nerf_sim/test_materials/00000.jpg')
        self.bridge = CvBridge()
        self.get_logger().info('Test')

    def timer_callback(self):
        msg = self.bridge.cv2_to_imgmsg(np.array(self.cv_image1))
        self.publisher_.publish(msg)
        # Bug fix: the original "'Publishing: Image' % msg.data" raised
        # TypeError because the format string has no % placeholder.
        self.get_logger().info('Publishing: Image')
        self.i += 1
+
+
def main(args=None):
    """Entry point: spin the simple image publisher node until shutdown."""
    rclpy.init(args=args)
    node = MinimalPublisher()
    rclpy.spin(node)
    # Explicit destruction is optional — garbage collection would
    # otherwise take care of the node object.
    node.destroy_node()
    rclpy.shutdown()


if __name__ == '__main__':
    main()
\ No newline at end of file
diff --git a/nerf_sim/simple_image_listener.py b/nerf_sim/simple_image_listener.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e95fff82e89030781dda40ee865fed20487923f
--- /dev/null
+++ b/nerf_sim/simple_image_listener.py
@@ -0,0 +1,44 @@
+import rclpy
+from rclpy.node import Node
+
+from cv_bridge import CvBridge
+from sensor_msgs.msg import Image
+import cv2
+import numpy as np
+import mediapy as media
+
+# simple example of subscribing to an image on a topic
+# can also be used for rviz visualization for real use
class MinimalSubscriber(Node):
    """Subscribes to images on 'topic' and writes each frame to disk.

    Simple example subscriber; also usable for RViz-style visual checks.
    """

    def __init__(self):
        super().__init__('minimal_subscriber')
        self.subscription = self.create_subscription(Image, 'topic', self.listener_callback, 10)
        self.subscription  # prevent unused-variable warning
        self.bridge = CvBridge()

    def listener_callback(self, data):
        image = self.bridge.imgmsg_to_cv2(data)
        print(image.shape)
        # Plain literal — the original used an f-string with no placeholders.
        # Every incoming frame overwrites the same file.
        media.write_image("new-test.jpg", image, fmt="jpeg")
        self.get_logger().info('Subscribing: Image')
+
+
def main(args=None):
    """Entry point: spin the image subscriber node until shutdown."""
    rclpy.init(args=args)
    node = MinimalSubscriber()
    rclpy.spin(node)
    # Explicit destruction is optional — garbage collection would
    # otherwise take care of the node object.
    node.destroy_node()
    rclpy.shutdown()


if __name__ == '__main__':
    main()
\ No newline at end of file
diff --git a/package.xml b/package.xml
new file mode 100644
index 0000000000000000000000000000000000000000..86e7b6c13c0a1917215be7cf3273d56a1621602e
--- /dev/null
+++ b/package.xml
@@ -0,0 +1,20 @@
+<?xml version="1.0"?>
+<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
+<package format="3">
+  <name>nerf_sim</name>
+  <version>0.0.0</version>
+  <description>TODO: Package description</description>
+  <maintainer email="root@todo.todo">root</maintainer>
+  <license>Apache-2.0</license>
+
+  <exec_depend>ros2launch</exec_depend>
+
+  <test_depend>ament_copyright</test_depend>
+  <test_depend>ament_flake8</test_depend>
+  <test_depend>ament_pep257</test_depend>
+  <test_depend>python3-pytest</test_depend>
+
+  <export>
+    <build_type>ament_python</build_type>
+  </export>
+</package>
diff --git a/resource/nerf_sim b/resource/nerf_sim
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..782cfe5f9364762553dcfa8c13d461e7dd9397b7
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,4 @@
+[develop]
+script_dir=$base/lib/nerf_sim
+[install]
+install_scripts=$base/lib/nerf_sim
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..22999b4ae3915c9b22716b96a72508fef2ff3280
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,36 @@
from setuptools import find_packages, setup
import os
from glob import glob
package_name = 'nerf_sim'

# ament_python package manifest for the nerf_sim ROS 2 package.
setup(
    name=package_name,
    version='0.0.0',
    packages=find_packages(exclude=['test']),
    data_files=[
        # Register the package with the ament resource index.
        ('share/ament_index/resource_index/packages',
            ['resource/' + package_name]),
        ('share/' + package_name, ['package.xml']),
        # During installation, we need to copy the launch files
        (os.path.join('share', package_name, "launch"), glob('launch/*.launch.py')),
        # Same with the RViz configuration file.
        (os.path.join('share', package_name, "config"), glob('config/*')),
    ],
    install_requires=['setuptools'],
    zip_safe=True,
    maintainer='root',
    maintainer_email='root@todo.todo',
    description='TODO: Package description',
    license='Apache-2.0',
    tests_require=['pytest'],
    entry_points={
        # `ros2 run nerf_sim <name>` dispatches to each module's main().
        'console_scripts': [
            'pose_generator = nerf_sim.pose_generator:main',
            'image_generator = nerf_sim.simple_image_generator:main',
            'image_listener = nerf_sim.simple_image_listener:main',
            'sim_rgb_generator = nerf_sim.sim_rgb_generator:main',
            'sim_depth_generator = nerf_sim.sim_depth_generator:main',
            'pcd_generator = nerf_sim.pcd_publisher_node:main',
        ],
    },
)
diff --git a/test/test_copyright.py b/test/test_copyright.py
new file mode 100644
index 0000000000000000000000000000000000000000..97a39196e84db97954341162a6d2e7f771d938c0
--- /dev/null
+++ b/test/test_copyright.py
@@ -0,0 +1,25 @@
+# Copyright 2015 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ament_copyright.main import main
+import pytest
+
+
# Remove the `skip` decorator once the source file(s) have a copyright header
@pytest.mark.skip(reason='No copyright header has been placed in the generated source file.')
@pytest.mark.copyright
@pytest.mark.linter
def test_copyright():
    """Run the ament_copyright linter over the package sources."""
    rc = main(argv=['.', 'test'])
    assert rc == 0, 'Found errors'
diff --git a/test/test_flake8.py b/test/test_flake8.py
new file mode 100644
index 0000000000000000000000000000000000000000..27ee1078ff077cc3a0fec75b7d023101a68164d1
--- /dev/null
+++ b/test/test_flake8.py
@@ -0,0 +1,25 @@
+# Copyright 2017 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ament_flake8.main import main_with_errors
+import pytest
+
+
@pytest.mark.flake8
@pytest.mark.linter
def test_flake8():
    """Run the ament_flake8 style linter and fail on any reported issue."""
    rc, errors = main_with_errors(argv=[])
    assert rc == 0, \
        'Found %d code style errors / warnings:\n' % len(errors) + \
        '\n'.join(errors)
diff --git a/test/test_pep257.py b/test/test_pep257.py
new file mode 100644
index 0000000000000000000000000000000000000000..b234a3840f4c5bd38f043638c8622b8f240e1185
--- /dev/null
+++ b/test/test_pep257.py
@@ -0,0 +1,23 @@
+# Copyright 2015 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ament_pep257.main import main
+import pytest
+
+
@pytest.mark.linter
@pytest.mark.pep257
def test_pep257():
    """Run the ament_pep257 docstring linter over the package sources."""
    rc = main(argv=['.', 'test'])
    assert rc == 0, 'Found code style errors / warnings'
diff --git a/test_materials/00000.jpg b/test_materials/00000.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..111f12e855dd7f64e887be2cd9dfba706f72eac6
Binary files /dev/null and b/test_materials/00000.jpg differ
diff --git a/test_materials/left_fisheye.json b/test_materials/left_fisheye.json
new file mode 100644
index 0000000000000000000000000000000000000000..3c5264b38043f4018a1defbecab1ec47aee9bfbe
--- /dev/null
+++ b/test_materials/left_fisheye.json
@@ -0,0 +1,43 @@
+{
+    "camera_type": "perspective",
+    "render_height": 480,
+    "render_width": 640,
+    "camera_path": [
+        {
+            "camera_to_world": [
+                [
+                    -0.7712575118708358,
+                    0.15132428740157347,
+                    -0.6182741780720471,
+                    -0.126031247353622
+                ],
+                [
+                    -0.6360692942400918,
+                    -0.21990709008089557,
+                    0.7396329230555223,
+                    0.009873900166690274
+                ],
+                [
+                    -0.024038461336182593,
+                    0.9637125350711779,
+                    0.26585770378602064,
+                    -0.07500160249766082
+                ],
+                [
+                    0.0,
+                    0.0,
+                    0.0,
+                    1.0
+                ]
+            ],
+            "fov": 70,
+            "aspect": 1.33333333333
+
+        }
+    ],
+    "fps": 24,
+    "seconds": 4,
+    "smoothness_value": 0.5,
+    "is_cycle": "false",
+    "crop": null
+}
diff --git a/test_materials/point_cloud.ply b/test_materials/point_cloud.ply
new file mode 100644
index 0000000000000000000000000000000000000000..876e53ae46ca5e85f9695b5b0a5b81fce5ac66d5
Binary files /dev/null and b/test_materials/point_cloud.ply differ