diff --git a/generated_materials/camera_path_depth.json b/generated_materials/camera_path_depth.json
new file mode 100644
index 0000000000000000000000000000000000000000..1c48c13729853e5b83723ef1147f06c12ddee4d3
--- /dev/null
+++ b/generated_materials/camera_path_depth.json
@@ -0,0 +1,42 @@
+{
+    "camera_type": "perspective",
+    "render_height": 480,
+    "render_width": 640,
+    "camera_path": [
+        {
+            "fov": 70,
+            "aspect": 1.33333333333,
+            "camera_to_world": [
+                [
+                    -0.7712574369763269,
+                    0.1513242754712088,
+                    -0.6182741540464238,
+                    0.37396875264637797
+                ],
+                [
+                    -0.6360692717618674,
+                    -0.21990707861025355,
+                    0.7396328537169178,
+                    0.009873900166690274
+                ],
+                [
+                    -0.024038457293159843,
+                    0.9637125299746899,
+                    0.26585769152075983,
+                    -0.07500160249766082
+                ],
+                [
+                    0.0,
+                    0.0,
+                    0.0,
+                    1.0
+                ]
+            ]
+        }
+    ],
+    "fps": 24,
+    "seconds": 4,
+    "smoothness_value": 0.5,
+    "is_cycle": "false",
+    "crop": null
+}
\ No newline at end of file
diff --git a/generated_materials/camera_path_rgb.json b/generated_materials/camera_path_rgb.json
new file mode 100644
index 0000000000000000000000000000000000000000..1c48c13729853e5b83723ef1147f06c12ddee4d3
--- /dev/null
+++ b/generated_materials/camera_path_rgb.json
@@ -0,0 +1,42 @@
+{
+    "camera_type": "perspective",
+    "render_height": 480,
+    "render_width": 640,
+    "camera_path": [
+        {
+            "fov": 70,
+            "aspect": 1.33333333333,
+            "camera_to_world": [
+                [
+                    -0.7712574369763269,
+                    0.1513242754712088,
+                    -0.6182741540464238,
+                    0.37396875264637797
+                ],
+                [
+                    -0.6360692717618674,
+                    -0.21990707861025355,
+                    0.7396328537169178,
+                    0.009873900166690274
+                ],
+                [
+                    -0.024038457293159843,
+                    0.9637125299746899,
+                    0.26585769152075983,
+                    -0.07500160249766082
+                ],
+                [
+                    0.0,
+                    0.0,
+                    0.0,
+                    1.0
+                ]
+            ]
+        }
+    ],
+    "fps": 24,
+    "seconds": 4,
+    "smoothness_value": 0.5,
+    "is_cycle": "false",
+    "crop": null
+}
\ No newline at end of file
diff --git a/nerf_sim/__pycache__/nerf_render.cpython-310.pyc b/nerf_sim/__pycache__/nerf_render.cpython-310.pyc
index 67c8120d4b5521aa01e2737672e9544f2e1be06d..7b6193e84355ea083960aac2af291f6d80411378 100644
Binary files a/nerf_sim/__pycache__/nerf_render.cpython-310.pyc and b/nerf_sim/__pycache__/nerf_render.cpython-310.pyc differ
diff --git a/nerf_sim/__pycache__/pcd_publisher_node.cpython-310.pyc b/nerf_sim/__pycache__/pcd_publisher_node.cpython-310.pyc
index 4cea66041ff744a63bec567f794f74aae6112038..c3bece276be9d80d65c3276c56c3ef7c5e6b36ec 100644
Binary files a/nerf_sim/__pycache__/pcd_publisher_node.cpython-310.pyc and b/nerf_sim/__pycache__/pcd_publisher_node.cpython-310.pyc differ
diff --git a/nerf_sim/__pycache__/render_lidar.cpython-310.pyc b/nerf_sim/__pycache__/render_lidar.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4b60e6de2f8acc2f8db6c5157b35032574502dfa
Binary files /dev/null and b/nerf_sim/__pycache__/render_lidar.cpython-310.pyc differ
diff --git a/nerf_sim/__pycache__/sim_depth_generator.cpython-310.pyc b/nerf_sim/__pycache__/sim_depth_generator.cpython-310.pyc
index b2cda0000a9ab877dc1bc2beee547aa3b5f2a29b..2bf63e627516f78f0525992fe760c9869edf6d43 100644
Binary files a/nerf_sim/__pycache__/sim_depth_generator.cpython-310.pyc and b/nerf_sim/__pycache__/sim_depth_generator.cpython-310.pyc differ
diff --git a/nerf_sim/__pycache__/sim_rgb_generator.cpython-310.pyc b/nerf_sim/__pycache__/sim_rgb_generator.cpython-310.pyc
index ac3c028df3e5426c0c50f9664ec583fa1895d07a..c6dc5064dc764c4c96e1a82dd5af22683aa27947 100644
Binary files a/nerf_sim/__pycache__/sim_rgb_generator.cpython-310.pyc and b/nerf_sim/__pycache__/sim_rgb_generator.cpython-310.pyc differ
diff --git a/nerf_sim/nerf_render.py b/nerf_sim/nerf_render.py
index 5a4f746ffda7b0775e9a2199d11c94a3bedf722b..44406d43f3a8d4a160b4a49f09e9447d957d301c 100644
--- a/nerf_sim/nerf_render.py
+++ b/nerf_sim/nerf_render.py
@@ -96,21 +96,20 @@ class RenderCameraPath():
     load_config: Path
     output_path: Path = Path("renders/output.mp4")
     output_format: Literal["images", "video"] = "images"
-    camera_path_filename: Path = Path("camera_path.json")
     rendered_output_names: List[str] = []
     
     def __init__(self):
         self.load_config = Path('/vol/research/K9/test-colmap/65904bca-e8fd-11ed-a05b-0242ac120003/nerfacto/65904bca-e8fd-11ed-a05b-0242ac120003/config.yml')
-        self.camera_path_filename = Path('/workspace/ros2_iron/src/nerf_sim/generated_materials/camera_path.json')
         _, self.pipeline, _, _ = eval_setup(
             self.load_config,
             eval_num_rays_per_chunk=self.eval_num_rays_per_chunk,
             test_mode="inference",
         )
 
-    def run(self, render_output_names: List[str] = []):
+    def run(self, render_output_names: List[str] = [], camera_path_filename=Path("renders/camera_path.json")):
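+        """Render the requested outputs (e.g. ['rgb'] or ['depth']) along the
+        camera path described by the JSON file at ``camera_path_filename``,
+        which may be given as a str or a Path."""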
         pipeline = self.pipeline
-        with open(self.camera_path_filename, "r", encoding="utf-8") as f:
+        camera_path_filename = Path(camera_path_filename)
+        with open(camera_path_filename, "r", encoding="utf-8") as f:
                 camera_path = json.load(f)
         seconds = camera_path["seconds"]
         crop_data = None # we're not cropping the image
diff --git a/nerf_sim/pcd_publisher_node.py b/nerf_sim/pcd_publisher_node.py
index 711fceb739e94500db9b146d8519a762cf84e31a..4b102b58fa6d3407b438271d48067a2585443dcf 100644
--- a/nerf_sim/pcd_publisher_node.py
+++ b/nerf_sim/pcd_publisher_node.py
@@ -6,6 +6,8 @@ import rclpy
 from rclpy.node import Node
 import sensor_msgs.msg as sensor_msgs
 import std_msgs.msg as std_msgs
+from nerf_sim.render_lidar import RenderLiDAR
+from geometry_msgs.msg import Pose
 
 import numpy as np
 import open3d as o3d
@@ -14,42 +16,21 @@ class PCDPublisher(Node):
 
     def __init__(self):
         super().__init__('pcd_publisher_node')
-
-        pcd_path = '/workspace/ros2_iron/src/nerf_sim/test_materials/point_cloud.ply'
-
-        # I use Open3D to read point clouds and meshes. It's a great library!
-        pcd = o3d.io.read_point_cloud(pcd_path)
-        # I then convert it into a numpy array.
-        self.points = np.asarray(pcd.points)
-        print(self.points.shape)
-        
-        # I create a publisher that publishes sensor_msgs.PointCloud2 to the 
-        # topic 'pcd'. The value '10' refers to the history_depth, which I 
-        # believe is related to the ROS1 concept of queue size. 
-        # Read more here: 
-        # http://wiki.ros.org/rospy/Overview/Publishers%20and%20Subscribers
         self.pcd_publisher = self.create_publisher(sensor_msgs.PointCloud2, 'pcd', 10)
-        timer_period = 1/30.0
-        self.timer = self.create_timer(timer_period, self.timer_callback)
-
-        # This rotation matrix is used for visualization purposes. It rotates
-        # the point cloud on each timer callback. 
-        self.R = o3d.geometry.get_rotation_matrix_from_xyz([0, 0, np.pi/48])
-
-              
-                
-    def timer_callback(self):
-        # For visualization purposes, I rotate the point cloud with self.R 
-        # to make it spin. 
-        self.points = self.points
-        # Here I use the point_cloud() function to convert the numpy array 
-        # into a sensor_msgs.PointCloud2 object. The second argument is the 
-        # name of the frame the point cloud will be represented in. The default
-        # (fixed) frame in RViz is called 'map'
+        
+        # subscribe to the robot pose published on the 'pose' topic
+        self.subscription = self.create_subscription(Pose, 'pose', self.listener_callback, 10)
+        self.subscription  # prevent "unused variable" warning
+        
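+        # NeRF-backed LiDAR renderer; loads the nerfstudio pipeline once at startup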
+        self.render = RenderLiDAR()
+
+    def listener_callback(self, data):
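+        # Only the position of the incoming pose is used for now; handling the
+        # orientation is still a TODO in render_lidar.py.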
+        pcd = self.render.run([data.position.x, data.position.y, data.position.z])
+        self.points = pcd
         self.pcd = point_cloud(self.points, 'map')
-        # Then I publish the PointCloud2 object 
         self.pcd_publisher.publish(self.pcd)
 
+
 def point_cloud(points, parent_frame):
     """ Creates a point cloud message.
     Args:
@@ -72,9 +53,12 @@ def point_cloud(points, parent_frame):
     # which describes the size of each individual point.
     ros_dtype = sensor_msgs.PointField.FLOAT32
     dtype = np.float32
+
     itemsize = np.dtype(dtype).itemsize # A 32-bit float takes 4 bytes.
 
-    data = points.astype(dtype).tobytes() 
+    # np.asarray accepts both numpy arrays and plain Python lists of points
+    data = np.asarray(points, dtype=np.float32).tobytes()
 
     # The fields specify what the bytes represent. The first 4 bytes
     # represent the x-coordinate, the next 4 the y-coordinate, etc.
diff --git a/nerf_sim/render_lidar.py b/nerf_sim/render_lidar.py
new file mode 100644
index 0000000000000000000000000000000000000000..4b83785adc0296caf225e13cf2ecb2af24cc62ee
--- /dev/null
+++ b/nerf_sim/render_lidar.py
@@ -0,0 +1,82 @@
+import math
+from pathlib import Path
+from typing import Optional
+
+import numpy as np
+import open3d as o3d
+import torch
+
+from nerfstudio.cameras.rays import RayBundle
+from nerfstudio.utils.eval_utils import eval_setup
+
+def generate_direction():
+    # Elevation limits of the simulated spinning LiDAR, in degrees
+    # (roughly the vertical field of view of a 32-channel unit).
+    min_deg, max_deg = -30.67, 10.67
+    no_channels = 32
+    delta_range = (max_deg - min_deg) / no_channels  # elevation step in degrees
+
+    elevations = [math.radians(i) for i in np.arange(min_deg, max_deg, delta_range)]
+    azimuths = [math.radians(i) for i in np.arange(0, 360, 1)]  # one ray per degree
+
+    rays = []
+    for elevation in elevations:
+        for azimuth in azimuths:
+            rays.append(torch.tensor([
+                        math.cos(elevation) * math.sin(azimuth),
+                        math.cos(elevation) * math.cos(azimuth),
+                        math.sin(elevation)
+                    ], device='cuda:0'))
+
+    return torch.stack(rays)    
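+# Each direction above is a unit vector by construction:
+# |(cos e * sin a, cos e * cos a, sin e)| = sqrt(cos^2 e + sin^2 e) = 1,
+# so the model's depth output can be used directly as a ray length below.
+# With 32 elevation channels and 360 azimuth steps this yields 11,520 rays.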
+
+def generate_pointcloud(ray_bundle, outputs):
+    rgbs = outputs['rgb']
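+    # Back-project every ray: point = origin + direction * depth.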
+    points = ray_bundle.origins + ray_bundle.directions * outputs['depth']
+    
+    pcd = o3d.geometry.PointCloud()
+    pcd.points = o3d.utility.Vector3dVector(points.double().cpu().numpy())
+    pcd.colors = o3d.utility.Vector3dVector(rgbs.detach().double().cpu().numpy())
+    
+    return pcd
+
+class RenderLiDAR():
+    eval_num_rays_per_chunk: Optional[int] = None
+    def __init__(self):
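+        # Hard-coded path to a trained nerfstudio (nerfacto) checkpoint config;
+        # the same path is used by RenderCameraPath in nerf_render.py.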
+        self.load_config = Path('/vol/research/K9/test-colmap/65904bca-e8fd-11ed-a05b-0242ac120003/nerfacto/65904bca-e8fd-11ed-a05b-0242ac120003/config.yml')
+        _, self.pipeline, _, _ = eval_setup(
+            self.load_config,
+            eval_num_rays_per_chunk=self.eval_num_rays_per_chunk,
+            test_mode="inference",
+        )
+
+    def run(self, origin):
+        cuda0 = torch.device('cuda:0')
+
+        directions = generate_direction()
+
+        # TODO: use the full pose (orientation as well as position) from the
+        # incoming message; for now only the sensor origin is applied.
+        origin = torch.tensor(origin, device=cuda0)
+
+        # Every ray shares the same origin, repeated once per direction.
+        num_rays, _ = directions.shape
+        origins = origin.repeat(num_rays, 1)
+
+        # RayBundle requires a pixel area; use a small nominal value for LiDAR rays.
+        area = torch.tensor([0.0001], device=cuda0)
+        pixel_area = area.repeat(num_rays, 1)
+
+        camera_indices = torch.tensor([0], device=cuda0)
+
+        ray_bundle = RayBundle(origins, directions, pixel_area, camera_indices=camera_indices, nears=None, fars=None)
+        # Inference only, so skip autograd bookkeeping.
+        with torch.no_grad():
+            outputs = self.pipeline.model(ray_bundle)
+        
+        pcd = generate_pointcloud(ray_bundle, outputs)
+        
+        torch.cuda.empty_cache()
+        tpcd = o3d.t.geometry.PointCloud.from_legacy(pcd)
+        tpcd.point.colors = (tpcd.point.colors * 255).to(o3d.core.Dtype.UInt8)  # type: ignore
+        # Only the XYZ positions are returned; the publisher does not use the
+        # converted colors yet.
+        return o3d.core.Tensor.numpy(tpcd.point.positions)
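+
+
+# Minimal usage sketch (assumes the hard-coded checkpoint config above exists
+# and a CUDA device is available):
+#
+#     render = RenderLiDAR()
+#     points = render.run([0.0, 0.0, 0.0])   # (N, 3) numpy array of XYZ
+#     print(points.shape)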
diff --git a/nerf_sim/sim_depth_generator.py b/nerf_sim/sim_depth_generator.py
index 30e067e780556fc08cb19b56cc665f1f757cc345..6595cb75dafe7191be864264fb52e58d57b44e9e 100644
--- a/nerf_sim/sim_depth_generator.py
+++ b/nerf_sim/sim_depth_generator.py
@@ -69,10 +69,11 @@ class MinimalPublisher(Node):
         nerf_info['camera_path'][0]['camera_to_world'] = matrix.tolist()
         json_object = json.dumps(nerf_info, indent=4)
         
-        with open("/workspace/ros2_iron/src/nerf_sim/generated_materials/camera_path.json", "w") as outfile:
+        path = "/workspace/ros2_iron/src/nerf_sim/generated_materials/camera_path_depth.json"
+        with open(path, "w") as outfile:
             outfile.write(json_object)
 
-        self.img_depth = self.render.run(['depth'])
+        self.img_depth = self.render.run(['depth'], path)
         self.img_depth *= 255
         self.img_depth = cv2.cvtColor(self.img_depth, cv2.COLOR_RGB2BGR)
 
diff --git a/nerf_sim/sim_rgb_generator.py b/nerf_sim/sim_rgb_generator.py
index b90c2ed76561dd1134d89dc583b8249b91bcac3c..9078c687cbcfa16e36d5a052e3243e1fd6ddce05 100644
--- a/nerf_sim/sim_rgb_generator.py
+++ b/nerf_sim/sim_rgb_generator.py
@@ -69,10 +69,11 @@ class MinimalPublisher(Node):
         nerf_info['camera_path'][0]['camera_to_world'] = matrix.tolist()
         json_object = json.dumps(nerf_info, indent=4)
         
-        with open("/workspace/ros2_iron/src/nerf_sim/generated_materials/camera_path.json", "w") as outfile:
+        path = "/workspace/ros2_iron/src/nerf_sim/generated_materials/camera_path_rgb.json"
+        with open(path, "w") as outfile:
             outfile.write(json_object)
 
-        self.img_rgb = self.render.run(['rgb'])
+        self.img_rgb = self.render.run(['rgb'], path)
         self.img_rgb *= 255
         self.img_rgb = cv2.cvtColor(self.img_rgb, cv2.COLOR_RGB2BGR)