Commit 1621fe82 authored by Alcolado Nuthall

fix: issue with overwriting and feat: pointcloud updates

parent 980ad5fe
Merge request !2: fix: issue with overwriting and feat: pointcloud updates
{
    "camera_type": "perspective",
    "render_height": 480,
    "render_width": 640,
    "camera_path": [
        {
            "fov": 70,
            "aspect": 1.33333333333,
            "camera_to_world": [
                [-0.7712574369763269, 0.1513242754712088, -0.6182741540464238, 0.37396875264637797],
                [-0.6360692717618674, -0.21990707861025355, 0.7396328537169178, 0.009873900166690274],
                [-0.024038457293159843, 0.9637125299746899, 0.26585769152075983, -0.07500160249766082],
                [0.0, 0.0, 0.0, 1.0]
            ]
        }
    ],
    "fps": 24,
    "seconds": 4,
    "smoothness_value": 0.5,
    "is_cycle": "false",
    "crop": null
}
\ No newline at end of file
{
    "camera_type": "perspective",
    "render_height": 480,
    "render_width": 640,
    "camera_path": [
        {
            "fov": 70,
            "aspect": 1.33333333333,
            "camera_to_world": [
                [-0.7712574369763269, 0.1513242754712088, -0.6182741540464238, 0.37396875264637797],
                [-0.6360692717618674, -0.21990707861025355, 0.7396328537169178, 0.009873900166690274],
                [-0.024038457293159843, 0.9637125299746899, 0.26585769152075983, -0.07500160249766082],
                [0.0, 0.0, 0.0, 1.0]
            ]
        }
    ],
    "fps": 24,
    "seconds": 4,
    "smoothness_value": 0.5,
    "is_cycle": "false",
    "crop": null
}
\ No newline at end of file
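For orientation: the two added files above are nerfstudio-style camera-path JSONs with a single keyframe each (note that "is_cycle" is stored as the string "false", not a JSON boolean). A minimal sketch, assuming only the schema visible above (the filename is illustrative), that loads one and extracts the 4x4 camera-to-world matrix:

import json
import numpy as np

# Load a camera-path JSON like the two above (filename is illustrative).
with open("camera_path.json", "r", encoding="utf-8") as f:
    camera_path = json.load(f)

keyframe = camera_path["camera_path"][0]
c2w = np.array(keyframe["camera_to_world"])  # 4x4 camera-to-world transform
assert c2w.shape == (4, 4)

print("camera position:", c2w[:3, 3])  # translation column
print("fov:", keyframe["fov"], "deg; fps:", camera_path["fps"])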
@@ -96,21 +96,20 @@ class RenderCameraPath():
     load_config: Path
     output_path: Path = Path("renders/output.mp4")
     output_format: Literal["images", "video"] = "images"
-    camera_path_filename: Path = Path("camera_path.json")
     rendered_output_names: List[str] = []

     def __init__(self):
         self.load_config = Path('/vol/research/K9/test-colmap/65904bca-e8fd-11ed-a05b-0242ac120003/nerfacto/65904bca-e8fd-11ed-a05b-0242ac120003/config.yml')
-        self.camera_path_filename = Path('/workspace/ros2_iron/src/nerf_sim/generated_materials/camera_path.json')
         _, self.pipeline, _, _ = eval_setup(
             self.load_config,
             eval_num_rays_per_chunk=self.eval_num_rays_per_chunk,
             test_mode="inference",
         )

-    def run(self, render_output_names: List[str] = []):
+    def run(self, render_output_names: List[str] = [], camera_path_filename=Path("renders/camera_path.json")):
         pipeline = self.pipeline
-        with open(self.camera_path_filename, "r", encoding="utf-8") as f:
+        camera_path_filename = Path(camera_path_filename)
+        with open(camera_path_filename, "r", encoding="utf-8") as f:
             camera_path = json.load(f)
         seconds = camera_path["seconds"]
         crop_data = None  # we're not cropping the image
...
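The hunk above moves camera_path_filename from shared instance state into a per-call argument, so two callers rendering from different path files no longer overwrite each other's setting. A hypothetical usage sketch (output names and return values follow the publisher hunks later in this diff):

from pathlib import Path

renderer = RenderCameraPath()

# Each call now carries its own camera path, so a depth render and an RGB
# render can use separate files without clobbering shared state.
img_depth = renderer.run(['depth'], Path("renders/camera_path_depth.json"))
img_rgb = renderer.run(['rgb'], Path("renders/camera_path_rgb.json"))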
@@ -6,6 +6,8 @@ import rclpy
 from rclpy.node import Node
 import sensor_msgs.msg as sensor_msgs
 import std_msgs.msg as std_msgs
+from nerf_sim.render_lidar import RenderLiDAR
+from geometry_msgs.msg import Pose
 import numpy as np
 import open3d as o3d
@@ -14,42 +16,21 @@ class PCDPublisher(Node):
     def __init__(self):
         super().__init__('pcd_publisher_node')

-        pcd_path = '/workspace/ros2_iron/src/nerf_sim/test_materials/point_cloud.ply'
-
-        # I use Open3D to read point clouds and meshes. It's a great library!
-        pcd = o3d.io.read_point_cloud(pcd_path)
-        # I then convert it into a numpy array.
-        self.points = np.asarray(pcd.points)
-        print(self.points.shape)
-
-        # I create a publisher that publishes sensor_msgs.PointCloud2 to the
-        # topic 'pcd'. The value '10' refers to the history_depth, which I
-        # believe is related to the ROS1 concept of queue size.
-        # Read more here:
-        # http://wiki.ros.org/rospy/Overview/Publishers%20and%20Subscribers
         self.pcd_publisher = self.create_publisher(sensor_msgs.PointCloud2, 'pcd', 10)
-        timer_period = 1/30.0
-        self.timer = self.create_timer(timer_period, self.timer_callback)

-        # This rotation matrix is used for visualization purposes. It rotates
-        # the point cloud on each timer callback.
-        self.R = o3d.geometry.get_rotation_matrix_from_xyz([0, 0, np.pi/48])
+        # subscribe to pose coming in from topic
+        self.subscription = self.create_subscription(Pose, 'pose', self.listener_callback, 10)
+        self.subscription

-    def timer_callback(self):
-        # For visualization purposes, I rotate the point cloud with self.R
-        # to make it spin.
-        self.points = self.points
-        # Here I use the point_cloud() function to convert the numpy array
-        # into a sensor_msgs.PointCloud2 object. The second argument is the
-        # name of the frame the point cloud will be represented in. The default
-        # (fixed) frame in RViz is called 'map'
+        self.render = RenderLiDAR()
+
+    def listener_callback(self, data):
+        pcd = self.render.run([data.position.x, data.position.y, data.position.z])
+        self.points = pcd
         self.pcd = point_cloud(self.points, 'map')
-        # Then I publish the PointCloud2 object
         self.pcd_publisher.publish(self.pcd)
 def point_cloud(points, parent_frame):
     """ Creates a point cloud message.
     Args:
...
@@ -72,9 +53,12 @@ def point_cloud(points, parent_frame):
     # which describes the size of each individual point.
     ros_dtype = sensor_msgs.PointField.FLOAT32
     dtype = np.float32
     itemsize = np.dtype(dtype).itemsize  # A 32-bit float takes 4 bytes.
-    data = points.astype(dtype).tobytes()
+    data = np.array(points, dtype=np.float32)
+    data = data.tobytes()
     # The fields specify what the bytes represent. The first 4 bytes
     # represent the x-coordinate, the next 4 the y-coordinate, etc.
...
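The rest of point_cloud() is collapsed in the diff. For reference, a self-contained sketch of the conventional construction (the field layout is the standard ROS 2 PointCloud2 x/y/z packing, not necessarily this repo's exact code):

import numpy as np
import sensor_msgs.msg as sensor_msgs
import std_msgs.msg as std_msgs

def point_cloud(points, parent_frame):
    """Pack an (N, 3) array of xyz positions into a sensor_msgs/PointCloud2."""
    ros_dtype = sensor_msgs.PointField.FLOAT32
    dtype = np.float32
    itemsize = np.dtype(dtype).itemsize  # 4 bytes per float32

    points = np.asarray(points, dtype=dtype)
    data = points.tobytes()

    # Three consecutive float32 fields per point: x at offset 0, y at 4, z at 8.
    fields = [
        sensor_msgs.PointField(name=n, offset=i * itemsize, datatype=ros_dtype, count=1)
        for i, n in enumerate('xyz')
    ]

    return sensor_msgs.PointCloud2(
        header=std_msgs.Header(frame_id=parent_frame),
        height=1,  # unordered cloud: a single row
        width=points.shape[0],
        is_dense=False,
        is_bigendian=False,
        fields=fields,
        point_step=3 * itemsize,  # 12 bytes per point
        row_step=3 * itemsize * points.shape[0],
        data=data,
    )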
import math
from pathlib import Path
from typing import Optional

import numpy as np
import open3d as o3d
import torch

from nerfstudio.cameras.rays import RayBundle
from nerfstudio.utils.eval_utils import eval_setup

def generate_directions():
    # 32 channels spread over a -30.67..+10.67 degree vertical field of view,
    # swept in 1-degree azimuth steps.
    min_deg, max_deg = -30.67, 10.67  # view range, degrees
    no_channels = 32
    delta_range = (max_deg - min_deg) / no_channels  # delta degrees
    elevations = [math.radians(i) for i in np.arange(min_deg, max_deg, delta_range)]
    azimuths = [math.radians(i) for i in np.arange(0, 360, 1)]

    rays = []
    for elevation in elevations:
        for azimuth in azimuths:
            rays.append(torch.tensor([
                math.cos(elevation) * math.sin(azimuth),
                math.cos(elevation) * math.cos(azimuth),
                math.sin(elevation)
            ], device='cuda:0'))
    return torch.stack(rays)

def generate_pointcloud(ray_bundle, outputs):
    rgbs = outputs['rgb']
    # Back-project each ray to a 3D point using the rendered depth.
    points = ray_bundle.origins + ray_bundle.directions * outputs['depth']
    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(points.double().cpu().numpy())
    pcd.colors = o3d.utility.Vector3dVector(rgbs.detach().double().cpu().numpy())
    return pcd

class RenderLiDAR():
    eval_num_rays_per_chunk: Optional[int] = None

    def __init__(self):
        self.load_config = Path('/vol/research/K9/test-colmap/65904bca-e8fd-11ed-a05b-0242ac120003/nerfacto/65904bca-e8fd-11ed-a05b-0242ac120003/config.yml')
        _, self.pipeline, _, _ = eval_setup(
            self.load_config,
            eval_num_rays_per_chunk=self.eval_num_rays_per_chunk,
            test_mode="inference",
        )

    def run(self, origin):
        cuda0 = torch.device('cuda:0')
        directions = generate_directions()
        # origin comes from the pose received by the publisher node
        origin = torch.tensor(origin, device=cuda0)
        num_rays, _ = directions.shape
        origins = origin.repeat(num_rays, 1)
        area = torch.tensor([0.0001], device=cuda0)
        pixel_area = area.repeat(num_rays, 1)
        camera_indices = torch.tensor([0], device=cuda0)
        ray_bundle = RayBundle(origins, directions, pixel_area, camera_indices=camera_indices, nears=None, fars=None)
        outputs = self.pipeline.model(ray_bundle)
        pcd = generate_pointcloud(ray_bundle, outputs)
        torch.cuda.empty_cache()
        tpcd = o3d.t.geometry.PointCloud.from_legacy(pcd)
        tpcd.point.colors = (tpcd.point.colors * 255).to(o3d.core.Dtype.UInt8)  # type: ignore
        return o3d.core.Tensor.numpy(tpcd.point.positions)
\ No newline at end of file
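A hypothetical smoke test for the new RenderLiDAR class (assumes a CUDA device and the nerfacto checkpoint hard-coded in __init__; run() returns an (N, 3) array of hit positions):

import numpy as np
import open3d as o3d

lidar = RenderLiDAR()
points = lidar.run([0.0, 0.0, 0.0])  # one simulated scan from the origin

# Save the scan so it can be inspected in a point cloud viewer.
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(np.asarray(points, dtype=np.float64))
o3d.io.write_point_cloud("scan.ply", pcd)
print(points.shape)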
@@ -69,10 +69,11 @@ class MinimalPublisher(Node):
         nerf_info['camera_path'][0]['camera_to_world'] = matrix.tolist()
         json_object = json.dumps(nerf_info, indent=4)
-        with open("/workspace/ros2_iron/src/nerf_sim/generated_materials/camera_path.json", "w") as outfile:
+        path = "/workspace/ros2_iron/src/nerf_sim/generated_materials/camera_path_depth.json"
+        with open(path, "w") as outfile:
             outfile.write(json_object)
-        self.img_depth = self.render.run(['depth'])
+        self.img_depth = self.render.run(['depth'], path)
         self.img_depth *= 255
         self.img_depth = cv2.cvtColor(self.img_depth, cv2.COLOR_RGB2BGR)
...
@@ -69,10 +69,11 @@ class MinimalPublisher(Node):
         nerf_info['camera_path'][0]['camera_to_world'] = matrix.tolist()
         json_object = json.dumps(nerf_info, indent=4)
-        with open("/workspace/ros2_iron/src/nerf_sim/generated_materials/camera_path.json", "w") as outfile:
+        path = "/workspace/ros2_iron/src/nerf_sim/generated_materials/camera_path_rgb.json"
+        with open(path, "w") as outfile:
             outfile.write(json_object)
-        self.img_rgb = self.render.run(['rgb'])
+        self.img_rgb = self.render.run(['rgb'], path)
         self.img_rgb *= 255
         self.img_rgb = cv2.cvtColor(self.img_rgb, cv2.COLOR_RGB2BGR)
...
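The depth and RGB hunks above differ only in the output name and path suffix; both previously wrote to the same camera_path.json, which is the overwrite this commit fixes. A hypothetical helper (names are illustrative, not in this commit) that factors out the shared write-then-render step:

import json

def write_path_and_render(render, nerf_info, matrix, output_name, path):
    # Write this output's own camera-path file, then render from that file.
    # One file per output is what removes the overwrite between publishers.
    nerf_info['camera_path'][0]['camera_to_world'] = matrix.tolist()
    with open(path, "w") as outfile:
        outfile.write(json.dumps(nerf_info, indent=4))
    return render.run([output_name], path)

# e.g.:
# img_depth = write_path_and_render(self.render, nerf_info, matrix, 'depth',
#     '/workspace/ros2_iron/src/nerf_sim/generated_materials/camera_path_depth.json')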