diff --git a/ROS Nodes/dataset_publisher.py b/ROS Nodes/dataset_publisher.py
index 74d9e8f75deeac4267dabcea3261513dc29b060a..ab9e07b1d29da522e81de90886361152fe2f29e3 100644
--- a/ROS Nodes/dataset_publisher.py	
+++ b/ROS Nodes/dataset_publisher.py	
@@ -11,7 +11,7 @@ import random
 def read_data():
         image_paths = []
         poses = []
-        PATH = '/home/shania/catkin_ws/src/android/scripts/KingsCollege'
+        PATH = '/home/shania/catkin_ws/src/android/scripts/datasets/KingsCollege'
         file_name = 'dataset_test.txt'
         with open("/".join([PATH, file_name])) as f:
             lines = f.readlines()
diff --git a/ROS Nodes/draw_sequence.py b/ROS Nodes/draw_sequence.py
new file mode 100644
index 0000000000000000000000000000000000000000..b37eab2d77435d85d577c34767530d44c9c35e7b
--- /dev/null
+++ b/ROS Nodes/draw_sequence.py	
@@ -0,0 +1,73 @@
+#!/usr/bin/env python3
+
+import rospy
+import tf
+from nav_msgs.msg import Path
+from geometry_msgs.msg import PoseStamped
+
+
+def read_data():
+	image_paths = []
+	poses = []
+	PATH = '/home/shania/catkin_ws/src/android/scripts/datasets/TurtlebotTourCVSSP/'
+	file_name = 'metadata.txt'
+	with open("/".join([PATH, file_name])) as f:
+		lines = f.readlines()
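+	# after the 3-line header, each line holds an image path followed by 7 pose values
+	# (used below as x, y, z position and an x, y, z, w quaternion)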
+	for i in range(3, len(lines)):  # skipping first 3 lines
+		data = lines[i].split()
+		image_paths.append("/".join([PATH, data[0]]))
+		poses.append([float(x) for x in data[1:]])
+    
+	return image_paths, poses
+        
+        
+def create_path_message():
+	# Initialize a new Path message
+	path_msg = Path()
+
+	# Extract poses and add them to the path
+	_, poses = read_data()
+	for i in range(len(poses)):
+		# print(poses[i])
+		pose_msg = PoseStamped()
+		pose_msg.header.stamp = rospy.Time.now()  # Set the timestamp
+		pose_msg.header.frame_id = "map-gt" 
+		pose_msg.pose.position.x = poses[i][0]  # X coordinate
+		pose_msg.pose.position.y = poses[i][1]  # Y coordinate
+		pose_msg.pose.position.z = poses[i][2]  # Z coordinate
+		# Set the orientation from the dataset quaternion
+		pose_msg.pose.orientation.x = poses[i][3]
+		pose_msg.pose.orientation.y = poses[i][4]
+		pose_msg.pose.orientation.z = poses[i][5]
+		pose_msg.pose.orientation.w = poses[i][6]
+
+		# Add the pose to the path
+		# print(pose_msg)
+		path_msg.poses.append(pose_msg)
+	
+	path_msg.header.stamp = rospy.Time.now()  # Set the timestamp
+	path_msg.header.frame_id = "map-gt" 
+	
+	# Apply transform to path message	
+	# t_msg = listener.transformPose("map", path_msg)  
+	# t_msg.header.stamp = rospy.Time.now()
+    
+	return path_msg
+
+
+if __name__ == '__main__':
+	rospy.init_node('path_publisher')
+    
+	# Create a publisher for the path message
+	path_publisher = rospy.Publisher('/path_topic', Path, queue_size=10)
+    
+	rate = rospy.Rate(1)  # 1 Hz
+    
+	while not rospy.is_shutdown():
+		# Create and publish the path message
+		# listener = tf.TransformListener()  # not needed: a transform is already being broadcast, so stating the original frame when publishing is enough
+		path_msg = create_path_message()
+		path_publisher.publish(path_msg)
+		rospy.loginfo("Path message published.")
+		rate.sleep()
+
diff --git a/ROS Nodes/models.py b/ROS Nodes/models.py
new file mode 100644
index 0000000000000000000000000000000000000000..2bc9984210667e6e57177303bfd2985d04f2e4e9
--- /dev/null
+++ b/ROS Nodes/models.py	
@@ -0,0 +1,43 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torchvision import models
+
+
+# https://github.com/NVlabs/geomapnet/blob/master/models/posenet.py
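+# PoseNet-style pose regressor: ResNet-34 backbone, 2048-D feature, separate translation and rotation heads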
+class PoseNet(nn.Module):
+    def __init__(self, droprate=0.5, pretrained=True):
+        super(PoseNet, self).__init__()
+        self.droprate = droprate
+
+        # replace the last FC layer in feature extractor
+        self.feature_extractor = models.resnet34(weights="IMAGENET1K_V1" if pretrained else None)
+        self.feature_extractor.avgpool = nn.AdaptiveAvgPool2d(1)
+        in_dim = self.feature_extractor.fc.in_features
+        self.feature_extractor.fc = nn.Linear(in_dim, 2048)
+
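+        # two regression heads: 3-D position (xyz) and 4-D orientation quaternion (wpqr)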
+        self.fc_xyz = nn.Linear(2048, 3)
+        self.fc_wpqr = nn.Linear(2048, 4)
+
+        # initialize
+        if pretrained:
+            init_modules = [self.feature_extractor.fc, self.fc_xyz, self.fc_wpqr]
+        else:
+            init_modules = self.modules()
+
+        for m in init_modules:
+            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
+                nn.init.kaiming_normal_(m.weight.data)
+                if m.bias is not None:
+                    nn.init.constant_(m.bias.data, 0)
+
+    def forward(self, x):
+        x = self.feature_extractor(x)
+        # 1 x 2048
+        x = F.relu(x)
+        if self.droprate > 0:
+            x = F.dropout(x, p=self.droprate, training=self.training)
+
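+        # regress position and orientation from the shared 2048-D feature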
+        xyz = self.fc_xyz(x)
+        wpqr = self.fc_wpqr(x)
+        return xyz, wpqr
diff --git a/ROS Nodes/photo_subscriber.py b/ROS Nodes/photo_subscriber.py
deleted file mode 100644
index dd8a9594dec4ea5a4b88dd435af65f49b0b99737..0000000000000000000000000000000000000000
--- a/ROS Nodes/photo_subscriber.py	
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python3
-
-import rospy
-from sensor_msgs.msg import Image
-from cv_bridge import CvBridge
-import cv2
-
-def callback(data):
-	print("Received data of type: ", type(data))
-	br = CvBridge()
-	data = br.imgmsg_to_cv2(data, "bgr8")
-	print("Converted to type: ", type(data))
-	cv2.imshow("Smartphone Image", data)
-	
-	cv2.waitKey(0)
-	 
-
-if __name__ == '__main__':
-	rospy.init_node("photo_subscriber")
-	sub = rospy.Subscriber("/photo", Image, callback)
-	rospy.loginfo("Photo Node started")
-	
-	while not rospy.is_shutdown():
-		rospy.spin()
-	cv2.destroyAllWindows()
-	
diff --git a/ROS Nodes/pose_publisher.py b/ROS Nodes/pose_publisher.py
index 5e412a24cbddb74cd5e58e1cb9bde6390132915b..fbc7490636d8293a6ba4b19bd3871c37bd4b46d7 100644
--- a/ROS Nodes/pose_publisher.py	
+++ b/ROS Nodes/pose_publisher.py	
@@ -8,6 +8,7 @@ from geometry_msgs.msg import PoseStamped
 from cv_bridge import CvBridge
 from PIL import Image as Img
 import cv2
+import tf
 
 import torch
 from torchvision import transforms
@@ -15,6 +16,7 @@ import torch.nn.functional as F
 
 import models
 
+listener = None
 
 def image_callback(data):
 	# ROS Format to cv2 
@@ -40,8 +42,8 @@ def image_callback(data):
 	print("INPUT: ", photo.shape)
 	
 	# MODEL
-	model = models.ResNet()
-	PATH = '/home/shania/catkin_ws/src/android/scripts/model.pth'
+	model = models.PoseNet()
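+	# load the trained PoseNet checkpoint; note that it is re-read on every image callback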
+	PATH = '/home/shania/catkin_ws/src/android/scripts/posenet_300.pth'
 	model.load_state_dict(torch.load(PATH))
 	model.eval()
 		
@@ -52,23 +54,33 @@ def image_callback(data):
 	print(pos_out, ori_out)	
 	
 	msg = PoseStamped()  # geometry pose stamped
-	msg.header.stamp = rospy.Time.now()
-	msg.header.frame_id = "pose"
-	
+	msg.header.stamp = rospy.Time(0)
+	msg.header.frame_id = "map-gt"  # the ground-truth coordinate frame; a separate node transforms it from map-gt to map
 	msg.pose.position.x = pos_out[0]
 	msg.pose.position.y = pos_out[1]
-	msg.pose.position.z = pos_out[2]
-	msg.pose.orientation.x = ori_out[0]
-	msg.pose.orientation.y = ori_out[1]
-	msg.pose.orientation.z = ori_out[2]
-	msg.pose.orientation.w = ori_out[3]	
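+	# only x and y from the network are used for the 2-D navigation goal; z is zeroed and the orientation is set to identity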
+	msg.pose.position.z = 0.0
+	msg.pose.orientation.x = 0
+	msg.pose.orientation.y = 0
+	msg.pose.orientation.z = 0
+	msg.pose.orientation.w = 1
+	
+	# t_msg = listener.transformPose("butthead/map", msg)  # listen to any transforms being broadcast in ROS and convert to this frame; the published pose would then be in the frame butthead/map
+	# NOT CORRECT
+	# not needed: a transform is already running, so stating the original frame when publishing is enough; rosrun tf handles the rest
+	# t_msg.header.stamp = rospy.Time.now()
+	# t_msg.pose.position.z = 0
+	# t_msg.pose.orientation.x = 0
+	# t_msg.pose.orientation.y = 0
+	# t_msg.pose.orientation.z = 0
+	# t_msg.pose.orientation.w = 1
+	
 	pub.publish(msg)  
 	
 
 if __name__ == '__main__':
 	rospy.init_node("pose_publisher")
-	
-	pub = rospy.Publisher("/camera_pose", PoseStamped, queue_size=10)
+	# listener = tf.TransformListener()
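+	# publishing to move_base_simple/goal sends each predicted pose to move_base as a navigation goal (here under the "butthead" namespace)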
+	pub = rospy.Publisher("/butthead/move_base_simple/goal", PoseStamped, queue_size=10)
 	sub = rospy.Subscriber("/photo", Image, callback=image_callback)
 	rospy.loginfo("Node has been started")