diff --git a/pose_estimation/msg/SphericalCoordinates.msg b/pose_estimation/msg/SphericalCoordinates.msg
index 7e14a368a901bfbc4913ec75a16b0e1e2ac7e86e..1bf8d76b7d15c82b5763ed1d1283b1878d411a93 100644
--- a/pose_estimation/msg/SphericalCoordinates.msg
+++ b/pose_estimation/msg/SphericalCoordinates.msg
@@ -1,3 +1,4 @@
 float64 distance
 float64 theta
-float64 phi
\ No newline at end of file
+float64 phi
+float64 wrist_angle
\ No newline at end of file
diff --git a/pose_estimation/scripts/arm_keypoint_capture.py b/pose_estimation/scripts/arm_keypoint_capture.py
index 8ec7883740efef45595e4b165517ed964d5d20cf..76d00c69c229bcbc98078c1ccf75a5e4f9803080 100755
--- a/pose_estimation/scripts/arm_keypoint_capture.py
+++ b/pose_estimation/scripts/arm_keypoint_capture.py
@@ -1,11 +1,3 @@
-############################## TODO ################################
-#
-#
-### TEST: spherical to coordinates 
-#
-# IMPROVEMENTS: How often do we update the arm? How long to move the arm? Which IKs won't be solved
-# GENERAL: Global variables, comments and structure. Dependencies (mediapipe, vg)
-####################################################################
 
 import mediapipe as mp
 import numpy as np
@@ -21,8 +13,8 @@ from mediapipe.framework.formats import landmark_pb2
 from pose_estimation.msg import SphericalCoordinates
 
 ### Replaying a pre-recorded mp4 video or using the webcam
-video_replay = True
-test_video = "test_movement.mp4"
+VIDEO_REPLAY = True
+test_video = "test_movement_2.mp4"
 VIDEO = f"{os.getcwd()}/../catkin_ws/src/pose_estimation/test/{test_video}"
 
 ### Mediapipe drawing
@@ -33,6 +25,7 @@ mp_ds = mp_drawing_styles.get_default_pose_landmarks_style()
 ### Mediapipe Pose Estimation
 mp_pose = mp.solutions.pose
 
+# TODO: Change to identity
 ### Unit axis vectors
 x_axis = np.array([1, 0, 0])
 y_axis = np.array([0, 1, 0])
@@ -41,18 +34,19 @@ z_axis = np.array([0, 0, 1])
 # TODO: Define in the other file (prove mathematically)
 ### Ranges 
 mp_distance_range = (0.0,0.53)
-robot_distance_range = (0.14, 0.46)
+robot_distance_range = (0.14, 0.35)
 
 ### Threshold
-VISIBILITY_THRESHOLD = 0.2
+VISIBILITY_THRESHOLD = 0.1
 
 ### Arm Landmarks
 wrist_keypoint = mp_pose.PoseLandmark.RIGHT_WRIST.value
+elbow_keypoint = mp_pose.PoseLandmark.RIGHT_ELBOW.value
 shoulder_keypoint = mp_pose.PoseLandmark.RIGHT_SHOULDER.value
 
 arm_landmarks = [
     shoulder_keypoint, 
-    mp_pose.PoseLandmark.RIGHT_ELBOW.value,
+    elbow_keypoint,
     wrist_keypoint,
 ]
 
@@ -90,7 +84,14 @@ def get_keypoint_as_np(keypoint):
 
 ### Method used for publishing the spherical coordinates 
 def publish_keypoint(pub, results):
+
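+    # Helper used below to project a vector onto a plane by zeroing one of its components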
+    def zero_out(arr, n):
+        new_arr = np.copy(arr)
+        new_arr[n] = 0
+        return new_arr
+
     shoulder = np.array([])
+    wrist = np.array([])
     spherical_coordinates = None
 
     for idx, keypoint in enumerate(results.pose_world_landmarks.landmark):
@@ -101,6 +102,14 @@ def publish_keypoint(pub, results):
 
             shoulder = get_keypoint_as_np(keypoint)
 
+        if idx == elbow_keypoint:
+            if not verify_visibility(keypoint=keypoint, visibility_threshold = VISIBILITY_THRESHOLD):
+                rospy.logwarn("Wrist keypoint not visible")
+                break
+
+            elbow = get_keypoint_as_np(keypoint)
+
         if idx == wrist_keypoint and shoulder.size != 0:
             if not verify_visibility(keypoint=keypoint, visibility_threshold = VISIBILITY_THRESHOLD):
                 rospy.logwarn("Wrist keypoint not visible")
@@ -113,76 +122,94 @@ def publish_keypoint(pub, results):
             if shifted_keypoint[0] > 0:
                 rospy.logerr("Wrist has to be to the right of the shoulder")    
                 break
-
-
+
+            # TODO: Verify theta; one component is zeroed so the angle is measured in a single plane
             # Calculating the angle between the keypoint and the z-axis (depth)
-            theta = vg.angle(shifted_keypoint, z_axis)
-            # Calculating the angle between the keypoint and the y-axis (height)
-            # Scaling the angle which defines the height to mimic the robotic arm's range of movement
+            theta_keypoints = zero_out(shifted_keypoint, 1)
+            theta = vg.angle(theta_keypoints, z_axis)
 
-            phi = vg.angle(shifted_keypoint, y_axis)
-
-
-            ## TODO: Global variable 
-            if phi < 30 or phi > 150:
-                rospy.logwarn("Invalid angle")
-                break
-
-            phi = adjust_range(30, 150, 30, 80, phi)
+        
+            # Calculating the angle between the keypoint and the y-axis (height)
+            phi_keypoints = zero_out(shifted_keypoint, 2)
+            phi = vg.angle(phi_keypoints, y_axis)
 
+            if (phi < 20 or phi > 150):
+                rospy.logerr(f"Phi angle {phi:.1f} is outside the supported range (20-150 degrees)")
+                return
 
             distance = euclid_dist(np.array([0,0,0]), shifted_keypoint)
 
+            # TODO: Check maximum available distance
             # Scaling the range of the distance from mediapipe to manipulator
             dist = adjust_range(mp_distance_range[0], mp_distance_range[1], 
                                 robot_distance_range[0], robot_distance_range[1], 
                                 distance)
+
+            # Computing the elbow-to-wrist vector used to estimate the wrist joint angle
+            wrist_vec = wrist - elbow
+            wrist_vec = zero_out(wrist_vec, 2)
+
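+            # Signed angle between the elbow-to-wrist vector and the negative x-axis (sign flipped based on the wrist's position relative to the elbow)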
+            wrist_angle = vg.angle(wrist_vec, -x_axis, units="rad")
+            if wrist[1] < elbow[1]:
+                wrist_angle = -wrist_angle
         
             spherical_coordinates = SphericalCoordinates()
-            spherical_coordinates.theta = math.radians(theta)
-            spherical_coordinates.phi = math.radians(phi)
+            spherical_coordinates.theta = theta
+            spherical_coordinates.phi = phi
             spherical_coordinates.distance = dist
+            spherical_coordinates.wrist_angle = wrist_angle
 
         elif idx == wrist_keypoint and shoulder.size == 0:
             rospy.logwarn("Shoulder keypoint missing")
 
+    # If spherical coordinates are defined, they are published
     if spherical_coordinates:
         pub.publish(spherical_coordinates)
 
 
 def skeleton_estimation_pose(pub, img, pose, data_collection = True):
+    # Running mediapipe to extract the keypoints
     results = pose.process(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
 
+    # Getting the pose landmarks from the resulting pose
     pose_landmarks = results.pose_landmarks
     try:
+        # Getting the right arm keypoints
         arm = [results.pose_landmarks.landmark[arm_keypoint] for arm_keypoint in arm_landmarks]
         pose_landmarks = landmark_pb2.NormalizedLandmarkList(landmark = arm)
     except Exception as e:
         pass
 
+    # Drawing the landmarks on the frame of the video (i.e. keypoints)
     mp_drawing.draw_landmarks(
         img,
         pose_landmarks,
         landmark_drawing_spec=mp_ds
     )
 
+    # Displaying the current frame of the video
     cv2.imshow('Pose', cv2.flip(img,1))
 
+    # Publishing the keypoints only if data collection has been set to true and there are results available
     if data_collection and results.pose_world_landmarks:
         publish_keypoint(pub=pub, results=results)
 
         
 def video_capture():
+    # Defining the current node
     node = 'arm_keypoint_capture'
     rospy.init_node(node)
 
+    # Creating a publisher of spherical coordinates
     pub = rospy.Publisher(node, SphericalCoordinates, queue_size=50)
 
-    if (video_replay):
+    # Capturing the video with a webcam or with an mp4 video
+    if (VIDEO_REPLAY):
         video = cv2.VideoCapture(VIDEO)
     else:
         video = cv2.VideoCapture(0)
 
+    # Initially the data is not published
     data_collection = False
 
     # Check if camera opened successfully
@@ -190,23 +217,29 @@ def video_capture():
         print("Error opening video stream or file")
         return
 
+    # If video is None the program will exit
     if (not video):
         print("Error - video is None")
         return
 
+    # Mediapipe pose estimation
     with mp_pose.Pose(static_image_mode=False, min_detection_confidence=0.5, model_complexity=2, enable_segmentation=True) as pose:
-
+
         print("Press 'E' to capture the keypoints")
 
         while not rospy.is_shutdown():
+            # Read the captured video 
             _, capture = video.read()
             
-            try:
+            try:
+                # Running pose estimation on the video and publishing the coordinates
                 skeleton_estimation_pose(pub, capture, pose, data_collection)
             except Exception as e:
-                rospy.logwarn("Video finished")
+                # If there was an exception, the program should exit
+                rospy.logwarn(f"Finished with: {e}")
                 break
 
+            # Initialize data collection with an 'E' key
             key = cv2.waitKey(1) 
             if key == ord('e'):
                 data_collection = not data_collection
@@ -215,11 +248,11 @@ def video_capture():
                 else:
                     rospy.loginfo("Publishing of data stopped")
 
-            # TODO: Quit ROS 
+            # Exit the program with a 'Q' key press
             if key == ord('q'):
                 rospy.logwarn("Closing the program")
                 break
-
+        # Release the video from memory and clear all the openCV windows
         video.release()
         cv2.destroyAllWindows()
 
diff --git a/pose_estimation/scripts/capture_keypoints.py b/pose_estimation/scripts/capture_keypoints.py
new file mode 100644
index 0000000000000000000000000000000000000000..dcc5babff16b5a431f3797b1098e8b061c3b79e9
--- /dev/null
+++ b/pose_estimation/scripts/capture_keypoints.py
@@ -0,0 +1,95 @@
+import mediapipe as mp
+import cv2
+import os
+import pickle
+
+from mediapipe.framework.formats import landmark_pb2
+
+### Replaying a pre-recorded mp4 video or using the webcam
+VIDEO_REPLAY = True
+test_video = "test_movement_2.mp4"
+VIDEO = f"{os.getcwd()}/../test/{test_video}"
+
+FILENAME = 'keypoints.pickle'
+
+### Mediapipe drawing
+mp_drawing = mp.solutions.drawing_utils 
+mp_drawing_styles = mp.solutions.drawing_styles
+mp_ds = mp_drawing_styles.get_default_pose_landmarks_style()
+
+### Mediapipe Pose Estimation
+mp_pose = mp.solutions.pose
+
+### Arm Landmarks
+arm_landmarks = [
+    mp_pose.PoseLandmark.RIGHT_SHOULDER.value, # 12
+    mp_pose.PoseLandmark.RIGHT_ELBOW.value, # 14
+    mp_pose.PoseLandmark.RIGHT_WRIST.value, # 16
+]
+
+def generate_keypoints(img, pose, keypoint_dict, dict_idx):
+    # Running mediapipe to extract the keypoints
+    results = pose.process(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
+
+    # Getting the pose landmarks from the resulting pose
+    pose_landmarks = results.pose_landmarks
+    try:
+        # Getting the right arm keypoints
+        arm = [results.pose_landmarks.landmark[arm_keypoint] for arm_keypoint in arm_landmarks]
+        pose_landmarks = landmark_pb2.NormalizedLandmarkList(landmark = arm)
+    except Exception as e:
+        pass
+
+    # Drawing the landmarks on the frame of the video (i.e. keypoints)
+    mp_drawing.draw_landmarks(
+        img,
+        pose_landmarks,
+        landmark_drawing_spec=mp_ds
+    )
+
+    keypoint_dict[dict_idx] = {}
+
+    if results.pose_world_landmarks:
+        for idx, keypoint in enumerate(results.pose_world_landmarks.landmark):
+            if idx in arm_landmarks:
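+                # Storing the coordinates with the y and z components swapped (depth as the second element)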
+                coordinates = (keypoint.x, keypoint.z, keypoint.y)
+                keypoint_dict[dict_idx][idx] = coordinates
+
+    return keypoint_dict
+
+def capture():
+    # Capturing the video with a webcam or with an mp4 video
+    if (VIDEO_REPLAY):
+        video = cv2.VideoCapture(VIDEO)
+    else:
+        video = cv2.VideoCapture(0)
+
+    keypoints_dict = {}
+
+    # Mediapipe pose estimation
+    with mp_pose.Pose(static_image_mode=False, min_detection_confidence=0.5, model_complexity=2, enable_segmentation=True) as pose:
+        
+        idx = 0
+        while(video.isOpened()):
+            # Read the captured video 
+            _, capture = video.read()
+            
+            try:
+                # Running pose estimation on the current frame and collecting the keypoints
+                keypoints_dict = generate_keypoints(capture, pose, keypoints_dict, idx)
+            except Exception as e:
+                break
+            
+            idx += 1
+            
+    # Release the video from memory and clear all the openCV windows
+    video.release()
+    cv2.destroyAllWindows()
+    
+    # Saving the keypoints, keyed by frame index, into a pickle file
+    with open(FILENAME, 'wb') as handle:
+        pickle.dump(keypoints_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)     
+
+if __name__ == "__main__":
+    capture()
+
diff --git a/pose_estimation/scripts/keypoints.pickle b/pose_estimation/scripts/keypoints.pickle
new file mode 100644
index 0000000000000000000000000000000000000000..f49cda91bc0c17a30b4baaf497b78f13b646b887
Binary files /dev/null and b/pose_estimation/scripts/keypoints.pickle differ
diff --git a/pose_estimation/scripts/pose_estimation_control.py b/pose_estimation/scripts/pose_estimation_control.py
index 7d39d4314a7073146da07f0ae9531c9b2319c968..5218bfbd7a065af855c6f57dd5cbd960e4ba5f9d 100755
--- a/pose_estimation/scripts/pose_estimation_control.py
+++ b/pose_estimation/scripts/pose_estimation_control.py
@@ -1,3 +1,12 @@
+##################### TODO #####################
+# - Debug: show the manipulator
+# - Test motion
+# - Vector from the elbow
+# - Remove DEBUG and incorporate with visualize_keypoints
+#################################################
+
+
 import rospy
 import math
 import itertools
@@ -5,15 +14,12 @@ import numpy as np
 
 from math import sin, cos
 
-### Importing the messages for OpenManipulator's joint movement
-from open_manipulator_msgs.srv import SetJointPosition
-from open_manipulator_msgs.msg import JointPosition
-
 from geometry_msgs.msg import Pose
 from geometry_msgs.msg import PointStamped
 
-from open_manipulator_msgs.srv import SetKinematicsPose
-from open_manipulator_msgs.msg import KinematicsPose
+from open_manipulator_msgs.srv import SetKinematicsPose, SetJointPosition, GetJointPosition, SetJointPositionRequest
+from open_manipulator_msgs.msg import KinematicsPose, JointPosition
+from sensor_msgs.msg import JointState
 
 ### Importing the messages for Spherical Coordinates
 from pose_estimation.msg import SphericalCoordinates
@@ -26,7 +32,7 @@ JOINT_NAMES = ["joint1", "joint2", "joint3", "joint4"]
 
 ### Variables for moving the gripper
 END_EFFECTOR_NAME = "gripper"
-PATH_TIME = 2
+PATH_TIME = 1
 
 ### Starting joint positions
 START_LOCATION = {
@@ -36,64 +42,109 @@ START_LOCATION = {
 }
 
 ### If set to true will publish the coordinates as PointStamped msg which can be displayed in RVIZ
-DEBUG = True
+DEBUG = False
 
 # Calculating the distance between two points
 def dis(point1, point2):
     return math.sqrt((point2[0] - point1[0])**2 + (point2[1] - point1[1])**2)
 
+# TODO: Use a PointCloud instead of a Marker
+def create_vis_marker(coordinates):
+    # Creating a marker which can be visualized in RVIZ
+    msg = Marker()
+
+    # Creating a custom frame and setting the timestamp
+    msg.header.frame_id = "my_frame"
+    msg.header.stamp = rospy.Time.now()
+
+    # Setting the position of the marker
+    msg.pose.position.x = coordinates['x'] * 10
+    msg.pose.position.y = coordinates['y'] * 10
+    msg.pose.position.z = coordinates['z'] * 10   
+
+    msg.pose.orientation.x = 0
+    msg.pose.orientation.y = 0
+    msg.pose.orientation.z = 0
+    msg.pose.orientation.w = 0
+
+    # Setting the scale
+    msg.scale.x = 0.2 * 10
+    msg.scale.y = 0.01 * 10
+    msg.scale.z = 0.05 * 10
+
+    # Making the marker visible and pink (RViz colour channels are floats in the 0-1 range)
+    msg.color.r = 199 / 255.0
+    msg.color.g = 21 / 255.0
+    msg.color.b = 133 / 255.0
+    msg.color.a = 1.0
+
+    return msg
 
-# SOURCES: https://math.libretexts.org/Bookshelves/Calculus/Book%3A_Calculus_(OpenStax)/12%3A_Vectors_in_Space/12.07%3A_Cylindrical_and_Spherical_Coordinates
-def spherical_to_cartesian(d, theta, phi):
+
+### TODO: Refactor into a shared helper (duplicated in arm_keypoint_capture.py)
+def adjust_range(old_min, old_max, new_min, new_max, old_value):
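+    # Linearly rescaling old_value from [old_min, old_max] into [new_min, new_max],
+    # e.g. adjust_range(0, 10, 0, 100, 5) -> 50.0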
+    old_range = old_max - old_min
+    new_range = new_max - new_min
+
+    return (((old_value - old_min) * new_range) / old_range) + new_min
+
+def spherical_to_cartesian(dist, theta, phi):
     global debug_pub
 
-    # TODO: 
-    # x = d * sin(phi) * cos(theta)
-    # y = d * sin(phi) * sin(theta)
-    # z = d * cos(phi)
-    x = d * cos(phi) * sin(theta)
-    y = d * sin(phi) * cos(theta)
-    z = d * sin(phi)
+    print(f"Theta: {theta}")
+
+    # Remapping phi from the measured range (30-150 deg) into 90-180 deg and converting both angles to radians
+    phi = math.radians(adjust_range(30, 150, 90, 180, phi))
+    theta = math.radians(theta)
+
+    x = dist * sin(phi)
+    y = dist * cos(theta)
+    z = -dist * cos(phi)
 
     coordinates = {
-        "x" : x, #START_LOCATION['x'],
-        "y" : START_LOCATION['y'],
+        "x" : x, 
+        "y" : y,
         "z" : z
     }
     
-
     if (DEBUG):
-        msg = Marker()
-
-        msg.header.frame_id = "my_frame"
-        msg.header.stamp = rospy.Time.now()
+        print(f"Distance {dist}")
+        print(f"Coordinates: {coordinates}")
+        print()
 
-        msg.pose.position.x = coordinates['x'] * 10
-        msg.pose.position.y = coordinates['y'] * 10
-        msg.pose.position.z = coordinates['z'] * 10   
+        msg = create_vis_marker(coordinates)
+        debug_pub.publish(msg)
 
-        msg.pose.orientation.x = 0
-        msg.pose.orientation.y = 0
-        msg.pose.orientation.z = 0
-        msg.pose.orientation.w = 0
+    return coordinates
 
-        msg.scale.x = 0.2 * 10
-        msg.scale.y = 0.01 * 10
-        msg.scale.z = 0.05 * 10
 
-        msg.color.r = 199
-        msg.color.g = 21
-        msg.color.b = 133
-        msg.color.a = 1.0 
+### TODO: Joint Angle
+def set_wrist_angle(pos):
+    # TODO: Raise an exception and stop the program if the connection fails
 
-        debug_pub.publish(msg)
+    rospy.wait_for_service('goal_joint_space_path', 2)
+    set_joints = rospy.ServiceProxy('goal_joint_space_path', SetJointPosition)
 
+    # NOTE: `pos` is currently unused; the joint targets below are hard-coded for testing
+    goal_request = SetJointPositionRequest()
+    goal_request.joint_position.joint_name = JOINT_NAMES
+    goal_request.joint_position.position = [1.0, 1.0, -1.0, 0.50]
+    goal_request.joint_position.max_accelerations_scaling_factor = 0.0
+    goal_request.joint_position.max_velocity_scaling_factor = 0.0
+    goal_request.path_time = 2.0
 
-        print(f"Distance {d}")
-        print(coordinates)
-        print()
-    return coordinates
+    resp = set_joints(goal_request)
 
+    if not resp.is_planned:
+        rospy.logerr("Failed to solve IK equation for wrist")
 
 def set_position(target_location):
     # TODO: Exception and stop program if connections fails
@@ -112,18 +163,22 @@ def set_position(target_location):
     resp = set_kinematics_pose(end_effector_name = END_EFFECTOR_NAME, kinematics_pose=kinematics_pose, path_time = PATH_TIME)
 
     if not resp.is_planned:
-        rospy.logerr("Failed to sovle IK equation")
+        rospy.logerr("Failed to solve IK equation")
         print(f"X: {target_location['x']}; Y: {target_location['y']}; Z: {target_location['z']}")
 
 
 def video_capture(coordinate):
     # Converting the spherical coordiantes received from pose estimation to cartesian coordinates
     target_location = spherical_to_cartesian(coordinate.distance, coordinate.theta, coordinate.phi)
-
+
     if(not DEBUG):
         try:
+            # Setting the wrist joint from the estimated wrist angle
+            set_wrist_angle(coordinate.wrist_angle)
+
             # Setting the location of the robotic arm
-            set_position(target_location)
+            # Temporarily disabled while the wrist joint is being tested
+            # set_position(target_location)
 
         except rospy.ServiceException as e:
             rospy.logerr(f"Service call failed {e}")
@@ -131,13 +186,29 @@ def video_capture(coordinate):
         # Sleep equal to the time needed for the arm to move to the desired location
         rospy.sleep(PATH_TIME)
 
+JOINT_POSITION_TMP = -100
+
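+# Callback that logs the last reported joint position whenever its rounded value changes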
+def joint_state(msg):
+    global JOINT_POSITION_TMP
+
+    angle = round(msg.position[-1], 0)
+    if JOINT_POSITION_TMP != angle:
+        print(f"New angle position: {angle}")
+        JOINT_POSITION_TMP = angle
+
 def pose_estimation():
     # Moving the arm to the starting position
     rospy.loginfo("Moving the arm to the starting location")
+
+    print("Subscribing to `/joint_states`")
+    rospy.Subscriber("/joint_states", JointState, joint_state, queue_size = 1)
     
     if (not DEBUG):
+        set_wrist_angle(2)
         set_position(START_LOCATION)
 
+    # NOTE: Early return while testing the wrist joint; the keypoint subscriber below is temporarily unreachable
+    return
+
     print("Subscribing to `arm_keypoint_capture`")
     rospy.Subscriber("arm_keypoint_capture", SphericalCoordinates, video_capture, queue_size = 1)
 
diff --git a/pose_estimation/scripts/visualize_keypoints.py b/pose_estimation/scripts/visualize_keypoints.py
new file mode 100644
index 0000000000000000000000000000000000000000..73ee6dcecd6b526d65999c16e4756dc3bce9302a
--- /dev/null
+++ b/pose_estimation/scripts/visualize_keypoints.py
@@ -0,0 +1,184 @@
+# Distance of the Manipulator -> 0.36
+# Max distance of the Arm -> 0.53
+# Scale = 0.36/0.53
+
+import pickle
+import time
+import rospy
+import tf
+
+import numpy as np
+import mediapipe as mp
+
+from tf.transformations import quaternion_matrix, translation_matrix
+
+from geometry_msgs.msg import Point32
+from geometry_msgs.msg import Pose
+from sensor_msgs.msg import PointCloud
+from open_manipulator_msgs.msg import KinematicsPose
+from open_manipulator_msgs.srv import SetKinematicsPose
+
+mp_pose = mp.solutions.pose
+SHOULDER = mp_pose.PoseLandmark.RIGHT_SHOULDER.value
+WRIST = mp_pose.PoseLandmark.RIGHT_WRIST.value
+
+# Pickle file with the keypoints
+FILENAME = 'keypoints.pickle'
+
+# Defining ROS Node to publish the messages
+frame = 'keypoint_frame'
+node = 'keypoint_capture'
+
+pub_shoulder = rospy.Publisher(f"{node}/shoulder", PointCloud, queue_size=50)
+pub_wrist = rospy.Publisher(f"{node}/wrist", PointCloud, queue_size=50)
+
+
+# TODO: Remove if not using
+manipulator_state = None
+
+def joint_manipulation(joint_state):
+    global manipulator_state
+    manipulator_state = joint_state
+
+
+def shift_keypoints(original, offset):
+    return (np.array(original) - np.array(offset))
+
+def publish_point(keypoint, offset, timestamp, pub):
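+    # Shifting the keypoint relative to the offset (the shoulder) and flipping the z-axis before publishing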
+    point = Point32()
+    shifted_keypoint = shift_keypoints(keypoint, offset)
+    point.x, point.y, point.z = shifted_keypoint
+    point.z = -point.z
+
+    msg = PointCloud()
+    msg.header.frame_id = frame
+    msg.header.stamp = timestamp
+    msg.points = [point]
+
+    pub.publish(msg)
+
+    return point
+
+
+def broadcast(point, node_from, node_to, time, orientation = (0,0,0)):
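+    # Broadcasting a TF transform that places node_from at `point` relative to node_to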
+    br = tf.TransformBroadcaster()
+
+    br.sendTransform((point.x, point.y, point.z),
+                      tf.transformations.quaternion_from_euler(orientation[0], orientation[1], orientation[2]),
+                      time,
+                      node_from,
+                      node_to)
+
+def point_to_np(point):
+    return np.array([point.x, point.y, point.z])
+
+def np_to_point(arr):
+    point = Point32()
+    point.x, point.y, point.z = arr[0], arr[1], arr[2]
+    return point
+
+def homogenous_transformation(old_position, transform_matrix):
+    # Converting the position to numpy array and appending one to it to make it a 4x1 vector
+    old_position_vect = point_to_np(old_position)
+    old_position_vect = np.append(old_position_vect, [1])
+    old_position_vect = old_position_vect.reshape((4,1))
+
+    # Multiplying the old point with the transform matrix to get the point in the other coordinate frame
+    transformed_point = np.matmul(transform_matrix, old_position_vect).flatten()
+
+    return np_to_point(transformed_point)
+
+def create_matrix(rot, trans):
+    # Converting the quaternion and position into matrices
+    rot_matrix = quaternion_matrix(rot)
+    trans_matrix = translation_matrix(trans)
+
+    # Combining the two matrices to create a 4x4 homogeneous transformation matrix
+    rot_matrix = rot_matrix[:,:3]
+    trans_matrix = np.array([trans_matrix[:,3]])
+    transform_matrix = np.concatenate((rot_matrix, trans_matrix.T), axis=1)
+
+    return transform_matrix
+
+
+def create_transform_matrix(old_frame, new_frame, time):
+    transform = None
+    listener = tf.TransformListener()
+    
+    # TODO: Why does it fail at times? Time?
+    try: 
+        listener.waitForTransform(new_frame, old_frame, time, rospy.Duration(1.0))
+        (trans, rot) = listener.lookupTransform(new_frame, old_frame,  time)
+
+        transform = create_matrix(rot, trans)
+    except Exception as e:
+        rospy.logwarn(e)
+        
+    
+    return transform
+
+def visualization():
+    gripper_topic = "/gripper/kinematics_pose"
+    print(f"Subscribing: {gripper_topic}")
+
+    rospy.Subscriber(gripper_topic, KinematicsPose, joint_manipulation, queue_size = 1)
+    
+    # Waiting until the first gripper pose message has been received
+    while not manipulator_state:
+        rospy.sleep(0.1)
+
+    print("Initial manipulator location has been received")
+    
+    # Reading the pickle file and saving its content as a dictionary
+    with open(FILENAME, 'rb') as handle:
+        captured_keypoints = pickle.load(handle)
+
+    # Iterating through all of the frames with keypoints
+    for timestamp, keypoints in captured_keypoints.items():
+        if rospy.is_shutdown():
+            break
+        
+        if not keypoints:
+            continue
+
+        if SHOULDER not in keypoints or WRIST not in keypoints:
+            continue
+
+        ros_time = rospy.Time.now()
+
+        timestamp = ros_time
+        offset = keypoints[SHOULDER]
+
+        shoulder = keypoints[SHOULDER]
+        shoulder_point = publish_point(shoulder, offset, timestamp, pub_shoulder)
+
+        wrist = keypoints[WRIST]
+        wrist_point = publish_point(wrist, offset, timestamp, pub_wrist)
+
+        broadcast(shoulder_point, f"{node}/shoulder", frame, ros_time, orientation=(0, 0, np.pi))
+        broadcast(wrist_point, f"{node}/wrist", f"{node}/shoulder", ros_time)
+
+        # TODO: Rotation; Scale (Calibration)
+        manipulator_position = np_to_point((point_to_np(wrist_point) * (0.36/0.53)))
+
+        # TODO:
+        # manipulator_position = manipulator_state.pose.position
+        transform = create_transform_matrix(f"{node}/shoulder", f"{node}/wrist", ros_time)
+        
+        if transform is None:
+            continue
+
+        relative_manipulator = homogenous_transformation(manipulator_position, transform)
+        broadcast(relative_manipulator, gripper_topic, f"{node}/wrist", ros_time)
+
+
+        # Sleeping between frames so the replay is visible in RViz
+        time.sleep(0.5)
+
+
+if __name__ == '__main__':
+    rospy.init_node(node)
+
+    try:
+        visualization()
+    except rospy.ROSInterruptException as e:
+        print("Exception: " + e)
diff --git a/pose_estimation/test/test_movement_2.mp4 b/pose_estimation/test/test_movement_2.mp4
new file mode 100644
index 0000000000000000000000000000000000000000..8fd7d20a6dfd5adf805242292b6ce509849dcba3
Binary files /dev/null and b/pose_estimation/test/test_movement_2.mp4 differ