diff --git a/pose_estimation/CMakeLists.txt b/pose_estimation/CMakeLists.txt
index 9020edfb32943dc4a908c7ebb7fbd1c095513f7b..8a75491b941b34c959fac7ba1f0f6213074816ff 100644
--- a/pose_estimation/CMakeLists.txt
+++ b/pose_estimation/CMakeLists.txt
@@ -8,6 +8,8 @@ find_package(catkin REQUIRED COMPONENTS
   open_manipulator_msgs
   geometry_msgs
   message_generation
+  open_manipulator_libs
+  robotis_manipulator
 )
 
 ################################################
@@ -20,19 +22,29 @@ add_message_files(
   # Keypoint.msg
   # Keypoints.msg
   SphericalCoordinates.msg
+  JointAngle.msg
+  JointPositions.msg
 )
 
 ## Generate added messages and services with any dependencies listed here
 generate_messages(
   DEPENDENCIES
   std_msgs
+  open_manipulator_msgs
 )
 
 ###################################
 ## catkin specific configuration ##
 ###################################
 catkin_package(
-  CATKIN_DEPENDS message_runtime
+  INCLUDE_DIRS include
+  CATKIN_DEPENDS
+    message_runtime
+    roscpp
+    std_msgs
+    open_manipulator_msgs
+    robotis_manipulator
+    open_manipulator_libs
 )
 
 ###########
@@ -42,6 +54,7 @@ catkin_package(
 ## Specify additional locations of header files
 ## Your package locations should be listed before other locations
 include_directories(
+  include
   ${catkin_INCLUDE_DIRS}
 )
 
@@ -53,5 +66,10 @@ include_directories(
 ## in contrast to setup.py, you can choose the destination
 catkin_install_python(PROGRAMS
   scripts/pose_estimation_control.py scripts/arm_keypoint_capture.py
+  scripts/data_collection.py scripts/training_data.py
   DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
-)
\ No newline at end of file
+)
+
+
+add_executable(inverse_kinematics_solver src/inverse_kinematics_solver.cpp)
+target_link_libraries(inverse_kinematics_solver ${catkin_LIBRARIES})
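+# Make sure generated message headers (e.g. pose_estimation/JointPositions.h)
+# are built before this target; standard catkin exported-target pattern.
+add_dependencies(inverse_kinematics_solver ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})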
diff --git a/pose_estimation/data/collected_data.pickle b/pose_estimation/data/collected_data.pickle
new file mode 100644
index 0000000000000000000000000000000000000000..c5383907f4cb198c0092b5b9ab5b9802df907905
Binary files /dev/null and b/pose_estimation/data/collected_data.pickle differ
diff --git a/pose_estimation/data/gesture_recognizer.task b/pose_estimation/data/gesture_recognizer.task
new file mode 100644
index 0000000000000000000000000000000000000000..1c6adc8b497d3e6d6603dfc7c864136d75f66e88
Binary files /dev/null and b/pose_estimation/data/gesture_recognizer.task differ
diff --git a/pose_estimation/data/keypoint_dataset.pickle b/pose_estimation/data/keypoint_dataset.pickle
new file mode 100644
index 0000000000000000000000000000000000000000..d24bc86a17811fbc5fb4f8d6dfb1cc2fac97c320
Binary files /dev/null and b/pose_estimation/data/keypoint_dataset.pickle differ
diff --git a/pose_estimation/data/models/joint_predict.pt b/pose_estimation/data/models/joint_predict.pt
new file mode 100644
index 0000000000000000000000000000000000000000..6a0e4a3c1207a6885dfe573d323568691cd5d6e7
Binary files /dev/null and b/pose_estimation/data/models/joint_predict.pt differ
diff --git a/pose_estimation/include/pose_estimation/inverse_kinematics_solver.h b/pose_estimation/include/pose_estimation/inverse_kinematics_solver.h
new file mode 100644
index 0000000000000000000000000000000000000000..ee92208e165a50c2ea2baa7e0206f5f30fdf41e8
--- /dev/null
+++ b/pose_estimation/include/pose_estimation/inverse_kinematics_solver.h
@@ -0,0 +1,29 @@
+#pragma once
+
+#include "open_manipulator_libs/kinematics.h"
+#include "open_manipulator_libs/open_manipulator.h"
+#include "open_manipulator_msgs/GetKinematicsPose.h"
+#include "open_manipulator_msgs/SetKinematicsPose.h"
+#include "pose_estimation/JointPositions.h"
+#include <robotis_manipulator/robotis_manipulator.h>
+#include "ros/ros.h"
+   
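+// Intended to wrap the OpenManipulator chain so incoming end-effector poses can be
+// passed through the robotis_manipulator kinematics solver and republished as
+// pose_estimation/JointPositions messages (implementation in src/inverse_kinematics_solver.cpp).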
+class InverseKinematicsSolver
+{
+ public:
+   InverseKinematicsSolver(bool using_platform, std::string usb_port, std::string baud_rate, double control_period);
+   
+   void keypointsCallback(const open_manipulator_msgs::KinematicsPose& msg);
+   
+   ros::NodeHandle getNodeHandle() const { return n_; } 
+
+ private:
+   OpenManipulator open_manipulator_;
+   std::vector<JointValue>* goal_joint_value_;
+   
+   ros::NodeHandle n_;
+   ros::Publisher ik_pub_;
+   robotis_manipulator::Kinematics *kinematics_;
+
+   void solveIK(Pose target_pose, const open_manipulator_msgs::KinematicsPose& manipulator_pose);
+};
\ No newline at end of file
diff --git a/pose_estimation/launch/keypoint_collection.launch b/pose_estimation/launch/keypoint_collection.launch
new file mode 100644
index 0000000000000000000000000000000000000000..17d61c622f294903c65b7aefe30a40539795956a
--- /dev/null
+++ b/pose_estimation/launch/keypoint_collection.launch
@@ -0,0 +1,37 @@
+<launch>
+    <arg name="gesture_model" doc="Name of the gesture recognition model. Located in `data/`" default="gesture_recognizer.task" />
+    <arg name="output_file" doc="Name of the file where the data will be saved" default="collected_data.pickle" />
+    <arg name="video_save" doc="Save video in the output file? ('true' or 'false')" default="true" />
+    
+    <arg name="ik_model" doc="Name trained model. Located in `data/models`" default="joint_predict.pt" />
+    <arg name="manipulator_control" doc="Control or Collection?" default="false" />
+
+
+    <!-- IF MANIPULATOR CONTROL START GAZEBO AND CONTROLLER -->
+
+    <!-- Running Gazebo Simulation with Open Manipulator-X if control is on -->
+    <include file="$(find open_manipulator_gazebo)/launch/open_manipulator_gazebo.launch" if="$(eval arg('manipulator_control') == 'true')">
+        <arg name="paused" value="false"/>
+    </include>
+
+    <arg name="usb_port"               default="/dev/ttyUSB0"/>
+    <arg name="baud_rate"              default="1000000"/>
+    <arg name="control_period"         default="0.010"/>
+    <arg name="use_platform"           default="false"/>
+    
+    <node name="open_manipulator_controller" pkg="open_manipulator_controller" type="open_manipulator_controller" 
+        output="log" args="$(arg usb_port) $(arg baud_rate)" if="$(eval arg('manipulator_control') == 'true')">
+        <param name="control_period"       value="$(arg control_period)"/>
+        <param name="using_platform"       value="$(arg use_platform)"/>
+    </node>
+
+
+    <node name="keypoint_collection" pkg="pose_estimation" type="data_collection.py" output="screen">
+        <param name="gesture_model"         value="$(arg gesture_model)"/>
+        <param name="filename"              value="$(arg output_file)"/>
+        <param name="video_save"            value="$(arg video_save)"/>
+        <param name="ik_model"              value="$(arg ik_model)"/>
+        <param name="manipulator_control"   value="$(arg manipulator_control)"/>
+    </node>
+    
+</launch>
diff --git a/pose_estimation/launch/training_data.launch b/pose_estimation/launch/training_data.launch
new file mode 100644
index 0000000000000000000000000000000000000000..064e88f5618e7554890626df8d0fe8de1e8741b2
--- /dev/null
+++ b/pose_estimation/launch/training_data.launch
@@ -0,0 +1,37 @@
+<launch>
+    <arg name="input_keypoint_file" doc="Name of the input file with keypoints (from data_collection)" default="collected_data.pickle" />
+    <arg name="training_file" doc="Name of the file where the training data will be saved" default="keypoint_dataset.pickle" />
+    <arg name="simulate" doc="Simulate the motion in Gazebo?" default="false"/>
+    <arg name="show_video" doc="Show the input video" default="false" />
+
+    <!-- Running Gazebo Simulation with Open Manipulator-X -->
+    <!-- <include file="$(find open_manipulator_gazebo)/launch/open_manipulator_gazebo.launch" if="$(eval arg('simulate') == 'true')">
+        <arg name="paused" value="false"/>
+    </include> -->
+
+    <!-- <arg name="usb_port"               default="/dev/ttyUSB0"/>
+    <arg name="baud_rate"              default="1000000"/>
+    <arg name="control_period"         default="0.010"/>
+    <arg name="use_platform"           default="false"/>
+    
+    <node name="open_manipulator_controller" pkg="open_manipulator_controller" type="open_manipulator_controller" 
+        output="log" args="$(arg usb_port) $(arg baud_rate)" if="$(eval arg('simulate') == 'true')">
+        <param name="control_period"       value="$(arg control_period)"/>
+        <param name="using_platform"       value="$(arg use_platform)"/>
+    </node> -->
+
+
+    <!-- Running rviz with the TF graph setup -->
+    <node type="rviz" name="rviz" pkg="rviz" args="-d $(find pose_estimation)/rviz/keypoint_tf_frame.rviz" />
+
+    <!-- Running Inverse Kinematics Solver Node -->
+    <node name="ik_solver" pkg="pose_estimation" type="inverse_kinematics_solver" output="screen" />
+
+    <node name="training_data" pkg="pose_estimation" type="training_data.py" output="screen">
+        <param name="input_keypoint_file" value="$(arg input_keypoint_file)"/>
+        <param name="training_file"       value="$(arg training_file)"/>
+        <param name="simulate"            value="$(arg simulate)"/>
+        <param name="show_video"          value="$(arg show_video)"/>
+    </node>
+
+</launch>
diff --git a/pose_estimation/msg/JointAngle.msg b/pose_estimation/msg/JointAngle.msg
new file mode 100644
index 0000000000000000000000000000000000000000..27a75a0b34f1a8526468db736b25156a1f9cdb6e
--- /dev/null
+++ b/pose_estimation/msg/JointAngle.msg
@@ -0,0 +1,2 @@
+string name
+float64 angle
\ No newline at end of file
diff --git a/pose_estimation/msg/JointPositions.msg b/pose_estimation/msg/JointPositions.msg
new file mode 100644
index 0000000000000000000000000000000000000000..b76eaa7071c1ba2a35cc05625c4dd92b9a535e76
--- /dev/null
+++ b/pose_estimation/msg/JointPositions.msg
@@ -0,0 +1,4 @@
+JointAngle[] jointPositions
+open_manipulator_msgs/KinematicsPose manipulatorPose
+
+bool success
\ No newline at end of file
diff --git a/pose_estimation/package.xml b/pose_estimation/package.xml
index c99581201b0d6eb4873cf45fd609f36fe1ffcead..95a8c873710f1dfaa543326e73c7093f8223e374 100644
--- a/pose_estimation/package.xml
+++ b/pose_estimation/package.xml
@@ -66,6 +66,12 @@
   <!-- Message Generation -->
   <build_depend>message_generation</build_depend>
   <exec_depend>message_runtime</exec_depend>
+
+  <build_depend>robotis_manipulator</build_depend>
+  <exec_depend>robotis_manipulator</exec_depend>
+  <build_depend>open_manipulator_libs</build_depend>
+  <exec_depend>open_manipulator_libs</exec_depend>
+  
   
   <!-- The export tag contains other, unspecified, tags -->
   <export>
diff --git a/pose_estimation/rviz/keypoint_tf_frame.rviz b/pose_estimation/rviz/keypoint_tf_frame.rviz
new file mode 100644
index 0000000000000000000000000000000000000000..3e2b384e83c24909dcd53d9be5ae160a1c14c8a9
--- /dev/null
+++ b/pose_estimation/rviz/keypoint_tf_frame.rviz
@@ -0,0 +1,135 @@
+Panels:
+  - Class: rviz/Displays
+    Help Height: 78
+    Name: Displays
+    Property Tree Widget:
+      Expanded:
+        - /Global Options1
+        - /Status1
+        - /TF1
+      Splitter Ratio: 0.5
+    Tree Height: 549
+  - Class: rviz/Selection
+    Name: Selection
+  - Class: rviz/Tool Properties
+    Expanded:
+      - /2D Pose Estimate1
+      - /2D Nav Goal1
+      - /Publish Point1
+    Name: Tool Properties
+    Splitter Ratio: 0.5886790156364441
+  - Class: rviz/Views
+    Expanded:
+      - /Current View1
+    Name: Views
+    Splitter Ratio: 0.5
+  - Class: rviz/Time
+    Name: Time
+    SyncMode: 0
+    SyncSource: ""
+Preferences:
+  PromptSaveOnExit: true
+Toolbars:
+  toolButtonStyle: 2
+Visualization Manager:
+  Class: ""
+  Displays:
+    - Alpha: 0.5
+      Cell Size: 1
+      Class: rviz/Grid
+      Color: 160; 160; 164
+      Enabled: true
+      Line Style:
+        Line Width: 0.029999999329447746
+        Value: Lines
+      Name: Grid
+      Normal Cell Count: 0
+      Offset:
+        X: 0
+        Y: 0
+        Z: 0
+      Plane: XY
+      Plane Cell Count: 10
+      Reference Frame: <Fixed Frame>
+      Value: true
+    - Class: rviz/TF
+      Enabled: true
+      Frame Timeout: 15
+      Frames:
+        All Enabled: true
+      Marker Alpha: 1
+      Marker Scale: 1
+      Name: TF
+      Show Arrows: true
+      Show Axes: true
+      Show Names: true
+      Tree:
+        {}
+      Update Interval: 0
+      Value: true
+  Enabled: true
+  Global Options:
+    Background Color: 48; 48; 48
+    Default Light: true
+    Fixed Frame: keypoint_frame
+    Frame Rate: 30
+  Name: root
+  Tools:
+    - Class: rviz/Interact
+      Hide Inactive Objects: true
+    - Class: rviz/MoveCamera
+    - Class: rviz/Select
+    - Class: rviz/FocusCamera
+    - Class: rviz/Measure
+    - Class: rviz/SetInitialPose
+      Theta std deviation: 0.2617993950843811
+      Topic: /initialpose
+      X std deviation: 0.5
+      Y std deviation: 0.5
+    - Class: rviz/SetGoal
+      Topic: /move_base_simple/goal
+    - Class: rviz/PublishPoint
+      Single click: true
+      Topic: /clicked_point
+  Value: true
+  Views:
+    Current:
+      Class: rviz/Orbit
+      Distance: 10
+      Enable Stereo Rendering:
+        Stereo Eye Separation: 0.05999999865889549
+        Stereo Focal Distance: 1
+        Swap Stereo Eyes: false
+        Value: false
+      Field of View: 0.7853981852531433
+      Focal Point:
+        X: 0
+        Y: 0
+        Z: 0
+      Focal Shape Fixed Size: true
+      Focal Shape Size: 0.05000000074505806
+      Invert Z Axis: false
+      Name: Current View
+      Near Clip Distance: 0.009999999776482582
+      Pitch: 0.785398006439209
+      Target Frame: <Fixed Frame>
+      Yaw: 0.785398006439209
+    Saved: ~
+Window Geometry:
+  Displays:
+    collapsed: false
+  Height: 846
+  Hide Left Dock: false
+  Hide Right Dock: false
+  QMainWindow State: 000000ff00000000fd000000040000000000000156000002b0fc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d000002b0000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261000000010000010f000002b0fc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073010000003d000002b0000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000004b00000003efc0100000002fb0000000800540069006d00650100000000000004b0000003bc00fffffffb0000000800540069006d006501000000000000045000000000000000000000023f000002b000000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
+  Selection:
+    collapsed: false
+  Time:
+    collapsed: false
+  Tool Properties:
+    collapsed: false
+  Views:
+    collapsed: false
+  Width: 1200
+  X: 677
+  Y: 105
diff --git a/pose_estimation/scripts/arm_keypoint_capture.py b/pose_estimation/scripts/arm_keypoint_capture.py
deleted file mode 100755
index 782138acc7b1aad393ac0e44039580daada6d303..0000000000000000000000000000000000000000
--- a/pose_estimation/scripts/arm_keypoint_capture.py
+++ /dev/null
@@ -1,218 +0,0 @@
-############################## TODO ################################
-# 1) the angle closer to the table not accurate
-
-
-
-# TESTS: 
-#      - y (depth) -> poor accuracy 
-#      - z (height) -> better accuracy but gets out of range 
-#      - x (width) -> the range is too small
-
-### VERIFY: spherical to coordinates 
-
-# IMPROVEMENTS: How often do we update the arm? How long to move the arm? Which IKs won't be solved
-# GENERAL: Global variables, comments and structure
-####################################################################
-
-
-import mediapipe as mp
-import numpy as np
-import rospy
-import math
-import cv2
-import vg
-import logging
-
-from mediapipe.framework.formats import landmark_pb2
-
-### Importing messages
-from pose_estimation.msg import SphericalCoordinates
-
-### Mediapipe drawing
-mp_drawing = mp.solutions.drawing_utils 
-mp_drawing_styles = mp.solutions.drawing_styles
-mp_ds = mp_drawing_styles.get_default_pose_landmarks_style()
-
-### Mediapipe Pose Estimation
-mp_pose = mp.solutions.pose
-
-### Unit axis vectors
-x_axis = np.array([1, 0, 0])
-y_axis = np.array([0, 1, 0])
-z_axis = np.array([0, 0, 1])
-
-# TODO: Define in the other file (prove mathematically)
-### Ranges 
-mp_distance_range = (0.0,0.53)
-robot_distance_range = (0.14, 0.46)
-# robot_distance_range = (0.14, 0.46)
-
-### Threshold
-VISIBILITY_THRESHOLD = 0.2
-
-### Arm Landmarks
-wrist_keypoint = mp_pose.PoseLandmark.RIGHT_WRIST.value
-shoulder_keypoint = mp_pose.PoseLandmark.RIGHT_SHOULDER.value
-
-arm_landmarks = [
-    shoulder_keypoint, 
-    mp_pose.PoseLandmark.RIGHT_ELBOW.value,
-    wrist_keypoint,
-]
-
-### Checks if the keypoint is relatively visible
-def verify_visibility(keypoint, visibility_threshold):
-    if keypoint.visibility <= visibility_threshold:
-        return False
-
-    return True
-
-### Adjusts values to a new range
-def adjust_range(old_min, old_max, new_min, new_max, old_value):
-    old_range = old_max - old_min
-    new_range = new_max - new_min
-
-    return (((old_value - old_min) * new_range) / old_range) + new_min
-
-
-def euclid_dist(vec1, vec2):
-  if vec1.size != vec2.size:
-    print("Size of the two points doesn't match")
-    return None
-  
-  sum_ = 0
-  for i in range(vec1.size):
-    squared_difference = (vec1[i] - vec2[i])**2
-    sum_ += squared_difference
-  
-  return math.sqrt(sum_)
-
-
-### Convert keypoint locations to a numpy array
-def get_keypoint_as_np(keypoint):
-    return np.array([keypoint.x, keypoint.y, keypoint.z])
-
-### Method used for publishing the spherical coordinates 
-def publish_keypoint(pub, results):
-    shoulder = np.array([])
-    spherical_coordinates = None
-
-    for idx, keypoint in enumerate(results.pose_world_landmarks.landmark):
-        if idx == shoulder_keypoint:
-            if not verify_visibility(keypoint=keypoint, visibility_threshold = VISIBILITY_THRESHOLD):
-                rospy.logwarn("Shoulder keypoint not visible")
-                break
-
-            shoulder = get_keypoint_as_np(keypoint)
-
-        if idx == wrist_keypoint and shoulder.size != 0:
-            if not verify_visibility(keypoint=keypoint, visibility_threshold = VISIBILITY_THRESHOLD):
-                rospy.logwarn("Wrist keypoint not visible")
-                break
-
-            wrist = get_keypoint_as_np(keypoint)
-
-            shifted_keypoint = wrist - shoulder
-            # Checking if the keypoint is to the left side of the shoulder, i.e. if x is positive (because the image is inverted, otherwise it should be positive)
-            if shifted_keypoint[0] > 0:
-                rospy.logerr("Wrist has to be to the right of the shoulder")    
-                break
-
-
-            # Calculating the angle between the keypoint and the z-axis (depth)
-            theta = vg.angle(shifted_keypoint, z_axis)
-            # Calculating the angle between the keypoint and the y-axis (height)
-            # Scaling the angle which defines the height to mimic the robotic arm's range of movement
-
-            phi = vg.angle(shifted_keypoint, y_axis)
-            ## TODO: Global variable 
-            if phi < 30 or phi > 150:
-                rospy.logwarn("Invalid angle")
-                break
-
-            phi = adjust_range(60, 140, 30, 80, phi)
-
-
-            distance = euclid_dist(np.array([0,0,0]), shifted_keypoint)
-
-            # Scaling the range of the distance from mediapipe to manipulator
-            dist = adjust_range(mp_distance_range[0], mp_distance_range[1], 
-                                robot_distance_range[0], robot_distance_range[1], 
-                                distance)
-        
-
-            print(f"Calculated distance: {distance}")
-
-            spherical_coordinates = SphericalCoordinates()
-            spherical_coordinates.theta = math.radians(theta)
-            spherical_coordinates.phi = math.radians(phi)
-            spherical_coordinates.distance = dist
-
-        elif idx == wrist_keypoint and shoulder.size == 0:
-            rospy.logwarn("Shoulder keypoint missing")
-
-    if spherical_coordinates:
-        pub.publish(spherical_coordinates)
-
-
-def skeleton_estimation_pose(pub, img, pose, data_collection = True):
-    results = pose.process(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
-
-
-    pose_landmarks = results.pose_landmarks
-    try:
-        arm = [results.pose_landmarks.landmark[arm_keypoint] for arm_keypoint in arm_landmarks]
-        pose_landmarks = landmark_pb2.NormalizedLandmarkList(landmark = arm)
-    except Exception as e:
-        pass
-
-    mp_drawing.draw_landmarks(
-        img,
-        pose_landmarks,
-        landmark_drawing_spec=mp_ds
-    )
-
-    cv2.imshow('Pose', cv2.flip(img,1))
-
-    if data_collection and results.pose_world_landmarks:
-        publish_keypoint(pub=pub, results=results)
-
-        
-def video_capture():
-    node = 'arm_keypoint_capture'
-    rospy.init_node(node)
-    pub = rospy.Publisher(node, SphericalCoordinates, queue_size=50)
-
-    video = cv2.VideoCapture(0)
-    data_collection = False
-
-    with mp_pose.Pose(static_image_mode=False, min_detection_confidence=0.5, model_complexity=2, enable_segmentation=True) as pose:
-        
-        print("Press 'E' to capture the keypoints")
-
-        while not rospy.is_shutdown():
-            _, capture = video.read()
-            
-            skeleton_estimation_pose(pub, capture, pose, data_collection)
-
-            key = cv2.waitKey(1) 
-            if key == ord('e'):
-                data_collection = not data_collection
-                if data_collection:
-                    rospy.loginfo("Publishing the data on 'arm_keypoint_capture'")
-                else:
-                    rospy.loginfo("Publishing of data stopped")
-
-            # TODO: Quit ROS 
-            if key == ord('q'):
-                rospy.logwarn("Closing the program")
-                break
-
-        video.release()
-        cv2.destroyAllWindows()
-
-if __name__ == "__main__":
-    try:
-        video_capture()
-    except rospy.ROSInterruptException as e:
-        rospy.logfatal(f"Following exception caused the program to stop: {e}")
diff --git a/pose_estimation/scripts/data_collection.py b/pose_estimation/scripts/data_collection.py
new file mode 100755
index 0000000000000000000000000000000000000000..d05ac1d8dd8575ecdb83f8a7a991fb29bca9eddf
--- /dev/null
+++ b/pose_estimation/scripts/data_collection.py
@@ -0,0 +1,492 @@
+## TODO:
+# Save the file into data folder (?)
+
+
+import numpy as np
+import mediapipe as mp
+
+from PIL import Image
+import rospy
+import cv2
+import os
+import pickle
+import math
+import vg
+import torch
+import tf
+
+from mediapipe.framework.formats import landmark_pb2
+
+
+CONTROL = rospy.get_param('/keypoint_collection/manipulator_control')
+
+if not CONTROL:
+    FILENAME = rospy.get_param('/keypoint_collection/filename')
+    VIDEO_SAVE = rospy.get_param('/keypoint_collection/video_save') 
+    save_file = os.path.expanduser(f"~/catkin_ws/src/pose_estimation/data/{FILENAME}")
+
+else: 
+    ik_model = rospy.get_param('/keypoint_collection/ik_model')
+    CONTROL_MODEL = os.path.expanduser(f"~/catkin_ws/src/pose_estimation/data/models/{ik_model}")
+
+MODEL = rospy.get_param('/keypoint_collection/gesture_model')
+model_file = os.path.expanduser(f"~/catkin_ws/src/pose_estimation/data/{MODEL}")
+    
+
+### TODO: FILENAME CHANGE
+
+
+
+# Dictionary of different modes/stages of data collection
+COLLECTION_MODES = {
+    "Setup": 0,
+    "Calibration": 1,
+    "Collection": 2,
+    "Terminate": 3,
+    "Finished": 4,
+    "Control": 5,
+}
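+# Typical flow: Setup -> Calibration -> Collection (or Control, when manipulator
+# control is requested) -> Finished; Terminate aborts without saving any data.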
+
+MANIPULATOR_MAX_DIST = 0.38
+CONFIDENCE_SCORE = 0.6
+
+### Mediapipe Gesture Recognition
+BaseOptions = mp.tasks.BaseOptions
+GestureRecognizer = mp.tasks.vision.GestureRecognizer
+GestureRecognizerOptions = mp.tasks.vision.GestureRecognizerOptions
+VisionRunningMode = mp.tasks.vision.RunningMode
+
+### Mediapipe pose estimation
+mp_pose = mp.solutions.pose
+
+### Mediapipe drawing
+mp_drawing = mp.solutions.drawing_utils 
+mp_drawing_styles = mp.solutions.drawing_styles
+mp_ds = mp_drawing_styles.get_default_pose_landmarks_style()
+
+### Arm Landmarks
+wrist_keypoint = mp_pose.PoseLandmark.RIGHT_WRIST.value
+elbow_keypoint = mp_pose.PoseLandmark.RIGHT_ELBOW.value
+shoulder_keypoint = mp_pose.PoseLandmark.RIGHT_SHOULDER.value
+
+arm_landmarks = [shoulder_keypoint, elbow_keypoint, wrist_keypoint]
+
+def create_file():
+    rospy.loginfo(f"Creating file: {save_file}")
+    with open(save_file, 'wb') as handle:
+        pickle.dump({}, handle, protocol=pickle.HIGHEST_PROTOCOL)
+
+def save_dataset(data):
+    rospy.loginfo("Saving the dataset...")
+    if not os.path.exists(save_file):
+        create_file()
+
+    # Read the file and append the input to it
+    captured_keypoints = None
+    with open(save_file, 'rb') as handle:
+        captured_keypoints = pickle.load(handle)
+        keys = list(captured_keypoints.keys())
+        idx = keys[-1] + 1 if len(keys) != 0 else 0
+        
+        captured_keypoints[idx] = data
+    
+    if captured_keypoints:
+        with open(save_file, 'wb') as handle:
+            rospy.loginfo(f"Saving a video with index: {idx}")
+            pickle.dump(captured_keypoints, handle, protocol=pickle.HIGHEST_PROTOCOL)
+            rospy.loginfo(f"The dataset has been saved correctly at '{save_file}'")
+
+def verify_gesture(gesture, expected_gesture, score):
+    # Checking that the gesture is the expected gesture and that the confidence score is high enough
+    return gesture == expected_gesture and score >= CONFIDENCE_SCORE
+
+def gesture_recognition(recognizer, capture, frame_timestamp_ms, mode):
+    # Converting the image to Mediapipe image type
+    pil_img = Image.fromarray(capture) 
+    mp_image = mp.Image(image_format = mp.ImageFormat.SRGB, data = np.asarray(pil_img))
+    
+    # Using the model to get the gesture recognition from video
+    gesture_recognition_result = recognizer.recognize_for_video(mp_image, frame_timestamp_ms)
+
+    if len(gesture_recognition_result.gestures) != 0:
+        # Getting the highest ranked gesture and its confidence score
+        gesture = gesture_recognition_result.gestures[0][0].category_name
+        score = gesture_recognition_result.gestures[0][0].score
+        
+        # If the mode is set to setup, searching for a Thumbs up to start calibration
+        if mode == COLLECTION_MODES["Setup"]:
+            if verify_gesture(gesture, "Thumb_Up", score):
+                rospy.loginfo("\n*** CALIBRATION MODE ***")
+                return COLLECTION_MODES["Calibration"]
+        # If the mode is set to collection, searching for a Thumbs up or down to stop the collection
+        elif mode == COLLECTION_MODES["Collection"]:
+            if verify_gesture(gesture, "Thumb_Up", score):
+                rospy.loginfo("Detected Thumbs Up...")
+                rospy.loginfo("\n*** Collection Over ***")
+                return COLLECTION_MODES["Finished"]
+            if verify_gesture(gesture, "Thumb_Down", score):
+                rospy.logwarn("\n***Program Terminated***")
+                return COLLECTION_MODES["Terminate"]
+        elif mode == COLLECTION_MODES["Control"]:
+            if verify_gesture(gesture, "Thumb_Down", score):
+                rospy.logwarn("\n***Program Terminated***")
+                return COLLECTION_MODES["Terminate"]
+
+
+    # Returning the mode
+    return mode
+
+
+def show_image(img, pose_landmarks = None):
+    # Drawing the landmarks on the frame of the video (i.e. keypoints)
+    if pose_landmarks:
+        mp_drawing.draw_landmarks(
+            img,
+            pose_landmarks,
+            landmark_drawing_spec=mp_ds
+        )
+    
+    # Displaying the current frame of the video
+    cv2.imshow("Pose Video", cv2.flip(img, 1))
+
+# TODO: Possibly add Visibility
+def get_landmarks(pose_result):
+
+    if not pose_result.pose_world_landmarks or not pose_result.pose_world_landmarks.landmark: return None
+
+    landmarks = {}
+    angle = None
+    for idx, keypoint in enumerate(pose_result.pose_world_landmarks.landmark):
+        if idx in arm_landmarks:
+            coordinates = (keypoint.x, keypoint.z, keypoint.y)
+            landmarks[idx] = coordinates
+    
+    elbow, wrist = np.array(landmarks[elbow_keypoint]), np.array(landmarks[wrist_keypoint])
+    wrist_vec = zero_out(wrist - elbow, 2)
+
+    angle = vg.angle(wrist_vec, np.array([-1, 0, 0]), units="rad")
+    if wrist[1] < elbow[1]:
+        angle = -angle
+    else:
+        angle = 0.0
+    
+    landmarks['angle'] = angle
+    return landmarks
+
+def test_path(model_path):
+    return os.path.exists(model_path)
+
+############# HELPER FUNCTIONS FOR KEYPOINT MANIPULATION #############
+ 
+### Check the visibility of a keypoint
+def verify_visibility(keypoint, visibility_threshold):
+    if keypoint.visibility <= visibility_threshold:
+        return False
+
+    return True
+
+### Convert keypoint locations to a numpy array
+def get_keypoint_as_np(keypoint):
+    return np.array([keypoint.x, keypoint.y, keypoint.z])
+
+### Takes in an array and zeros out a specified index
+def zero_out(arr, n):
+    new_arr = np.copy(arr)
+    new_arr[n] = 0
+    return new_arr
+
+### Euclidean distance between two vectors
+def euclid_dist(vec1, vec2):
+  if vec1.size != vec2.size:
+    rospy.logerr("Size of the two points doesn't match")
+    return None
+  
+  sum_ = 0
+  for i in range(vec1.size):
+    squared_difference = (vec1[i] - vec2[i])**2
+    sum_ += squared_difference
+  
+  return math.sqrt(sum_)
+
+########################################################
+
+def skeleton_estimation_pose(img, pose):
+    # Running mediapipe to extract the keypoints
+    results = pose.process(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
+
+    # Getting the pose landmarks from the resulting pose
+    pose_landmarks = results.pose_landmarks
+    
+    if not pose_landmarks: 
+        show_image(img)
+        return None
+
+    try:
+        # Getting the right arm keypoints
+        arm = [pose_landmarks.landmark[arm_keypoint] for arm_keypoint in arm_landmarks]
+        pose_landmarks = landmark_pb2.NormalizedLandmarkList(landmark = arm)
+    except Exception as e:
+        rospy.logerr(f"Exception: {e}")
+
+    # Displaying the video feed and the landmarks
+    show_image(img, pose_landmarks)
+    
+    return results
+
+
+def calculate_calibration(pose_result):
+    # Distance between the shoulder and the wrist
+    landmarks = get_landmarks(pose_result)
+
+    shoulder, wrist = landmarks[shoulder_keypoint], landmarks[wrist_keypoint]
+    shoulder, wrist = np.array(shoulder), np.array(wrist)
+
+    shifted_wrist = wrist - shoulder
+
+    # Checking if the wrist is to the left side of the shoulder, i.e. if x is positive (because the image is mirrored; otherwise it would be negative)
+    if shifted_wrist[0] > 0:
+        rospy.logerr("Wrist has to be to the right of the shoulder")    
+        return None
+
+    distance = euclid_dist(np.array([0,0,0]), shifted_wrist)
+
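+    # The scale factor maps human arm length onto the manipulator's reach,
+    # e.g. a measured shoulder-to-wrist distance of 0.76 m gives 0.38 / 0.76 = 0.5,
+    # i.e. wrist offsets are halved to fit the manipulator's workspace.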
+    return MANIPULATOR_MAX_DIST / distance
+
+
+### TODO: Function
+# def calibrate():
+#     pass
+
+
+### TODO: CLASS
+# from training import Net
+import torch.nn as nn
+import torch.nn.functional as F
+
+class Net(nn.Module):
+  def __init__(self, input_num, output_num):
+    super(Net, self).__init__()
+    self.l1 = nn.Linear(input_num, 32)
+    self.l2 = nn.Linear(32, output_num)
+  def forward(self, x):
+    x = F.tanh(self.l1(x))
+    return self.l2(x)
+  
+def get_model(path):
+    model = Net(7, 4)
+    model.load_state_dict(torch.load(path)['model_state_dict'])
+    model.eval()
+    return model
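+# Expected model input (inferred from control_manipulator below): a 7-vector
+# [wrist_x, wrist_y, wrist_z, quat_x, quat_y, quat_z, quat_w] mapped to four joint angles.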
+
+def video_capture(control = False):
+    video = cv2.VideoCapture(0)
+    video_fps = video.get(cv2.CAP_PROP_FPS)
+
+    counter, countdown = 0, -1
+    threshold = (video_fps * 100) + 100
+    calibration_result = None
+
+    MODE = COLLECTION_MODES["Setup"]
+
+    ### TODO: Verify before
+    # VIDEO_SAVE is only read in collection mode; it is undefined when control is requested
+    if not control and type(VIDEO_SAVE) != bool:
+        rospy.logfatal("video_save must be a boolean")
+        return None
+
+    if not control and VIDEO_SAVE:
+        data = {"points": [], "video": [], "calib": None}
+    else:
+        data = {"points": [], "calib": None}
+
+    if not control:
+        setup_info = """"*** Setup mode ***
+                        To calibrate the arm length with the manipulator, face the camera, 
+                        extend your **right** arm fully to the side (parallel to the ground) and gesture **Thumbs Up**.
+                        Once the gesture is recognized, a countdown will initiate and at 0 it will calibrate the distance.
+                        Important: Please try not to move your body other than the right arm."""
+        rospy.loginfo(setup_info)
+    else:
+        model = get_model(CONTROL_MODEL)
+
+    # Check if camera opened successfully
+    if (not video.isOpened() or not video): 
+        rospy.logerr("Error opening video stream or file")
+        return
+    
+    options = GestureRecognizerOptions(
+        base_options = BaseOptions(model_asset_path = model_file), 
+        running_mode = VisionRunningMode.VIDEO
+    )
+    
+    with mp_pose.Pose(static_image_mode=False, min_detection_confidence=0.5, model_complexity=2, enable_segmentation=True) as pose:    
+        # Create a gesture recognizer instance with the video mode:
+
+        with GestureRecognizer.create_from_options(options) as recognizer:
+            frame_idx = 0
+          
+            # Running while the user does not signify that the data collection is finished
+            while MODE != COLLECTION_MODES['Finished']:
+                frame_idx += 1
+
+                # If the program is shutdown or the user sets it to terminate, the data will not be saved 
+                if MODE == COLLECTION_MODES['Terminate'] or cv2.waitKey(1) & 0xFF == ord('q') or rospy.is_shutdown(): 
+                    # Release the video from memory and clear all the openCV windows
+                    video.release()
+                    cv2.destroyAllWindows()
+                  
+                    rospy.logwarn("Terminated the program, no data is saved")
+                  
+                    return None
+
+                _, capture = video.read()
+
+                try:   
+                    frame_ms = int((1000 * frame_idx) / video_fps)
+                    MODE = gesture_recognition(recognizer, capture, frame_ms, MODE)
+                
+                    if MODE == COLLECTION_MODES["Setup"]:
+                        show_image(capture)
+
+
+
+                    ### TODO: Function
+                    ### TODO: Max calibration rather than last
+                    elif MODE == COLLECTION_MODES['Calibration']:
+                        counter += (1000 / video_fps)
+                        if counter <= threshold:
+                            results = skeleton_estimation_pose(capture, pose)
+                            if results:
+                                calibration_result = results
+
+                            prev_count = countdown
+                            countdown = int((counter)/1000)
+                            
+                            if prev_count != countdown:
+                                print(3-countdown)
+                    
+                        else:
+                            if not calibration_result:
+                                rospy.logerr("No results found for calibration, Terminating...") 
+                                MODE = COLLECTION_MODES["Terminate"]
+                                continue
+
+                            data['calib'] = calculate_calibration(calibration_result)
+                            MODE = COLLECTION_MODES["Collection"] if not control else COLLECTION_MODES["Control"]
+                            
+                            if MODE == COLLECTION_MODES["Collection"]:
+                                rospy.loginfo("\n***Dataset Collection Started***\n\n-> Terminate: Thumbs Down (or CTRL+C)\n-> Finish and Save: Thumbs Up")
+                            else:
+                                rospy.loginfo("\n***Manipulation Started***\n\n-> Terminate: Thumbs Down (or CTRL+C)\n")
+
+                    elif MODE == COLLECTION_MODES["Collection"]:
+                        if VIDEO_SAVE:
+                            data['video'].append(cv2.flip(capture, 1))
+                        
+                        pose_results = skeleton_estimation_pose(capture, pose)
+                        
+                        if not pose_results: continue
+
+                        landmarks = get_landmarks(pose_results)
+                        # Checking if landmarks exist and if all the three keypoints have been found
+                        if landmarks:
+                            data['points'].append(landmarks)
+
+                    elif MODE == COLLECTION_MODES["Control"]:
+                        pose_results = skeleton_estimation_pose(capture, pose)
+                        if not pose_results: continue
+                        landmarks = get_landmarks(pose_results)
+                        
+                        ### TODO:
+                        control_manipulator(landmarks, model, data['calib'])
+                        return None
+                        
+
+                except Exception as e:
+                        # If there was an exception, the program should exit
+                        rospy.logwarn(f"Exception thrown: {e}")
+                        MODE = COLLECTION_MODES['Terminate']
+
+    # Release the video from memory and clear all the openCV windows
+    video.release()
+    cv2.destroyAllWindows()
+
+    return data
+
+
+
+
+
+####################### TODO: CODE FROM training_data.py ########################
+from open_manipulator_msgs.srv import SetJointPosition, SetJointPositionRequest
+
+PATH_TIME = 0.5
+SHOULDER = mp_pose.PoseLandmark.RIGHT_SHOULDER.value
+WRIST = mp_pose.PoseLandmark.RIGHT_WRIST.value
+
+def set_wrist_angle(joint_angles):
+    rospy.wait_for_service('goal_joint_space_path', 2)
+    set_joints = rospy.ServiceProxy('goal_joint_space_path', SetJointPosition)
+
+    goal_request = SetJointPositionRequest()
+    goal_request.joint_position.joint_name = ["joint1", "joint2", "joint3", "joint4"]
+    goal_request.joint_position.position = joint_angles
+    goal_request.joint_position.max_accelerations_scaling_factor = 0.0
+    goal_request.joint_position.max_velocity_scaling_factor = 0.0
+    goal_request.path_time = PATH_TIME
+    resp = set_joints(goal_request)
+
+    rospy.sleep(PATH_TIME)
+
+    if not resp.is_planned:
+        rospy.logerr("Failed to solve IK equation for wrist")
+
+    return resp.is_planned
+
+
+def shift_keypoints(original, offset):
+    return (np.array(original) - np.array(offset))
+#############################################################################################################
+
+
+def control_manipulator(landmarks, model, calib):
+    ## Shifting the wrist to be relative to the shoulder at origin
+    wrist_point = shift_keypoints(landmarks[WRIST], landmarks[SHOULDER])
+    wrist_point[2] = -wrist_point[2]
+
+    ## Calculating the calibrated point
+    wrist_point = wrist_point * calib    
+
+    ## Converting the euler angle to a quaternion
+    angle = tf.transformations.quaternion_from_euler(0, landmarks['angle'], 0)
+
+    ## Concatenating the wrist point and angle to match the model input
+    model_input = np.concatenate((wrist_point, angle), axis = None)
+    
+    ## Predicting the joints based on the wrist point and angle
+    control_outputs = model(torch.Tensor(model_input))
+
+    ## Setting the manipulator's wrist angle
+    success = set_wrist_angle(control_outputs.tolist())
+
+
+
+def verify_params():
+    verified = test_path(model_file)
+    if CONTROL:
+        verified = verified and test_path(CONTROL_MODEL)
+
+        if verified:
+            ### Sleeping to let Gazebo launch
+            rospy.sleep(3)
+    return verified
+
+if __name__ == '__main__':
+    rospy.init_node("keypoint_collection")
+
+    if verify_params():
+        rospy.loginfo("All the parameters were verified")
+        data = video_capture(control = CONTROL)
+        if data: save_dataset(data = data)
+    else:
+        rospy.logfatal(f"Check if the model name exist and if they are correct. Check if they are in the correct directories")
+    
\ No newline at end of file
diff --git a/pose_estimation/scripts/pose_estimation_control.py b/pose_estimation/scripts/pose_estimation_control.py
deleted file mode 100755
index 38579a59aa4a203653ff97dedf6fdbae2f55b5bf..0000000000000000000000000000000000000000
--- a/pose_estimation/scripts/pose_estimation_control.py
+++ /dev/null
@@ -1,110 +0,0 @@
-import rospy
-import math
-import itertools
-import numpy as np
-
-from math import sin, cos
-
-### Importing the messages for OpenManipulator's joint movement
-from open_manipulator_msgs.srv import SetJointPosition
-from open_manipulator_msgs.msg import JointPosition
-
-from geometry_msgs.msg import Pose
-from open_manipulator_msgs.srv import SetKinematicsPose
-from open_manipulator_msgs.msg import KinematicsPose
-
-### Importing the messages for Spherical Coordinates
-from pose_estimation.msg import SphericalCoordinates
-
-
-### Defining available joints by name
-JOINT_NAMES = ["joint1", "joint2", "joint3", "joint4"]
-
-### Variables for moving the gripper
-END_EFFECTOR_NAME = "gripper"
-PATH_TIME = 2
-
-# ### Define starting joint positions
-START_LOCATION = {
-    'x': 0.286,
-    'y': 0.0,
-    'z': 0.204,
-}
-
-# Calculating the distance between two points
-def dis(point1, point2):
-    return math.sqrt((point2[0] - point1[0])**2 + (point2[1] - point1[1])**2)
-
-
-# SOURCES: https://math.libretexts.org/Bookshelves/Calculus/Book%3A_Calculus_(OpenStax)/12%3A_Vectors_in_Space/12.07%3A_Cylindrical_and_Spherical_Coordinates
-def spherical_to_cartesian(d, theta, phi):
-    # TODO: Prove why change of cos and sin
-    # x = d * sin(phi) * cos(theta)
-    # y = d * sin(phi) * sin(theta)
-    # z = d * cos(phi)
-    x = d * sin(phi) * sin(theta)
-    y = d * sin(phi) * cos(theta)
-    z = d * sin(phi)
-
-    print(f"Distance: {d}")
-    coordinates = {
-        "x" : START_LOCATION['x'],
-        "y" : START_LOCATION['y'],
-        "z" : z
-    }
-
-    return coordinates
-
-
-def set_position(target_location):
-    set_kinematics_pose = rospy.ServiceProxy('goal_task_space_path_position_only', SetKinematicsPose)
-
-    pose = Pose()
-    pose.position.x = target_location['x']
-    pose.position.y = target_location['y']
-    pose.position.z = target_location['z']
-
-    kinematics_pose = KinematicsPose()
-    kinematics_pose.pose = pose
-
-    resp = set_kinematics_pose(end_effector_name = END_EFFECTOR_NAME, kinematics_pose=kinematics_pose, path_time = PATH_TIME)
-
-    if not resp.is_planned:
-        rospy.logerr("Failed to sovle IK equation")
-        print(f"X: {target_location['x']}; Y: {target_location['y']}; Z: {target_location['z']}")
-
-
-def video_capture(coordinate):
-    # TODO: Exception and stop program if connections fails
-    rospy.wait_for_service('goal_task_space_path_position_only', 2)
-
-    # Converting the spherical coordiantes received from pose estimation to cartesian coordinates
-    target_location = spherical_to_cartesian(coordinate.distance, coordinate.theta, coordinate.phi)
-
-    try:
-        # Setting the location of the robotic arm
-        set_position(target_location)
-    except rospy.ServiceException as e:
-        print("Service call failed %s"%e)
-
-    # Sleep equal to the time needed for the arm to move to the desired location
-    rospy.sleep(PATH_TIME)
-
-def pose_estimation():
-    rospy.init_node('pose_estimation_control')
-
-    # Moving the arm to the starting position
-    rospy.loginfo("Moving the arm to the starting location")
-    set_position(START_LOCATION)
-
-    print("Subscribing to `arm_keypoint_capture`")
-    rospy.Subscriber("arm_keypoint_capture", SphericalCoordinates, video_capture, queue_size = 1)
-
-    rospy.spin()
-
-
-if __name__ == '__main__':
-    try:
-        pose_estimation()
-    except rospy.ROSInterruptException as e:
-        print("Exception: " + e)
diff --git a/pose_estimation/scripts/training.py b/pose_estimation/scripts/training.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae9706cf7ee01934019afa85778dfa204822aadc
--- /dev/null
+++ b/pose_estimation/scripts/training.py
@@ -0,0 +1,170 @@
+import pickle
+
+# ### pip3 install torch torchvision torchaudio
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.optim as optim
+import numpy as np
+import matplotlib.pyplot as plt
+
+class Net(nn.Module):
+
+  def __init__(self, input_num, output_num):
+    super(Net, self).__init__()
+    self.l1 = nn.Linear(input_num, 32)
+    # self.l2 = nn.Linear(32, 32)
+    self.output = nn.Linear(32, output_num)
+    # self.dropout = nn.Dropout(0.2)
+
+  def forward(self, x):
+    x = F.tanh(self.l1(x))
+    # x = F.tanh(self.dropout(self.l2(x)))
+    return self.output(x)
+
+def listToTensor(arr):
+    # arr_np = np.array([np.array(el) for el in arr])
+    return torch.tensor(arr)
+
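+# Each training sample pairs a 7-value input [x, y, z, qx, qy, qz, qw]
+# (manipulator position plus wrist-orientation quaternion) with the solved
+# joint angles as the label; Net is sized from these lengths in __main__.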
+def process_dataset(dataset):
+    inputs = []
+    labels = []
+
+    for data in dataset:
+        actual_values = []
+        for joint in data['jointPositions']:
+            actual_values.append(joint.angle)
+        
+        labels.append(actual_values)
+
+        man_pos = data['manipulatorPositions']
+        man_angle = data['angle']
+        x = [man_pos.x, man_pos.y, man_pos.z, man_angle.x, man_angle.y, man_angle.z, man_angle.w]
+
+        inputs.append(x)
+
+    inputs, labels = remove_duplicates(inputs, labels)
+    
+    inputs = listToTensor(inputs)
+    labels = listToTensor(labels)
+
+    return (inputs, labels)
+
+def remove_duplicates(inputs, labels):
+    processed_inputs, processed_labels = [], []
+    
+    for i in range(len(inputs)):
+        
+        duplicates = []
+        for j in range(i+1, len(inputs)):
+            duplicates.append(inputs[i] == inputs[j])    
+        
+        if not np.array(duplicates).any():
+            processed_inputs.append(inputs[i])
+            processed_labels.append(labels[i])
+
+    print(f"{len(inputs) - len(processed_inputs)} duplicates have been removed")
+    return processed_inputs, processed_labels
+
+
+### TODO: Out of range?
+### TODO: Y-Axis 
+
+## TODO: PATHS
+### os.path.expanduser(f"~/catkin_ws/src/pose_estimation/data/models/joint_predict.pt")
+
+
+### TODO: GIT HUB
+### TODO: Organize the training script
+
+def train(inputs, labels, model, criterion, optimizer):
+    model.train()
+    running_loss = 0.0
+    count = 0
+    for i, data in enumerate(zip(inputs, labels)):
+        # get the inputs; data is a list of [inputs, labels]
+        input, label = data
+
+        # zero the parameter gradients
+        optimizer.zero_grad()
+
+        # forward + backward + optimize
+        outputs = model(input)
+        loss = criterion(outputs, label)
+        loss.backward()
+        optimizer.step()
+        
+        running_loss += loss.item()
+        count += 1
+
+    train_loss = running_loss / count
+
+    return train_loss
+
+def save_model(model_dict, path):
+
+    path_dir = "/home/michal/catkin_ws/src/pose_estimation/data/models/"
+
+    print(f"Best model has been saved: {path_dir + path} \n")
+    print(f"    -> Best Loss: {model_dict['loss']}")
+    torch.save(model_dict, path_dir + path)
+
+
+if __name__ == '__main__':
+    dataset = {}
+    with open(r"/home/michal/catkin_ws/src/pose_estimation/data/keypoint_dataset.pickle", "rb") as input_file:
+        dataset = pickle.load(input_file)
+
+    print(f"There are {len(dataset)} points in the dataset")
+
+    inputs, labels = process_dataset(dataset)
+
+    model = Net(len(inputs[0]), len(labels[0]))
+
+    epochs = 200
+    # criterion = nn.MSELoss()
+    criterion = nn.HuberLoss()
+    optimizer = optim.Adam(model.parameters(), lr = 1e-1)
+    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=25, gamma=9e-1)
+    
+    best_model = None
+    history = {"loss": []}
+
+    for epoch in range(epochs):
+
+        train_loss = train(inputs, labels, model, criterion, optimizer)
+
+        if epoch % 10 == 0:
+            print(f'Epoch {epoch} loss: {train_loss}')
+            print(optimizer.param_groups[0]['lr'])
+
+        # scheduler.step()
+
+        if best_model is None or best_model['loss'] >= train_loss:
+            best_model = {
+                'epoch': epoch,
+                'loss': train_loss,
+                'model_state_dict': model.state_dict(),
+                'optimizer_state_dict': optimizer.state_dict()
+            }
+
+        history['loss'].append(train_loss)
+
+    print('Finished Training')
+    save_model(best_model, "joint_predict.pt")
+
+    plt.plot(history['loss'])
+    plt.show()
+
+
+#### 0.0051 ####
+
+# self.l1 = nn.Linear(input_num, 32)
+# tanh()
+# self.l2 = nn.Linear(32, output_num)
+
+# criterion = nn.HuberLoss()
+# optimizer = optim.Adam(model.parameters(), lr = 1e-1)
+# scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=9e-1)
+##################
diff --git a/pose_estimation/scripts/training_data.py b/pose_estimation/scripts/training_data.py
new file mode 100755
index 0000000000000000000000000000000000000000..803cbc43d54659a607c9792581c3367fd7d1b768
--- /dev/null
+++ b/pose_estimation/scripts/training_data.py
@@ -0,0 +1,425 @@
+####### TODO ##########
+# 1. Calibration! Extreme points failed to solve
+#######################
+
+# 2 Clean 'training_data' file and roslaunch (simulation, show video?)
+# 4. Rename the file! ('pose_estimation')
+
+
+import pickle
+import rospy
+import tf
+import cv2
+import math
+import os
+import threading
+
+import numpy as np
+import mediapipe as mp
+from scipy.spatial.transform import Rotation as R
+from tqdm import tqdm
+
+from tf.transformations import quaternion_matrix, translation_matrix
+from geometry_msgs.msg import Point32
+from geometry_msgs.msg import Pose
+from pose_estimation.msg import JointPositions
+from sensor_msgs.msg import PointCloud
+from open_manipulator_msgs.msg import KinematicsPose
+from open_manipulator_msgs.srv import SetKinematicsPose, SetJointPosition, SetJointPositionRequest
+
+SIMULATE = rospy.get_param('/training_data/simulate')
+VIDEO_SHOW = rospy.get_param('/training_data/show_video')
+
+FILENAME = rospy.get_param('/training_data/input_keypoint_file')
+TRAINING_FILE = rospy.get_param('/training_data/training_file')
+
+OUT_OF_RANGE_THRESHOLD = 0.1
+THRESHOLD = 2
+NOISE = 0.03
+
+
+input_file = os.path.expanduser(f"~/catkin_ws/src/pose_estimation/data/{FILENAME}")
+training_file = os.path.expanduser(f"~/catkin_ws/src/pose_estimation/data/{TRAINING_FILE}")
+
+mp_pose = mp.solutions.pose
+
+# Defining ROS Node to publish the messages
+frame = 'keypoint_frame'
+node = 'keypoint_capture'
+
+SHOULDER = mp_pose.PoseLandmark.RIGHT_SHOULDER.value
+pub_shoulder = rospy.Publisher(f"{node}/shoulder", PointCloud, queue_size=50)
+
+WRIST = mp_pose.PoseLandmark.RIGHT_WRIST.value
+pub_wrist = rospy.Publisher(f"{node}/wrist", PointCloud, queue_size=50)
+
+ELBOW = mp_pose.PoseLandmark.RIGHT_ELBOW.value
+pub_elbow = rospy.Publisher(f"{node}/elbow", PointCloud, queue_size=50)
+
+### Variables for moving the gripper
+END_EFFECTOR_NAME = "gripper"
+PATH_TIME = 1
+
+### Starting joint positions and angles
+START_LOCATION = (0.286, 0.0, 0.204)
+START_ANGLE = 0.0
+
+def shift_keypoints(original, offset):
+    return (np.array(original) - np.array(offset))
+
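+# Publishes a keypoint, shifted by the given offset and with its z negated,
+# as a single-point PointCloud stamped in the keypoint frame.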
+def publish_point(keypoint, offset, timestamp, pub):
+    point = Point32()
+    shifted_keypoint = shift_keypoints(keypoint, offset)
+    point.x, point.y, point.z = shifted_keypoint
+    point.z = -point.z
+
+    msg = PointCloud()
+    msg.header.frame_id = frame
+    msg.header.stamp = timestamp
+    msg.points = [point]
+
+    pub.publish(msg)
+
+    return point
+
+def set_manipulator(manipulator_pos):
+    rospy.wait_for_service('goal_task_space_path_position_only', 2)
+    
+    set_kinematics_pose = rospy.ServiceProxy('goal_task_space_path_position_only', SetKinematicsPose)
+
+    pose = Pose()
+    pose.position.x = manipulator_pos[0]
+    pose.position.y = manipulator_pos[1]
+    pose.position.z = manipulator_pos[2]
+
+    kinematics_pose = KinematicsPose()
+    kinematics_pose.pose = pose
+
+    resp = set_kinematics_pose(planning_group = 'arm', end_effector_name = END_EFFECTOR_NAME, kinematics_pose = kinematics_pose, path_time = PATH_TIME)
+    
+    if resp.is_planned:
+        rospy.sleep(PATH_TIME)
+    
+    return resp.is_planned
+
+def set_wrist_angle(angle, starting = False):
+    if starting:
+        rospy.wait_for_service('goal_joint_space_path', 2)
+        set_joints = rospy.ServiceProxy('goal_joint_space_path', SetJointPosition)
+    else: 
+        rospy.wait_for_service('goal_joint_space_path_from_present', 2)
+        set_joints = rospy.ServiceProxy('goal_joint_space_path_from_present', SetJointPosition)
+
+    goal_request = SetJointPositionRequest()
+    goal_request.joint_position.joint_name = ["joint1", "joint2", "joint3", "joint4"]
+    goal_request.joint_position.position = [0.0, 0.0, 0.0, angle]
+    goal_request.joint_position.max_accelerations_scaling_factor = 0.0
+    goal_request.joint_position.max_velocity_scaling_factor = 0.0
+    goal_request.path_time = PATH_TIME
+    resp = set_joints(goal_request)
+
+    rospy.sleep(PATH_TIME)
+
+    if not resp.is_planned:
+        rospy.logerr("Failed to solve IK equation for wrist")
+
+    return resp.is_planned
+
+def point_to_np(point):
+    return np.array([point.x, point.y, point.z])
+
+def np_to_point(arr):
+    point = Point32()
+    point.x, point.y, point.z = arr[0], arr[1], arr[2]
+    return point
+
+def homogenous_transformation(old_position, transform_matrix):
+    # Converting the position to numpy array and appending one to it to make it a 4x1 vector
+    old_position_vect = point_to_np(old_position)
+    old_position_vect = np.append(old_position_vect, [1])
+    old_position_vect = old_position_vect.reshape((4,1))
+
+    # Multiplying the old point with the transform matrix to get the point in the other coordinate frame
+    transformed_point = np.matmul(transform_matrix, old_position_vect)
+
+    return np_to_point(transformed_point)
+
+def create_matrix(rot, trans):
+    # Converting the quaternion and position into matrices
+    rot_matrix = quaternion_matrix(rot)
+    trans_matrix = translation_matrix(trans)
+
+    # Combining the two matrices to create a 4x4 homogenous transformation matrix
+    rot_matrix = rot_matrix[:,:3]
+    trans_matrix = np.array([trans_matrix[:,3]])
+    transform_matrix = np.concatenate((rot_matrix, trans_matrix.T), axis=1)
+
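+    # The slices above keep the homogeneous bottom row, so the result is the full
+    # 4x4 transform [[R, t], [0, 0, 0, 1]] consumed by homogenous_transformation().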
+    return transform_matrix
+
+def broadcast(point, node_from, node_to, time, orientation = (0,0,0)):
+    br = tf.TransformBroadcaster()
+
+    br.sendTransform(
+        (point.x, point.y, point.z),
+        tf.transformations.quaternion_from_euler(orientation[0], orientation[1], orientation[2]),
+        time,
+        node_from,
+        node_to
+    )
+
+def tf_listener(old_frame, new_frame, time):
+    trans, rot = None, None
+    listener = tf.TransformListener()
+
+    try: 
+        listener.waitForTransform(new_frame, old_frame, time, rospy.Duration(1.5))
+        (trans, rot) = listener.lookupTransform(new_frame, old_frame,  time)
+    except Exception as e:
+        rospy.logwarn(f'Exception: {e}')
+
+    return trans, rot
+
+def create_transform_matrix(trans, rot):
+    if not trans or not rot:
+        return None
+    
+    return create_matrix(rot, trans)
+
+def show_video(video):
+    # Displaying the captured video frames 
+    cv2.imshow("Test video", video)
+
+def iterative_movement(current_loc, desired_loc):
+    # Checks how many times the IK failed to be solved
+    failure = 0
+    backtracked = False
+    # Iterating while we reach the set limit of failures
+    while failure < (THRESHOLD+1): 
+        # If the previous movement was successful, 
+        # we attempt to move the arm to the desired location
+        if failure == 0 or backtracked:
+            # Trying to move to the desired location
+            success = set_manipulator(desired_loc)
+            
+            # If desired location reached, finish
+            if success: break
+
+        # Calculating the midpoint between the desired location
+        # and current location, and then trying to move there
+        midpoint = (desired_loc + current_loc) / 2.
+        success = set_manipulator(midpoint)
+
+        # If failed to move to the midpoint, increase failure count
+        if not success:
+            failure += 1
+            
+            # Trying to move out, in case it is stuck
+            if failure == THRESHOLD:
+                
+                if backtracked: break
+
+                midpoint = (current_loc * 0.9)
+                success = set_manipulator(midpoint)
+
+                if not success:
+                    failure += 1
+                else:
+                    backtracked = True
+                    failure -= 1
+
+        # Else change the current location to the midpoint and reset failures
+        if success:
+            current_loc = midpoint
+            failure = 0
+    
+    # If moved to the desired location return True and the current location
+    if failure == 0:
+        rospy.logdebug("-> *** Reached Desired Location")
+        return True, desired_loc
+    
+    # Else return False and the closest possible location
+    rospy.logdebug(f"-> Failed to Reach the Destination - Final Location: {current_loc}\n")
+    return False, current_loc
+
+def euclid_dist(vec1, vec2):
+    if vec1.size != vec2.size:
+        rospy.logwarn("Sizes of the two vectors do not match")
+        return None
+
+    sum_ = 0
+    for i in range(vec1.size):
+        squared_difference = (vec1[i] - vec2[i])**2
+        sum_ += squared_difference
+
+    return math.sqrt(sum_)
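+
+# Note: for 1-D numpy arrays euclid_dist is equivalent to np.linalg.norm(vec1 - vec2).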
+
+def range_convert(old_range, new_range, old_value):
+    old_diff = (old_range[1] - old_range[0])
+    new_diff = (new_range[1] - new_range[0])
+
+    new_value = (((old_value - old_range[0]) * new_diff) / old_diff) + new_range[0]
+    return new_value
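+
+# Example: range_convert((0, 180), (-np.pi / 2, np.pi / 2), 90) maps 90 degrees to 0.0 radians.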
+
+### Getting the relative wrist
+def get_relative_point(frame1, frame2, ros_time, point, rotation = True):
+
+    trans, rot = tf_listener(frame1, frame2, ros_time)
+
+    if not rotation:
+        rot = [0.0, 0.0, 0.0, 1.0]
+
+    transform = create_transform_matrix(trans, rot)
+
+    if transform is None:
+        return None
+    
+    return homogenous_transformation(point, transform)
+
+def save_point(dataset, input_point, output_point):
+    dataset['x'].append(input_point)
+    dataset['y'].append(output_point)
+    return dataset
+
+def verify_inputs():
+    verify = True
+
+    if not isinstance(SIMULATE, bool) or not isinstance(VIDEO_SHOW, bool):
+        rospy.logfatal("'simulate' and 'video_show' arguments must be booleans")
+        verify = False
+
+    if not os.path.exists(input_file):
+        rospy.logfatal(f"The following file '{input_file}' does not exist")
+        verify = False
+
+    return verify
+
+def data_gather():
+    # Reading the pickle file and saving its content as a dictionary
+    ### TODO: Function
+    rospy.loginfo("Reading the file...")
+    with open(input_file, 'rb') as handle:
+        captured_keypoints = pickle.load(handle)
+    rospy.loginfo("File read successfully")
+    
+    kinematics_pub = rospy.Publisher('captured_keypoints', KinematicsPose, queue_size = 10)
+
+    for idx in captured_keypoints.keys():
+        rospy.loginfo(f"Processing video with key {idx}")
+        points = captured_keypoints[idx]['points']
+
+
+        # TODO: Correct calibration ? Noise?
+        calib = 1.0 - captured_keypoints[idx]['calib'] + 0.025
+
+        for point_idx, point in enumerate(points):
+            if VIDEO_SHOW:
+                if cv2.waitKey(1) & 0xFF == ord('q'):
+                    rospy.logwarn("Closing the video")
+                    return
+                
+                video = captured_keypoints[idx]['video'][point_idx]
+                show_video(video)
+
+
+            if rospy.is_shutdown(): return
+            
+            
+            angle = point['angle']
+
+            broadcast_time_1 = rospy.Time.now()
+            shoulder_point = publish_point(point[SHOULDER], point[SHOULDER], broadcast_time_1, pub_shoulder)
+            wrist_point = publish_point(point[WRIST], point[SHOULDER], broadcast_time_1, pub_wrist)
+            broadcast(shoulder_point, f"{node}/shoulder", frame, broadcast_time_1, orientation=(np.pi, 0.0, 0.0))
+            relative_wrist = get_relative_point(frame, f"{node}/shoulder", broadcast_time_1, wrist_point)
+            
+
+            if relative_wrist is None:
+                continue
+        
+            broadcast_time_2 = rospy.Time.now()
+            broadcast(relative_wrist, f"{node}/wrist", f"{node}/shoulder", broadcast_time_2)
+            manipulator_position = np_to_point(point_to_np(relative_wrist) * calib)
+            relative_manipulator = get_relative_point(f"{node}/shoulder", f"{node}/wrist", broadcast_time_2, manipulator_position)
+            elbow_point = publish_point(point[ELBOW], point[WRIST], broadcast_time_2, pub_elbow)
+
+            if relative_manipulator is None:
+                continue
+            
+            broadcast_time_3 = rospy.Time.now()
+            broadcast(relative_manipulator, "/gripper/kinematics_pose", f"{node}/wrist", broadcast_time_3)
+            broadcast(np_to_point(np.zeros((3,))), "/gripper/wrist", "/gripper/kinematics_pose", broadcast_time_3, orientation=(np.pi, angle, np.pi))
+
+            # Getting the desired location
+            desired_loc = point_to_np(relative_manipulator).flatten()
+
+
+
+            ############################################
+            ### TODO: Test if correct wrist angle
+            ### TODO: Y-axis
+            ### TODO: Function
+
+            ### TODO: orientation -> work with TF             
+            # rotation_vec = -(np.pi / 8.) * np.array([0, 1, 0])
+            # rotation = R.from_rotvec(rotation_vec)
+            # desired_loc = rotation.apply(desired_loc)
+
+            pose = Pose() 
+            pose.position.x = desired_loc[0]
+            pose.position.y = 0 # desired_loc[1] 
+            pose.position.z = desired_loc[2] 
+
+            # print(desired_loc)
+            orientation = tf.transformations.quaternion_from_euler(0, angle, 0)
+            pose.orientation.x = orientation[0]
+            pose.orientation.y = orientation[1]
+            pose.orientation.z = orientation[2]
+            pose.orientation.w = orientation[3]
+
+
+            kinematics_pose = KinematicsPose()
+            kinematics_pose.pose = pose
+
+            # Blocks here until the previous request has been answered
+            # (process_joint_positions releases the lock once the IK response arrives)
+            lock.acquire()
+            kinematics_pub.publish(kinematics_pose)
+            ###########################################
+
+
+
+def process_joint_positions(joint_position):
+    save = joint_position.success or len(joint_position.jointPositions) != 0
+    
+    if save:
+        TRAINING_DATA.append({
+            "jointPositions": joint_position.jointPositions,
+            "manipulatorPositions": joint_position.manipulatorPose.pose.position,
+            "angle": joint_position.manipulatorPose.pose.orientation
+        })
+    else:
+        rospy.logerr(f"Point out of range: {joint_position.manipulatorPose.pose.position}")  
+
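+    # A plain threading.Lock has no owner thread, so it can be released here (in the
+    # subscriber thread) to unblock the next publish in data_gather()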
+    lock.release()
+
+
+if __name__ == '__main__':
+    lock = threading.Lock()
+    TRAINING_DATA = []
+
+    rospy.init_node(node)
+    rospy.Subscriber("inverse_kinematics_keypoints", JointPositions, process_joint_positions)
+
+    try:  
+        # rospy.loginfo("Sleeping for 1 second")
+        # rospy.sleep(1)
+        if verify_inputs(): data_gather()
+        
+    except rospy.ROSInterruptException as e:
+        rospy.logwarn(f"Exception: {e}")
+    
+    lock.acquire()
+    rospy.loginfo("Saving training dataset")
+    with open(training_file, 'wb') as handle:
+        pickle.dump(TRAINING_DATA, handle, protocol=pickle.HIGHEST_PROTOCOL)
+    lock.release()
+
diff --git a/pose_estimation/src/inverse_kinematics_solver.cpp b/pose_estimation/src/inverse_kinematics_solver.cpp
new file mode 100755
index 0000000000000000000000000000000000000000..6e266aa06ba10cd3b0e6b4eb4bc65951e3ad1909
--- /dev/null
+++ b/pose_estimation/src/inverse_kinematics_solver.cpp
@@ -0,0 +1,72 @@
+
+#include "pose_estimation/inverse_kinematics_solver.h"
+
+InverseKinematicsSolver::InverseKinematicsSolver(bool using_platform, std::string usb_port, std::string baud_rate, double control_period)
+{
+    log::info("Setting up the IK Solver for Open Manipulator");
+
+    open_manipulator_.initOpenManipulator(using_platform, usb_port, baud_rate, control_period);
+    goal_joint_value_ = new std::vector<JointValue>();
+
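+    // Position-only CR/SR Jacobian solver: only the x/y/z of the target pose is matched,
+    // the requested orientation is not controlled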
+    kinematics_ = new kinematics::SolverUsingCRAndSRPositionOnlyJacobian();
+    open_manipulator_.addKinematics(kinematics_);
+    log::info("Kinematics Solver Set 'SolverUsingCRandSRPoisionOnlyJacobian'");
+
+    ik_pub_ = n_.advertise<pose_estimation::JointPositions>("inverse_kinematics_keypoints", 1000);
+
+    log::info("Completed setting up the IK Solver");
+}
+
+void InverseKinematicsSolver::solveIK(Pose target_pose, const open_manipulator_msgs::KinematicsPose& manipulator_pose) {
+    pose_estimation::JointPositions msg;
+    
+    // TODO: Debug Mode
+    // std::cout << target_pose.kinematic.position << std::endl;
+    bool solved = open_manipulator_.solveInverseKinematics("gripper", target_pose, goal_joint_value_);
+    
+
+    // ? IF FAILED, CHECK THE CLOSEST POSITION
+
+
+    int idx = 0;
+    auto names = open_manipulator_.getManipulator()->getAllActiveJointComponentName();	
+    for(auto &point : *goal_joint_value_) {
+        pose_estimation::JointAngle joint;
+        joint.angle = point.position;
+        joint.name = names.at(idx);
+        msg.jointPositions.push_back(joint);
+
+        idx++;
+    }
+
+    msg.success = solved;
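+    // The message is published even when the IK did not converge; subscribers should
+    // check 'success' before trusting jointPositions (see process_joint_positions)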
+
+    msg.manipulatorPose = manipulator_pose;
+    ik_pub_.publish(msg);
+    log::info("Published the point");
+
+}
+
+void InverseKinematicsSolver::keypointsCallback(const open_manipulator_msgs::KinematicsPose& msg) {
+    Eigen::Vector3d position;
+    position[0] = msg.pose.position.x;
+    position[1] = msg.pose.position.y;
+    position[2] = msg.pose.position.z;
+
+    Pose target_pose;
+    target_pose.kinematic.position = position;
+    solveIK(target_pose, msg);
+}
+
+
+int main(int argc, char **argv)
+{
+    ros::init(argc, argv, "inverse_kinematics_solver");
+    
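+    // 'false' here skips initialising the physical OpenManipulator, so the USB port and
+    // baud rate arguments are effectively placeholders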
+    InverseKinematicsSolver ik_solver(false, "/dev/ttyUSB0", "1000000", 0.010);
+    
+    auto n = ik_solver.getNodeHandle();
+    ros::Subscriber sub = n.subscribe("captured_keypoints", 100, &InverseKinematicsSolver::keypointsCallback, &ik_solver);
+    
+    ros::spin();
+    return 0;
+}
\ No newline at end of file