diff --git a/cvssp_turtlebot_multiple_sim b/cvssp_turtlebot_multiple_sim
new file mode 160000
index 0000000000000000000000000000000000000000..d51c83c7d3c3a4a1f56e403b2c090c52d9c4126d
--- /dev/null
+++ b/cvssp_turtlebot_multiple_sim
@@ -0,0 +1 @@
+Subproject commit d51c83c7d3c3a4a1f56e403b2c090c52d9c4126d
diff --git a/hypobot_system/CMakeLists.txt b/hypobot_system/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..fea7bf97376d9e71b9323b72a0b13bfeb3fd2584
--- /dev/null
+++ b/hypobot_system/CMakeLists.txt
@@ -0,0 +1,214 @@
+cmake_minimum_required(VERSION 3.0.2)
+project(hypobot_system)
+
+## Compile as C++11, supported in ROS Kinetic and newer
+# add_compile_options(-std=c++11)
+
+## Find catkin macros and libraries
+## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
+## is used, also find other catkin packages
+find_package(catkin REQUIRED COMPONENTS
+  geometry_msgs
+  roscpp
+  rospy
+  rviz
+  sensor_msgs
+  std_msgs
+  message_generation
+)
+
+## System dependencies are found with CMake's conventions
+# find_package(Boost REQUIRED COMPONENTS system)
+
+
+## Uncomment this if the package has a setup.py. This macro ensures
+## modules and global scripts declared therein get installed
+## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
+# catkin_python_setup()
+
+################################################
+## Declare ROS messages, services and actions ##
+################################################
+
+## To declare and build messages, services or actions from within this
+## package, follow these steps:
+## * Let MSG_DEP_SET be the set of packages whose message types you use in
+##   your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
+## * In the file package.xml:
+##   * add a build_depend tag for "message_generation"
+##   * add a build_depend and an exec_depend tag for each package in MSG_DEP_SET
+##   * If MSG_DEP_SET isn't empty, the following dependency has been pulled in
+##     but can be declared for certainty nonetheless:
+##     * add an exec_depend tag for "message_runtime"
+## * In this file (CMakeLists.txt):
+##   * add "message_generation" and every package in MSG_DEP_SET to
+##     find_package(catkin REQUIRED COMPONENTS ...)
+##   * add "message_runtime" and every package in MSG_DEP_SET to
+##     catkin_package(CATKIN_DEPENDS ...)
+##   * uncomment the add_*_files sections below as needed
+##     and list every .msg/.srv/.action file to be processed
+##   * uncomment the generate_messages entry below
+##   * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
+
+## Generate messages in the 'msg' folder
+add_message_files(
+   FILES
+   event_via_pose.msg
+   event_via_img.msg
+)
+
+## Generate services in the 'srv' folder
+add_service_files(
+    FILES
+    UpdateInventory.srv
+    GetInventory.srv
+)
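+
+## The .srv files above are not shown in this diff; from their use in
+## scripts/inventory_server.py (req.payload, req.count, and a single string
+## reply) they are assumed to look roughly like:
+##   UpdateInventory.srv:  string payload, int64 count --- string inventory
+##   GetInventory.srv:     (empty request) --- string inventory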
+
+## Generate actions in the 'action' folder
+# add_action_files(
+#   FILES
+#   Action1.action
+#   Action2.action
+# )
+
+## Generate added messages and services with any dependencies listed here
+generate_messages(
+   DEPENDENCIES
+   sensor_msgs
+   std_msgs
+   geometry_msgs
+)
+
+################################################
+## Declare ROS dynamic reconfigure parameters ##
+################################################
+
+## To declare and build dynamic reconfigure parameters within this
+## package, follow these steps:
+## * In the file package.xml:
+##   * add a build_depend and an exec_depend tag for "dynamic_reconfigure"
+## * In this file (CMakeLists.txt):
+##   * add "dynamic_reconfigure" to
+##     find_package(catkin REQUIRED COMPONENTS ...)
+##   * uncomment the "generate_dynamic_reconfigure_options" section below
+##     and list every .cfg file to be processed
+
+## Generate dynamic reconfigure parameters in the 'cfg' folder
+# generate_dynamic_reconfigure_options(
+#   cfg/DynReconf1.cfg
+#   cfg/DynReconf2.cfg
+# )
+
+###################################
+## catkin specific configuration ##
+###################################
+## The catkin_package macro generates cmake config files for your package
+## Declare things to be passed to dependent projects
+## INCLUDE_DIRS: uncomment this if your package contains header files
+## LIBRARIES: libraries you create in this project that dependent projects also need
+## CATKIN_DEPENDS: catkin_packages dependent projects also need
+## DEPENDS: system dependencies of this project that dependent projects also need
+catkin_package(
+#  INCLUDE_DIRS include
+#  LIBRARIES hypobot_system
+  CATKIN_DEPENDS geometry_msgs roscpp rospy rviz sensor_msgs std_msgs message_runtime
+#  DEPENDS system_lib
+)
+
+###########
+## Build ##
+###########
+
+## Specify additional locations of header files
+## Your package locations should be listed before other locations
+include_directories(
+# include
+  ${catkin_INCLUDE_DIRS}
+)
+
+## Declare a C++ library
+# add_library(${PROJECT_NAME}
+#   src/${PROJECT_NAME}/hypobot_system.cpp
+# )
+
+## Add cmake target dependencies of the library
+## as an example, code may need to be generated before libraries
+## either from message generation or dynamic reconfigure
+## add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
+
+## Declare a C++ executable
+## With catkin_make all packages are built within a single CMake context
+## The recommended prefix ensures that target names across packages don't collide
+# add_executable(${PROJECT_NAME}_node src/hypobot_system_node.cpp)
+# add_executable(client_request src/client_request.cpp)
+
+## Rename C++ executable without prefix
+## The above recommended prefix causes long target names, the following renames the
+## target back to the shorter version for ease of user use
+## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node"
+# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "")
+
+## Add cmake target dependencies of the executable
+## same as for the library above
+# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
+
+## Specify libraries to link a library or executable target against
+# target_link_libraries(${PROJECT_NAME}_node
+#   ${catkin_LIBRARIES}
+# )
+# target_link_libraries(client_request ${catkin_LIBRARIES})
+
+
+#############
+## Install ##
+#############
+
+# all install targets should use catkin DESTINATION variables
+# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
+
+## Mark executable scripts (Python etc.) for installation
+## in contrast to setup.py, you can choose the destination
+# install(PROGRAMS
+#   scripts/my_python_script
+#   DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
+# )
+
+## Mark executables for installation
+## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html
+# install(TARGETS ${PROJECT_NAME}_node
+#   RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
+# )
+
+## Mark libraries for installation
+## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html
+# install(TARGETS ${PROJECT_NAME}
+#   ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
+#   LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
+#   RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION}
+# )
+
+## Mark cpp header files for installation
+# install(DIRECTORY include/${PROJECT_NAME}/
+#   DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
+#   FILES_MATCHING PATTERN "*.h"
+#   PATTERN ".svn" EXCLUDE
+# )
+
+## Mark other files for installation (e.g. launch and bag files, etc.)
+# install(FILES
+#   # myfile1
+#   # myfile2
+#   DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
+# )
+
+#############
+## Testing ##
+#############
+
+## Add gtest based cpp test target and link libraries
+# catkin_add_gtest(${PROJECT_NAME}-test test/test_hypobot_system.cpp)
+# if(TARGET ${PROJECT_NAME}-test)
+#   target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
+# endif()
+
+## Add folders to be run by python nosetests
+# catkin_add_nosetests(test)
\ No newline at end of file
diff --git a/hypobot_system/launch/multiple_bot_rescue.launch b/hypobot_system/launch/multiple_bot_rescue.launch
new file mode 100644
index 0000000000000000000000000000000000000000..4eabbd2ed8809d40c1d3e6b0f1479538e9906cf3
--- /dev/null
+++ b/hypobot_system/launch/multiple_bot_rescue.launch
@@ -0,0 +1,20 @@
+<launch>
+    <rosparam command="load" file="$(find hypobot_system)/params/robots.yaml"/>
+    <rosparam command="load" file="$(find hypobot_system)/params/system.yaml"/>
+    <rosparam command="load" file="$(find hypobot_system)/params/environment.yaml"/>
+    <node pkg="hypobot_system" name="manage_response_to_events" type="central_manager.py" output="screen"/>
+    <node pkg="tf" type="static_transform_publisher" name="map_conv" args="36.8 35.3 0 1.9 0 0 /butthead/map /butthead/map-gt 100"/>
+
+    <!-- Change ns and robot_name as appropriate -->
+    <group ns="butthead">
+        <param name="robot_name" type="str" value="butthead" />
+        <node pkg="hypobot_system" name="inventory_server_one" type="inventory_server.py" output="screen">
+        </node>
+    </group>
+
+    <group ns="stimpy">
+        <param name="robot_name" type="str" value="stimpy" />
+        <node pkg="hypobot_system" name="inventory_server_two" type="inventory_server.py" output="screen">
+        </node>
+    </group>
+</launch>
\ No newline at end of file
diff --git a/hypobot_system/launch/single_bot_rescue.launch b/hypobot_system/launch/single_bot_rescue.launch
new file mode 100644
index 0000000000000000000000000000000000000000..27bf6237f9e7d45953630264ad8e2a1164a51188
--- /dev/null
+++ b/hypobot_system/launch/single_bot_rescue.launch
@@ -0,0 +1,13 @@
+<launch>
+    <rosparam command="load" file="$(find hypobot_system)/params/robots.yaml"/>
+    <rosparam command="load" file="$(find hypobot_system)/params/system.yaml"/>
+    <rosparam command="load" file="$(find hypobot_system)/params/environment.yaml"/>
+    <node pkg="hypobot_system" name="manage_response_to_events" type="central_manager.py" output="screen"/>
+    <node pkg="tf" type="static_transform_publisher" name="map_conv" args="36.8 35.3 0 1.9 0 0 /butthead/map /butthead/map-gt 100"/>
+    <!-- Change ns and robot_name as appropriate -->
+    <group ns="butthead">
+        <param name="robot_name" type="str" value="butthead" />
+        <node pkg="hypobot_system" name="inventory_server" type="inventory_server.py" output="screen">
+        </node>
+    </group>
+</launch>
\ No newline at end of file
diff --git a/hypobot_system/models/classification/best_cls_net.pth b/hypobot_system/models/classification/best_cls_net.pth
new file mode 100644
index 0000000000000000000000000000000000000000..6b3a1012d0d0246e4a0e0ce69d5410bd18314ffd
Binary files /dev/null and b/hypobot_system/models/classification/best_cls_net.pth differ
diff --git a/hypobot_system/models/classification/google_model.py b/hypobot_system/models/classification/google_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..ede08aa7731c720cb5cd274276a83674d84cb4a7
--- /dev/null
+++ b/hypobot_system/models/classification/google_model.py
@@ -0,0 +1,132 @@
+import torch
+import torch.nn as nn
+
+# https://arxiv.org/pdf/1409.4842.pdf
+
+class GoogLeNet(nn.Module):
+    def __init__(self, in_channels=3, classes=4):
+        super(GoogLeNet, self).__init__()
+
+        self.conv1 = ConvBlock(in_channels=in_channels, out_channels=64, kernel_size=7, stride=2, padding=3)
+        self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+        self.conv2 = nn.Sequential(
+            ConvBlock(64, 64, kernel_size=1, stride=1, padding=0),
+            ConvBlock(64, 192, kernel_size=3, stride=1, padding=1)
+        )
+
+        self.maxpool2 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+
+        self.incept3a = InceptionBlock(192,64,96,128,16,32,32)
+        self.incept3b = InceptionBlock(256,128,128,192,32,96,64)
+        self.maxpool3 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+
+        self.incept4a = InceptionBlock(480,192,96,208,16,48,64)
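+        # auxiliary classifiers: side heads on the intermediate features after
+        # incept4a and incept4d, used in the original paper to strengthen
+        # gradients during training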
+        self.aux1 = AuxBlock(512, classes)
+        self.aux2 = AuxBlock(528, classes)
+
+        self.incept4b = InceptionBlock(512,160,112,224,24,64,64)
+        self.incept4c = InceptionBlock(512,128,128,256,24,64,64)
+        self.incept4d = InceptionBlock(512,112,144,288,32,64,64)
+        self.incept4e = InceptionBlock(528,256,160,320,32,128,128)
+        self.maxpool4 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+
+        self.incept5a = InceptionBlock(832,256,160,320,32,128,128)
+        self.incept5b = InceptionBlock(832,384,192,384,48,128,128)
+
+        self.avgpool = nn.AvgPool2d(kernel_size=7, stride=1)
+        self.dropout = nn.Dropout(p=0.4)
+        self.fc1 = nn.Linear(1024, classes)
+
+    def forward(self, x):
+        x = self.conv1(x)
+        x = self.maxpool1(x)
+        x = self.conv2(x)
+        x = self.maxpool2(x)
+
+        x = self.incept3a(x)
+        x = self.incept3b(x)
+        x = self.maxpool3(x)
+
+        x = self.incept4a(x)
+
+        aux_out1 = self.aux1(x)
+
+        x = self.incept4b(x)
+        x = self.incept4c(x)
+        x = self.incept4d(x)
+
+        aux_out2 = self.aux2(x)
+
+        x = self.incept4e(x)
+        x = self.maxpool4(x)
+        x = self.incept5a(x)
+        x = self.incept5b(x)
+        x = self.avgpool(x)
+        x = x.reshape(x.shape[0], -1)
+
+        x = self.dropout(x)
+        x = self.fc1(x)
+
+        return x, aux_out1, aux_out2
+
+class AuxBlock(nn.Module):
+    def __init__(self, x, classes):
+        super(AuxBlock, self).__init__()
+        self.avgpool2 = nn.AvgPool2d(stride=3, kernel_size=5)
+        self.conv3 = ConvBlock(x, 128, kernel_size=1, stride=1)
+        self.relu = nn.ReLU()
+        self.fc = nn.Linear(4 * 4 * 128, 1024)
+        self.dropout = nn.Dropout(p=0.7)
+        self.classifier = nn.Linear(1024, classes)
+
+    def forward(self,input):
+        N = input.shape[0]
+        x = self.avgpool2(input)
+        x = self.conv3(x)
+        x = self.relu(x)
+        x = x.reshape(N, -1)
+        x = self.fc(x)
+        x = self.dropout(x)
+        x = self.classifier(x)
+
+        return x
+
+class InceptionBlock(nn.Module):
+    def __init__(self, in_channels, post_1x1, pre_3x3, post_3x3, pre_5x5, post_5x5, post_pool):
+        super(InceptionBlock, self).__init__()
+
+        self.branch1 = ConvBlock(in_channels, post_1x1, kernel_size=1)
+        self.branch2 = nn.Sequential(
+            ConvBlock(in_channels, pre_3x3, kernel_size=1),
+            ConvBlock(pre_3x3, post_3x3, kernel_size=3, padding=1)
+        )
+        self.branch3 = nn.Sequential(
+            ConvBlock(in_channels, pre_5x5, kernel_size=1),
+            ConvBlock(pre_5x5, post_5x5, kernel_size=5, padding=2)
+        )
+        self.branch4 = nn.Sequential(
+            nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
+            ConvBlock(in_channels, post_pool, kernel_size=1)
+        )
+
+    def forward(self, x):
+        return torch.cat([self.branch1(x), self.branch2(x), self.branch3(x), self.branch4(x)],1)
+
+class ConvBlock(nn.Module):
+    def __init__(self, in_channels, out_channels, **kwargs):
+        super(ConvBlock, self).__init__()
+        self.relu = nn.ReLU()
+        self.conv = nn.Conv2d(in_channels, out_channels, **kwargs)
+        # https://arxiv.org/abs/1502.03167
+        self.bnorm = nn.BatchNorm2d(out_channels)
+
+    def forward(self, x):
+        x = self.conv(x)
+        x = self.bnorm(x)
+        return self.relu(x)
+
+if __name__ == '__main__':
+    # smoke test: dummy 224x224 batch (input size implied by the 7x7 avg pool)
+    out, aux1, aux2 = GoogLeNet()(torch.randn(2, 3, 224, 224))
+    print(out.shape, aux1.shape, aux2.shape)
\ No newline at end of file
diff --git a/hypobot_system/models/regression/150_best_net.pth b/hypobot_system/models/regression/150_best_net.pth
new file mode 100644
index 0000000000000000000000000000000000000000..f2996617d3c4ee51ef8ba4e139645336c4f0aaa8
Binary files /dev/null and b/hypobot_system/models/regression/150_best_net.pth differ
diff --git a/hypobot_system/models/regression/data_loader.py b/hypobot_system/models/regression/data_loader.py
new file mode 100644
index 0000000000000000000000000000000000000000..d5bf411eda66053390cfb837814368592eed4df4
--- /dev/null
+++ b/hypobot_system/models/regression/data_loader.py
@@ -0,0 +1,127 @@
+""" Owned By - https://github.com/youngguncho/PoseNet-Pytorch """
+
+
+
+
+import os
+import time
+import copy
+import torch
+import torchvision
+import pandas as pd
+import numpy as np
+import torch.nn as nn
+import torch.optim as optim
+from torch.optim import lr_scheduler
+from torch.utils.data import Dataset, DataLoader
+from torchvision import transforms, models, datasets
+import torch.nn.functional as F
+import matplotlib.pyplot as plt
+from PIL import Image
+
+
+class CustomDataset(Dataset):
+    def __init__(self, image_path, metadata_path, mode, transform, num_val=100):
+        self.image_path = image_path
+        self.metadata_path = metadata_path
+        self.mode = mode
+        self.transform = transform
+        with open(self.metadata_path, 'r') as f:
+            raw_lines = f.readlines()
+        self.lines = raw_lines[3:]  # the first three lines are header metadata
+
+        self.test_filenames = []
+        self.test_poses = []
+        self.train_filenames = []
+        self.train_poses = []
+
+        for i, line in enumerate(self.lines):
+            splits = line.split()
+            filename = splits[0]
+            values = splits[1:]
+            values = list(map(lambda x: float(x.replace(",", "")), values))
+
+            filename = os.path.join(self.image_path, filename)
+
+            if self.mode == 'train':
+                # if i > num_val:
+                self.train_filenames.append(filename)
+                self.train_poses.append(values)
+            elif self.mode == 'test':
+                self.test_filenames.append(filename)
+                self.test_poses.append(values)
+            elif self.mode == 'val':
+                self.test_filenames.append(filename)
+                self.test_poses.append(values)
+                if i > num_val:
+                    break
+            else:
+                raise ValueError('Unavailable mode: {}'.format(self.mode))
+
+        self.num_train = len(self.train_filenames)
+        self.num_test = len(self.test_filenames)
+
+    def __getitem__(self, index):
+        if self.mode == 'train':
+            image = Image.open(self.train_filenames[index])
+            pose = self.train_poses[index]
+        elif self.mode in ['val', 'test']:
+            image = Image.open(self.test_filenames[index])
+            pose = self.test_poses[index]
+
+        return self.transform(image), torch.Tensor(pose)
+
+    def __len__(self):
+        if self.mode == 'train':
+            num_data = self.num_train
+        elif self.mode in ['val', 'test']:
+            num_data = self.num_test
+        return num_data
+
+
+
+def get_loader(model, image_path, metadata_path, mode, batch_size, is_shuffle=False, num_val=100):
+
+    # Predefine image size
+    if model == 'Googlenet':
+        img_size = 300
+        img_crop = 299
+    elif model == 'Resnet':
+        img_size = 256
+        img_crop = 224
+    else:
+        raise ValueError('Unknown model: {}'.format(model))
+
+    if mode == 'train':
+        transform = transforms.Compose([
+            transforms.Resize(img_size),
+            transforms.RandomCrop(img_crop),
+            transforms.ColorJitter(0.5, 0.5, 0.5, 0.2),
+            transforms.ToTensor(),
+            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
+        ])
+
+        # metadata_path_val = '/mnt/data2/image_based_localization/posenet/KingsCollege/dataset_test.txt'
+        datasets = {'train': CustomDataset(image_path, metadata_path, 'train', transform, num_val),
+                    'val': CustomDataset(image_path, metadata_path, 'val', transform, num_val)}
+        # data_loaders = {x: DataLoader(datasets[x], batch_size, is_shuffle, num_workers=batch_size)
+        #                 for x in ['train', 'val']}
+        data_loaders = {'train': DataLoader(datasets['train'], batch_size, is_shuffle, num_workers=4),
+                        'val': DataLoader(datasets['val'], batch_size, is_shuffle, num_workers=4)}
+    elif mode == 'test':
+        transform = transforms.Compose([
+            transforms.Resize(img_size),
+            transforms.CenterCrop(img_crop),
+            transforms.ToTensor(),
+            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
+        ])
+
+        batch_size = 1
+        is_shuffle = False
+        dataset = CustomDataset(image_path, metadata_path, 'test', transform)
+        data_loaders = DataLoader(dataset, batch_size, is_shuffle, num_workers=4)
+
+    else:
+        raise ValueError('Unavailable mode: {}'.format(mode))
+
+    return data_loaders
+
+
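+# Example usage (a sketch; the paths are assumptions and must point at a
+# PoseNet-style dataset such as KingsCollege):
+#   loaders = get_loader('Resnet', '/data/KingsCollege',
+#                        '/data/KingsCollege/dataset_train.txt',
+#                        mode='train', batch_size=32, is_shuffle=True)
+#   images, poses = next(iter(loaders['train']))  # poses: (B, 7) xyz + wpqr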
diff --git a/hypobot_system/models/regression/dummy_metadata.csv b/hypobot_system/models/regression/dummy_metadata.csv
new file mode 100644
index 0000000000000000000000000000000000000000..52778b4e3a1928af771c5c4b6f291d420a64b8ce
--- /dev/null
+++ b/hypobot_system/models/regression/dummy_metadata.csv
@@ -0,0 +1,4 @@
+
+
+
+/home/george/aloe-catkin/src/hypobot_system/location.png 0.162321 -3.192868 0.000000 -0.706738 -0.022833 0.022833 0.706738
\ No newline at end of file
diff --git a/hypobot_system/models/regression/model.py b/hypobot_system/models/regression/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..e4bd703b3fe201ed5ecfe97b8a8bf412620df074
--- /dev/null
+++ b/hypobot_system/models/regression/model.py
@@ -0,0 +1,207 @@
+""" Owned By - https://github.com/youngguncho/PoseNet-Pytorch """
+import os
+import time
+import copy
+import torch
+import torchvision
+import pandas as pd
+import numpy as np
+import torch.nn as nn
+import torch.optim as optim
+from torch.optim import lr_scheduler
+from torch.utils.data import Dataset, DataLoader
+from torchvision import transforms, models, datasets
+import torch.nn.functional as F
+import matplotlib.pyplot as plt
+from PIL import Image
+
+
+def model_parser(model, fixed_weight=False, dropout_rate=0.0, bayesian=False):
+    base_model = None
+
+    if model == 'Googlenet':
+        base_model = models.inception_v3(pretrained=True)
+        network = GoogleNet(base_model, fixed_weight, dropout_rate)
+    elif model == 'Resnet':
+        base_model = models.resnet34(pretrained=True)
+        network = ResNet(base_model, fixed_weight, dropout_rate, bayesian)
+    elif model == 'ResnetSimple':
+        base_model = models.resnet34(pretrained=True)
+        network = ResNetSimple(base_model, fixed_weight)
+    else:
+        raise ValueError('Invalid model: {}'.format(model))
+
+    return network
+
+
+class PoseLoss(nn.Module):
+    def __init__(self, device, sx=0.0, sq=0.0, learn_beta=False):
+        super(PoseLoss, self).__init__()
+        self.learn_beta = learn_beta
+
+        # when sx and sq are not learned, fall back to fixed weightings
+        if not self.learn_beta:
+            sx = 0.0
+            sq = -6.25
+
+        self.sx = nn.Parameter(torch.Tensor([sx]), requires_grad=self.learn_beta)
+        self.sq = nn.Parameter(torch.Tensor([sq]), requires_grad=self.learn_beta)
+
+        self.loss_print = None
+
+    def forward(self, pred_x, pred_q, target_x, target_q):
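+        # homoscedastic-uncertainty weighting (Kendall & Cipolla, "Geometric
+        # loss functions for camera pose regression with deep learning"):
+        #   loss = exp(-sx)*L_x + sx + exp(-sq)*L_q + sq
+        # where sx and sq are log-variance terms, learned when learn_beta=True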
+        pred_q = F.normalize(pred_q, p=2, dim=1)
+        loss_x = F.l1_loss(pred_x, target_x)
+        loss_q = F.l1_loss(pred_q, target_q)
+
+        loss = torch.exp(-self.sx)*loss_x \
+               + self.sx \
+               + torch.exp(-self.sq)*loss_q \
+               + self.sq
+
+        self.loss_print = [loss.item(), loss_x.item(), loss_q.item()]
+
+        return loss, loss_x.item(), loss_q.item()
+
+
+class ResNet(nn.Module):
+    def __init__(self, base_model, fixed_weight=False, dropout_rate=0.0, bayesian=False):
+        super(ResNet, self).__init__()
+
+        self.bayesian = bayesian
+        self.dropout_rate = dropout_rate
+        feat_in = base_model.fc.in_features
+
+        self.base_model = nn.Sequential(*list(base_model.children())[:-1])
+        # self.base_model = base_model
+
+        if fixed_weight:
+            for param in self.base_model.parameters():
+                param.requires_grad = False
+
+        self.fc_last = nn.Linear(feat_in, 2048, bias=True)
+        self.fc_position = nn.Linear(2048, 3, bias=True)
+        self.fc_rotation = nn.Linear(2048, 4, bias=True)
+
+        init_modules = [self.fc_last, self.fc_position, self.fc_rotation]
+
+        for module in init_modules:
+            if isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear):
+                nn.init.kaiming_normal_(module.weight)
+                if module.bias is not None:
+                    nn.init.constant_(module.bias, 0)
+
+        # nn.init.normal_(self.fc_last.weight, 0, 0.01)
+        # nn.init.constant_(self.fc_last.bias, 0)
+        #
+        # nn.init.normal_(self.fc_position.weight, 0, 0.5)
+        # nn.init.constant_(self.fc_position.bias, 0)
+        #
+        # nn.init.normal_(self.fc_rotation.weight, 0, 0.01)
+        # nn.init.constant_(self.fc_rotation.bias, 0)
+
+    def forward(self, x):
+        x = self.base_model(x)
+        x = x.view(x.size(0), -1)
+        x_fully = self.fc_last(x)
+        x = F.relu(x_fully)
+
+        dropout_on = self.training or self.bayesian
+        if self.dropout_rate > 0:
+            x = F.dropout(x, p=self.dropout_rate, training=dropout_on)
+
+        position = self.fc_position(x)
+        rotation = self.fc_rotation(x)
+
+        return position, rotation, x_fully
+
+
+class ResNetSimple(nn.Module):
+    def __init__(self, base_model, fixed_weight=False, dropout_rate=0.0):
+        super(ResNetSimple, self).__init__()
+        self.dropout_rate = dropout_rate
+        feat_in = base_model.fc.in_features
+
+        self.base_model = nn.Sequential(*list(base_model.children())[:-1])
+        # self.base_model = base_model
+
+        if fixed_weight:
+            for param in self.base_model.parameters():
+                param.requires_grad = False
+
+        # self.fc_last = nn.Linear(feat_in, 2048, bias=True)
+        self.fc_position = nn.Linear(feat_in, 3, bias=False)
+        self.fc_rotation = nn.Linear(feat_in, 4, bias=False)
+
+        init_modules = [self.fc_position, self.fc_rotation]
+
+        for module in init_modules:
+            if isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear):
+                nn.init.kaiming_normal_(module.weight.data)
+                if module.bias is not None:
+                    nn.init.constant_(module.bias.data, 0)
+
+    def forward(self, x):
+        x = self.base_model(x)
+        x = x.view(x.size(0), -1)
+        position = self.fc_position(x)
+        rotation = self.fc_rotation(x)
+
+        return position, rotation
+
+class GoogleNet(nn.Module):
+    """ PoseNet using Inception V3 """
+    def __init__(self, base_model, fixed_weight=False, dropout_rate=0.0):
+        super(GoogleNet, self).__init__()
+        self.dropout_rate = dropout_rate
+
+        model = []
+        model.append(base_model.Conv2d_1a_3x3)
+        model.append(base_model.Conv2d_2a_3x3)
+        model.append(base_model.Conv2d_2b_3x3)
+        model.append(nn.MaxPool2d(kernel_size=3, stride=2))
+        model.append(base_model.Conv2d_3b_1x1)
+        model.append(base_model.Conv2d_4a_3x3)
+        model.append(nn.MaxPool2d(kernel_size=3, stride=2))
+        model.append(base_model.Mixed_5b)
+        model.append(base_model.Mixed_5c)
+        model.append(base_model.Mixed_5d)
+        model.append(base_model.Mixed_6a)
+        model.append(base_model.Mixed_6b)
+        model.append(base_model.Mixed_6c)
+        model.append(base_model.Mixed_6d)
+        model.append(base_model.Mixed_6e)
+        model.append(base_model.Mixed_7a)
+        model.append(base_model.Mixed_7b)
+        model.append(base_model.Mixed_7c)
+        self.base_model = nn.Sequential(*model)
+
+        if fixed_weight:
+            for param in self.base_model.parameters():
+                param.requires_grad = False
+
+        # Out 2
+        self.pos2 = nn.Linear(2048, 3, bias=True)
+        self.ori2 = nn.Linear(2048, 4, bias=True)
+
+    def forward(self, x):
+        # 299 x 299 x 3
+        x = self.base_model(x)
+        # 8 x 8 x 2048
+        x = F.avg_pool2d(x, kernel_size=8)
+        # 1 x 1 x 2048
+        x = F.dropout(x, p=self.dropout_rate, training=self.training)
+        # 1 x 1 x 2048
+        x = x.view(x.size(0), -1)
+        # 2048
+        pos = self.pos2(x)
+        ori = self.ori2(x)
+
+        return pos, ori
diff --git a/hypobot_system/models/transformer/best_cls_net.pth b/hypobot_system/models/transformer/best_cls_net.pth
new file mode 100644
index 0000000000000000000000000000000000000000..78e8fb37a8c68923a19bfbbf833d32b0a66b52c4
Binary files /dev/null and b/hypobot_system/models/transformer/best_cls_net.pth differ
diff --git a/hypobot_system/models/transformer/model/__pycache__/embed_patches.cpython-39.pyc b/hypobot_system/models/transformer/model/__pycache__/embed_patches.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b79474f3094ece5d5bd3e96714b55e473bc54774
Binary files /dev/null and b/hypobot_system/models/transformer/model/__pycache__/embed_patches.cpython-39.pyc differ
diff --git a/hypobot_system/models/transformer/model/__pycache__/mlp.cpython-39.pyc b/hypobot_system/models/transformer/model/__pycache__/mlp.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..48023e15db43d1a517fa85b87ed965f991573501
Binary files /dev/null and b/hypobot_system/models/transformer/model/__pycache__/mlp.cpython-39.pyc differ
diff --git a/hypobot_system/models/transformer/model/__pycache__/multihead_self_attention.cpython-39.pyc b/hypobot_system/models/transformer/model/__pycache__/multihead_self_attention.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b3fb73af9e74903dd1be17c3f58e0ce32d7488a6
Binary files /dev/null and b/hypobot_system/models/transformer/model/__pycache__/multihead_self_attention.cpython-39.pyc differ
diff --git a/hypobot_system/models/transformer/model/__pycache__/simple_vit_google.cpython-39.pyc b/hypobot_system/models/transformer/model/__pycache__/simple_vit_google.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1dc1ed5cbd443d8879382144ad0b0a80e3147de5
Binary files /dev/null and b/hypobot_system/models/transformer/model/__pycache__/simple_vit_google.cpython-39.pyc differ
diff --git a/hypobot_system/models/transformer/model/__pycache__/transformer_encoder.cpython-39.pyc b/hypobot_system/models/transformer/model/__pycache__/transformer_encoder.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2eb620a5098a97b1afffbe9940a0936a58aa7bf5
Binary files /dev/null and b/hypobot_system/models/transformer/model/__pycache__/transformer_encoder.cpython-39.pyc differ
diff --git a/hypobot_system/models/transformer/model/__pycache__/vit.cpython-39.pyc b/hypobot_system/models/transformer/model/__pycache__/vit.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..34edd870cd0b2d2e05dda18152c417a02a3f9489
Binary files /dev/null and b/hypobot_system/models/transformer/model/__pycache__/vit.cpython-39.pyc differ
diff --git a/hypobot_system/models/transformer/model/embed_patches.py b/hypobot_system/models/transformer/model/embed_patches.py
new file mode 100644
index 0000000000000000000000000000000000000000..fbc421b08a8e57865c079f83e12660265c38e2cf
--- /dev/null
+++ b/hypobot_system/models/transformer/model/embed_patches.py
@@ -0,0 +1,50 @@
+import torch.nn as nn
+import torch
+
+class EmbedPatches(nn.Module):
+    def __init__(self, img_size, patch_size, token, latent_vec_size=768):
+        super(EmbedPatches, self).__init__()
+
+        # token - is the [cls] token being used?
+        # The [cls] token is an extra learnable embedding that aggregates the
+        # information of all patches; the final MLP head typically takes its
+        # output. An alternative is to average all patch outputs instead,
+        # excluding [cls]. Both options have been tested.
+        self.token = token
+
+        self.no_patches = (img_size // patch_size) ** 2
+        self.project = nn.Conv2d(
+                3, # RGB - Channels
+                latent_vec_size,
+                kernel_size=patch_size,
+                stride=patch_size,
+        ) # configured patches to have no overlap
+
+        if self.token:
+            self.cls_token = nn.Parameter(torch.zeros(1, 1, latent_vec_size))
+            # learn the positions of all patches including [cls]
+            self.patch_position_embed = nn.Parameter(torch.zeros(1, 1 + self.no_patches, latent_vec_size))
+        else:
+            self.patch_position_embed = nn.Parameter(torch.zeros(1, self.no_patches, latent_vec_size))
+
+        self.dropout = nn.Dropout(p=0.1)
+
+    def forward(self, x):
+        batch_size = x.shape[0]
+        if self.token:
+            cls_token = self.cls_token.expand(batch_size, -1, -1) # updating cls_token dims w/ batch_size
+            x = self.project_dim(x)
+            x = torch.cat((cls_token, x), dim=1) # add [cls] token as a patch
+        else:
+            x = self.project_dim(x)
+
+        x = x + self.patch_position_embed # adding patch values embedding w/ positions of patches
+        x = self.dropout(x)
+        return x
+
+    # projects patches into latent vector size dimensions
+    def project_dim(self, x):
+        x = self.project(x)
+        x = x.flatten(2)
+        x = x.transpose(1, 2)
+        return x
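+
+if __name__ == '__main__':
+    # smoke test with assumed ViT-Base sizes: a 224x224 image in 16x16 patches
+    # gives 196 patch embeddings plus one [cls] token, each 768-dimensional
+    embed = EmbedPatches(img_size=224, patch_size=16, token=True)
+    print(embed(torch.randn(2, 3, 224, 224)).shape)  # torch.Size([2, 197, 768])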
diff --git a/hypobot_system/models/transformer/model/mlp.py b/hypobot_system/models/transformer/model/mlp.py
new file mode 100644
index 0000000000000000000000000000000000000000..38058bc60de52f5c10002ebf5e0908ace1002150
--- /dev/null
+++ b/hypobot_system/models/transformer/model/mlp.py
@@ -0,0 +1,13 @@
+import torch.nn as nn
+
+class MLP(nn.Module):
+    def __init__(self, input_dim, mid_dim, out_dim):
+        super(MLP, self).__init__()
+        self.mlp = nn.Sequential(
+                            nn.Linear(input_dim, mid_dim),
+                            nn.GELU(),
+                            nn.Linear(mid_dim, out_dim),
+                            nn.Dropout(0.1))
+
+    def forward(self, x):
+        return self.mlp(x)
diff --git a/hypobot_system/models/transformer/model/multihead_self_attention.py b/hypobot_system/models/transformer/model/multihead_self_attention.py
new file mode 100644
index 0000000000000000000000000000000000000000..2db2a968915a6c9e731b96679f3aafb26a524884
--- /dev/null
+++ b/hypobot_system/models/transformer/model/multihead_self_attention.py
@@ -0,0 +1,42 @@
+import torch.nn as nn
+import torch
+import torch.nn.functional as F
+
+class MHSA(nn.Module):
+    def __init__(self, latent_vec_size, no_heads=3):
+        super(MHSA, self).__init__()
+        self.no_heads = no_heads # how many individual heads in msa
+        self.head_dim = latent_vec_size // no_heads # when heads outputs are concat they have same dim as input
+
+        self.qkv = nn.Linear(latent_vec_size, latent_vec_size*3, bias=False)
+        self.attn_dropout = nn.Dropout(0.1)
+        self.project = nn.Linear(latent_vec_size,latent_vec_size)
+
+    def getQKV(self, x, batch_size, no_seq, latent_vec_size):
+        x = x.reshape(batch_size, no_seq, self.no_heads, 3*self.head_dim) # (batch_size, no_seq, no_heads, qkv-values)
+        x = x.permute(0,2,1,3) # (batch_size, no_heads, no_seq, qkv-values)
+        return x.chunk(3, dim=-1) # splits q,k,v values
+
+    def calculateAttentions(self, q, k, v):
+        softmax_norm = self.head_dim**-0.5
+        input_to = torch.matmul(q, k.transpose(-2,-1)) * softmax_norm
+        return F.softmax(input_to, dim=-1) # returns prob dist. to use as weightings
+
+    # https://arxiv.org/pdf/2010.11929.pdf - Appendix A eq 5-7
+    def forward(self, x):
+        # embedded input patches run through a linear layer to learn
+        # corresponding weights for key, value and query
+        # output is the key, value, query
+        batch_size, no_seq, latent_vec_size = x.shape # (batch_size, no_seq, latent_vec_size)
+        qkv = self.qkv(x) # (batch_size, no_seq, latent_vec_size*3)
+        q,k,v = self.getQKV(qkv, batch_size, no_seq, latent_vec_size)
+
+        attentions = self.calculateAttentions(q,k,v)
+        attentions = self.attn_dropout(attentions)
+        self_attention = torch.matmul(attentions, v) # (batch_size, no_heads, no_seq, head_dim)
+        self_attention = self_attention.transpose(1,2) # (batch_size, no_seq, no_heads, head_dim)
+        self_attention = self_attention.flatten(2) # concats values from all the no_heads
+
+        context_vectors = self.project(self_attention)
+        return context_vectors
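+
+if __name__ == '__main__':
+    # smoke test with assumed shapes: multi-head self-attention maps a
+    # (batch, tokens, latent) input to an output of the same shape
+    attn = MHSA(latent_vec_size=768, no_heads=3)
+    print(attn(torch.randn(2, 197, 768)).shape)  # torch.Size([2, 197, 768])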
diff --git a/hypobot_system/models/transformer/model/transformer_encoder.py b/hypobot_system/models/transformer/model/transformer_encoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..99129e4e214aa8a0cecd9bd862121da561b334de
--- /dev/null
+++ b/hypobot_system/models/transformer/model/transformer_encoder.py
@@ -0,0 +1,30 @@
+import torch.nn as nn
+
+from multihead_self_attention import MHSA
+from mlp import MLP
+
+# See diagram on pg3 TransformerEncoder https://arxiv.org/pdf/2010.11929.pdf
+class TransformerEncoder(nn.Module):
+    def __init__(self, latent_vec_size, no_heads):
+        super(TransformerEncoder, self).__init__()
+
+        self.encoder_block_1 = nn.Sequential(
+                                nn.LayerNorm(latent_vec_size),
+                                MHSA(latent_vec_size,no_heads=no_heads)
+                                )
+        # an MLP ratio of 4 sets the hidden width of the first linear layer in
+        # the MLP; the value follows common ViT implementations rather than
+        # anything specified in the paper
+        mid_dim = int(latent_vec_size * 4.0)
+        self.encoder_block_2 = nn.Sequential(
+                                    nn.LayerNorm(latent_vec_size),
+                                    MLP(
+                                        input_dim=latent_vec_size,
+                                        mid_dim=mid_dim,
+                                        out_dim=latent_vec_size,
+                                    ))
+
+    def forward(self, x):
+        x = x + self.encoder_block_1(x)
+        x = x + self.encoder_block_2(x)
+        return x
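+
+if __name__ == '__main__':
+    import torch
+    # smoke test: the residual connections mean one pre-norm encoder block
+    # preserves the (batch, tokens, latent) shape of its input
+    block = TransformerEncoder(latent_vec_size=768, no_heads=3)
+    print(block(torch.randn(2, 197, 768)).shape)  # torch.Size([2, 197, 768])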
diff --git a/hypobot_system/models/transformer/model/vit.py b/hypobot_system/models/transformer/model/vit.py
new file mode 100644
index 0000000000000000000000000000000000000000..a77378d2ff407be62eeb782e198eac8d7ea2bce1
--- /dev/null
+++ b/hypobot_system/models/transformer/model/vit.py
@@ -0,0 +1,57 @@
+from embed_patches import EmbedPatches
+from transformer_encoder import TransformerEncoder
+import torch.nn as nn
+import torch
+
+class VIT(nn.Module):
+    def __init__(self, img_size, patch_size, latent_vec_size=768, no_encoders=3, no_heads=3, type="regression", token=True, no_classes=4):
+        super(VIT, self).__init__()
+        self.token = token
+
+        # regression or classification - Note: classification is untested in
+        # this implementation
+        self.type = type
+        self.embed_patches = EmbedPatches(
+                img_size=img_size,
+                patch_size=patch_size,
+                latent_vec_size=latent_vec_size,
+                token=token
+        )
+        self.encoders = nn.ModuleList(
+            [
+                TransformerEncoder(
+                    latent_vec_size=latent_vec_size,
+                    no_heads=no_heads
+                )
+                for _ in range(no_encoders)
+            ]
+        )
+
+        if type == "regression":
+            self.regress_fc_pose = nn.Sequential(nn.Linear(latent_vec_size, 2048),
+                                                 nn.ReLU(),
+                                                 nn.Dropout(0.5))
+            self.fc_xyz = nn.Linear(2048, 3)
+            self.fc_wpqr = nn.Linear(2048, 4)
+        else:
+            self.fc_class = nn.Linear(latent_vec_size, no_classes)
+
+    def forward(self, x):
+        x = self.embed_patches(x)
+        for encoder in self.encoders:
+            x = encoder(x)
+
+        if self.token:
+            final_output = x[:, 0]  # output of the [cls] token
+        else:
+            final_output = torch.mean(x, 1)  # alternative: average of all patch outputs
+
+        if self.type == 'regression':
+            x = self.regress_fc_pose(final_output)
+            xyz = self.fc_xyz(x)
+            ori = self.fc_wpqr(x)
+            return xyz, ori
+        else:
+            return self.fc_class(final_output)
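+
+if __name__ == '__main__':
+    # smoke test with assumed sizes: the regression head maps a 224x224 image
+    # to a 3-vector position and a 4-vector (quaternion) orientation
+    model = VIT(img_size=224, patch_size=16)
+    xyz, wpqr = model(torch.randn(2, 3, 224, 224))
+    print(xyz.shape, wpqr.shape)  # torch.Size([2, 3]) torch.Size([2, 4])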
diff --git a/hypobot_system/msg/event_via_img.msg b/hypobot_system/msg/event_via_img.msg
new file mode 100644
index 0000000000000000000000000000000000000000..7b571463eb02002fc8a4c2498d7f6f870ff90701
--- /dev/null
+++ b/hypobot_system/msg/event_via_img.msg
@@ -0,0 +1,3 @@
+sensor_msgs/Image image
+string payload
+string urgency
\ No newline at end of file
diff --git a/hypobot_system/msg/event_via_pose.msg b/hypobot_system/msg/event_via_pose.msg
new file mode 100644
index 0000000000000000000000000000000000000000..b52a3a769d0b54dba104a3090117038b5004b6fc
--- /dev/null
+++ b/hypobot_system/msg/event_via_pose.msg
@@ -0,0 +1,3 @@
+geometry_msgs/Pose position
+string payload
+string urgency
\ No newline at end of file
diff --git a/hypobot_system/package.xml b/hypobot_system/package.xml
new file mode 100644
index 0000000000000000000000000000000000000000..271d0480570f7f6172a288648f0395768a0ce473
--- /dev/null
+++ b/hypobot_system/package.xml
@@ -0,0 +1,79 @@
+<?xml version="1.0"?>
+<package format="2">
+  <name>hypobot_system</name>
+  <version>0.0.0</version>
+  <description>The hypobot_system package</description>
+
+  <!-- One maintainer tag required, multiple allowed, one person per tag -->
+  <!-- Example:  -->
+  <!-- <maintainer email="jane.doe@example.com">Jane Doe</maintainer> -->
+  <maintainer email="george@todo.todo">george</maintainer>
+
+
+  <!-- One license tag required, multiple allowed, one license per tag -->
+  <!-- Commonly used license strings: -->
+  <!--   BSD, MIT, Boost Software License, GPLv2, GPLv3, LGPLv2.1, LGPLv3 -->
+  <license>TODO</license>
+
+
+  <!-- Url tags are optional, but multiple are allowed, one per tag -->
+  <!-- Optional attribute type can be: website, bugtracker, or repository -->
+  <!-- Example: -->
+  <!-- <url type="website">http://wiki.ros.org/hypobot_system</url> -->
+
+
+  <!-- Author tags are optional, multiple are allowed, one per tag -->
+  <!-- Authors do not have to be maintainers, but could be -->
+  <!-- Example: -->
+  <!-- <author email="jane.doe@example.com">Jane Doe</author> -->
+
+
+  <!-- The *depend tags are used to specify dependencies -->
+  <!-- Dependencies can be catkin packages or system dependencies -->
+  <!-- Examples: -->
+  <!-- Use depend as a shortcut for packages that are both build and exec dependencies -->
+  <!--   <depend>roscpp</depend> -->
+  <!--   Note that this is equivalent to the following: -->
+  <!--   <build_depend>roscpp</build_depend> -->
+  <!--   <exec_depend>roscpp</exec_depend> -->
+  <!-- Use build_depend for packages you need at compile time: -->
+  <!--   <build_depend>message_generation</build_depend> -->
+  <!-- Use build_export_depend for packages you need in order to build against this package: -->
+  <!--   <build_export_depend>message_generation</build_export_depend> -->
+  <!-- Use buildtool_depend for build tool packages: -->
+  <!--   <buildtool_depend>catkin</buildtool_depend> -->
+  <!-- Use exec_depend for packages you need at runtime: -->
+  <!--   <exec_depend>message_runtime</exec_depend> -->
+  <!-- Use test_depend for packages you need only for testing: -->
+  <!--   <test_depend>gtest</test_depend> -->
+  <!-- Use doc_depend for packages you need only for building documentation: -->
+  <!--   <doc_depend>doxygen</doc_depend> -->
+  <buildtool_depend>catkin</buildtool_depend>
+  <build_depend>geometry_msgs</build_depend>
+  <build_depend>roscpp</build_depend>
+  <build_depend>rospy</build_depend>
+  <build_depend>rviz</build_depend>
+  <build_depend>sensor_msgs</build_depend>
+  <build_depend>std_msgs</build_depend>
+  <build_depend>message_generation</build_depend>
+  <build_depend>roslint</build_depend>
+
+  <build_export_depend>geometry_msgs</build_export_depend>
+  <build_export_depend>roscpp</build_export_depend>
+  <build_export_depend>rospy</build_export_depend>
+  <build_export_depend>rviz</build_export_depend>
+  <build_export_depend>sensor_msgs</build_export_depend>
+  <build_export_depend>std_msgs</build_export_depend>
+
+  <exec_depend>geometry_msgs</exec_depend>
+  <exec_depend>roscpp</exec_depend>
+  <exec_depend>rospy</exec_depend>
+  <exec_depend>rviz</exec_depend>
+  <exec_depend>sensor_msgs</exec_depend>
+  <exec_depend>std_msgs</exec_depend>
+  <exec_depend>message_runtime</exec_depend>
+
+
+  <!-- The export tag contains other, unspecified, tags -->
+  <export>
+    <!-- Other tools can request additional information be placed here -->
+
+  </export>
+</package>
diff --git a/hypobot_system/params/environment.yaml b/hypobot_system/params/environment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4883398f9c2ab8b951f2c3b2a621629f9825d65c
--- /dev/null
+++ b/hypobot_system/params/environment.yaml
@@ -0,0 +1,2 @@
+restock_loc_x: 38
+restock_loc_y: 35
\ No newline at end of file
diff --git a/hypobot_system/params/robots.yaml b/hypobot_system/params/robots.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a626a253e7b06c9169934aff958a8d2e2235b138
--- /dev/null
+++ b/hypobot_system/params/robots.yaml
@@ -0,0 +1 @@
+online_robots: ['butthead']
\ No newline at end of file
diff --git a/hypobot_system/params/system.yaml b/hypobot_system/params/system.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4fa9650e21b337764c33fccbfee97edb995a0955
--- /dev/null
+++ b/hypobot_system/params/system.yaml
@@ -0,0 +1 @@
+location_method: 'pose' # pose, classification, regression, transformer
\ No newline at end of file
diff --git a/hypobot_system/scripts/inventory_server.py b/hypobot_system/scripts/inventory_server.py
new file mode 100755
index 0000000000000000000000000000000000000000..a825aa6eef5394cc5b1f1c877b3fb2617c52ae7d
--- /dev/null
+++ b/hypobot_system/scripts/inventory_server.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python
+
+import rospy
+from hypobot_system.srv import GetInventory, GetInventoryResponse, UpdateInventory, UpdateInventoryResponse
+
+# Hard-coded, so stock levels reset every time the system
+# is restarted. Future work: persist the inventory to a database.
+inventory = {
+  "dextrose tablets": 100,
+  "glucose gel": 100,
+  "insulin pen": 100
+}
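+
+# Example calls (a sketch; assumes the namespaced launch files in this package
+# and the service and field names registered below):
+#   rosservice call /butthead/get_inventory
+#   rosservice call /butthead/decr_inventory "{payload: 'insulin pen', count: 1}"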
+
+def handle_reset_inventory(req):
+    for key in inventory:
+        inventory[key] = 1
+    return GetInventoryResponse(str(inventory))
+
+def handle_get_inventory(req):
+    return GetInventoryResponse(str(inventory))
+
+def handle_incr_inventory(req):
+    inventory[req.payload] += req.count
+    return UpdateInventoryResponse(str(inventory))
+
+def handle_decr_inventory(req):
+    inventory[req.payload] -= req.count
+    return UpdateInventoryResponse(str(inventory))
+
+def handle_remove_all_inventory(req):
+    for key in inventory:
+        inventory[key] = 0
+    return GetInventoryResponse(str(inventory))
+
+def get_inventory():
+    rospy.Service('get_inventory', GetInventory, handle_get_inventory)
+
+def incr_inventory():
+    rospy.Service("incr_inventory", UpdateInventory, handle_incr_inventory)
+
+def decr_inventory():
+    rospy.Service("decr_inventory", UpdateInventory, handle_decr_inventory)
+
+def reset_inventory():
+    rospy.Service("reset_inventory", GetInventory, handle_reset_inventory)
+
+def remove_all_inventory():
+    rospy.Service("remove_all_inventory", GetInventory, handle_remove_all_inventory)
+
+def create_services(robot_name):
+    # available services
+    get_inventory()
+    incr_inventory()
+    decr_inventory()
+    reset_inventory()
+    remove_all_inventory()
+
+    rospy.loginfo("Inventory Server Running for %s", robot_name)
+    rospy.spin()
+
+if __name__ == "__main__":
+    try:
+        # initialise the node before reading its (namespaced) robot_name param
+        rospy.init_node('inventory_server')
+        robot_name = rospy.get_param('robot_name')
+        create_services(robot_name)
+    except rospy.ROSInterruptException:
+        pass
\ No newline at end of file
diff --git a/hypobot_system/src/__pycache__/find_location_via_reg.cpython-39.pyc b/hypobot_system/src/__pycache__/find_location_via_reg.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0b0b42490bc411b23ec0c13f70703d56db27f5dd
Binary files /dev/null and b/hypobot_system/src/__pycache__/find_location_via_reg.cpython-39.pyc differ
diff --git a/hypobot_system/src/central_manager.py b/hypobot_system/src/central_manager.py
new file mode 100755
index 0000000000000000000000000000000000000000..c3ebb6aa6b836180dc54cf21c3beb65384c98e79
--- /dev/null
+++ b/hypobot_system/src/central_manager.py
@@ -0,0 +1,178 @@
+#!/usr/bin/env python
+
+import rospy
+import cv2
+from cv_bridge import CvBridge
+from hypobot_system.msg import event_via_pose, event_via_img
+from register_manager import RobotManager
+from events import Events
+from find_location_via_class import FindViaClassification
+from find_location_via_reg import FindViaRegression
+from find_location_via_tf import FindViaTransformer
+
+# TODO: Clean up allocation and use the Events class to control prioritisation
+class CentralManager():
+    def __init__(self):
+        # register robots - assigns each an inventory server and navigation ability
+        self.rm = RobotManager()
+        self.events = Events()
+        self.bridge = CvBridge()
+        self.search_method = rospy.get_param('location_method')  # pose, classification, regression or transformer
+        self.find_class = FindViaClassification()
+        self.find_poser = FindViaRegression()
+        self.find_poset = FindViaTransformer()
+        self.event_type = None  # message type
+
+        self.restock_goal = event_via_pose()
+        self.restock_goal.position.position.x = rospy.get_param('restock_loc_x')
+        self.restock_goal.position.position.y = rospy.get_param('restock_loc_y')
+        self.restock_goal.position.orientation.w = 1
+
+        # subscribe last so callbacks cannot fire before the restock goal exists
+        self.monitor_events()
+
+    def find_location(self, image):
+        img = self.bridge.imgmsg_to_cv2(image)
+        cv2.imwrite('/home/george/aloe-catkin/src/hypobot_system/location.png', img)
+
+        if self.search_method == 'classification':
+            return self.find_class.get_location()
+        if self.search_method == 'regression':
+            return self.find_poser.find_location()
+        if self.search_method == 'transformer':
+            return self.find_poset.find_location()
+
+    def allocate_rescue_robot(self, msg):
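+        # Allocation policy: with one idle robot, send it straight to the event
+        # if it has the payload in stock, otherwise via the restock point first.
+        # With two idle robots, prefer one with stock and break ties on
+        # navigation distance. A move_base goal state of 3 means SUCCEEDED.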
+        if self.rm.number_robots_available() == 1:
+            robot = self.rm.get_available_robot()
+            robot.attending_event = True
+            if robot.inv_manager.has_inventory(msg.payload)[0]:
+                rospy.loginfo("{0} is navigating to the rescue location".format(robot.name))
+                ######################### for test only
+                print(robot.nav_controller.calculate_dist(msg))
+                ######################################
+                state = robot.nav_controller.send_goal(msg)
+                rospy.loginfo("{0} has reached the rescue location".format(robot.name))
+                if state == 3:
+                    ######################### for test only
+                    print(rospy.get_rostime())
+                    ######################################
+                    robot.inv_manager.decrease_inventory(msg.payload)
+                    robot.attending_event = False
+            else:
+                rospy.loginfo("{0} is navigating to the restock location prior to rescue location".format(robot.name))
+                robot.nav_controller.send_goal(self.restock_goal)
+                robot.inv_manager.increase_inventory()
+                state = robot.nav_controller.send_goal(msg)
+                if state == 3:
+                    robot.inv_manager.decrease_inventory(msg.payload)
+                    robot.attending_event = False
+
+        # allocate
+        if self.rm.number_robots_available() > 1:
+            robot_1, robot_2 = self.rm.get_available_robots()
+            if robot_1.inv_manager.has_inventory(msg.payload)[0] and robot_2.inv_manager.has_inventory(msg.payload)[0]:
+                if robot_1.nav_controller.calculate_dist(msg) < robot_2.nav_controller.calculate_dist(msg):
+                    robot_1.attending_event = True
+                    rospy.loginfo("{0} is navigating to the rescue location".format(robot_1.name))
+                    state = robot_1.nav_controller.send_goal(msg)
+                    if state == 3:
+                        robot_1.inv_manager.decrease_inventory(msg.payload)
+                        robot_1.attending_event = False
+                else:
+                    robot_2.attending_event = True
+                    rospy.loginfo("{0} is navigating to the rescue location".format(robot_2.name))
+                    state = robot_2.nav_controller.send_goal(msg)
+                    if state == 3:
+                        robot_2.inv_manager.decrease_inventory(msg.payload)
+                        robot_2.attending_event = False
+        
+            elif robot_1.inv_manager.has_inventory(msg.payload)[0] and not robot_2.inv_manager.has_inventory(msg.payload)[0]:
+                robot_1.attending_event = True
+                rospy.loginfo("{0} is navigating to the rescue location".format(robot_1.name))
+                state = robot_1.nav_controller.send_goal(msg)
+                if state == 3:
+                    robot_1.inv_manager.decrease_inventory(msg.payload)
+                    robot_1.attending_event = False
+            elif not robot_1.inv_manager.has_inventory(msg.payload)[0] and robot_2.inv_manager.has_inventory(msg.payload)[0]:
+                robot_2.attending_event = True
+                rospy.loginfo("{0} is navigating to the rescue location".format(robot_2.name))
+                state = robot_2.nav_controller.send_goal(msg)
+                if state == 3:
+                    robot_2.inv_manager.decrease_inventory(msg.payload)
+                    robot_2.attending_event = False
+            else:
+                if robot_1.nav_controller.calculate_dist(self.restock_goal) < robot_2.nav_controller.calculate_dist(self.restock_goal):
+                    robot_1.attending_event = True
+                    rospy.loginfo("{0} is navigating to the restock location prior to rescue location".format(robot_1.name))
+                    state = robot_1.nav_controller.send_goal(self.restock_goal)
+                    if state == 3:
+                        robot_1.inv_manager.increase_inventory()
+                        new_state = robot_1.nav_controller.send_goal(msg)
+                        if new_state == 3:
+                            robot_1.inv_manager.decrease_inventory(msg.payload)
+                            robot_1.attending_event = False
+                else:
+                    robot_2.attending_event = True
+                    rospy.loginfo("{0} is navigating to the restock location prior to rescue location".format(robot_2.name))
+                    state = robot_2.nav_controller.send_goal(self.restock_goal)
+                    if state == 3:
+                        robot_2.inv_manager.increase_inventory()
+                        new_state = robot_2.nav_controller.send_goal(msg)
+                        if new_state == 3:
+                            robot_2.inv_manager.decrease_inventory(msg.payload)
+                            robot_2.attending_event = False
+
+    def handle_event(self, msg):
+        if self.event_type == event_via_img:
+            x, y = self.find_location(msg.image)
+            new_msg = event_via_pose()
+            new_msg.position.position.x = x
+            new_msg.position.position.y = y
+            new_msg.position.orientation.w = 1
+            new_msg.payload = msg.payload
+            self.allocate_rescue_robot(new_msg)
+        else:
+            rospy.loginfo("Rescue event received - Action is being taken")
+            self.allocate_rescue_robot(msg)
+
+    def red_callback(self, msg):
+        if self.rm.number_robots_available() == 0:
+            self.events.red_events.append(msg)
+            rospy.loginfo("No robots available - Event has been queued as high priority")
+        else:
+            self.handle_event(msg)
+
+    def amber_callback(self, msg):
+        if self.rm.number_robots_available() == 0:
+            self.events.amber_events.append(msg)
+            rospy.loginfo("No robots available - Event has been queued as medium priority")
+        else:
+            self.handle_event(msg)
+
+    def green_callback(self, msg):
+        if self.rm.number_robots_available() == 0:
+            self.events.green_events.append(msg)
+            rospy.loginfo("No robots available - Event has been queued as low priority")
+        else:
+            self.handle_event(msg)
+
+    def monitor_events(self):
+        rospy.loginfo("Location search via {0}".format(self.search_method))
+        if self.search_method == 'pose':
+            self.event_type = event_via_pose
+        else:
+            self.event_type = event_via_img
+        rospy.Subscriber('red_events', self.event_type, self.red_callback)
+        rospy.Subscriber('amber_events', self.event_type, self.amber_callback)
+        rospy.Subscriber('green_events', self.event_type, self.green_callback)
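+        # Hedged usage example (message layout inferred from handle_event; the
+        # payload value is illustrative and the topic is shown resolved in the
+        # root namespace):
+        #   rostopic pub -1 /red_events hypobot_system/event_via_pose \
+        #     '{position: {position: {x: 40.5, y: 40.0}, orientation: {w: 1.0}}, payload: "medkit"}'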
+
+
+if __name__ == '__main__':
+    rospy.init_node('central_manager')
+    CentralManager()
+    try:
+        rospy.spin()
+    except rospy.ROSInterruptException:
+        pass
\ No newline at end of file
diff --git a/hypobot_system/src/constants.py b/hypobot_system/src/constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..c52086f1b477d9082f0535ea5a874a32ae26b846
--- /dev/null
+++ b/hypobot_system/src/constants.py
@@ -0,0 +1,2 @@
+ROBOT_1 = "butthead"
+ROBOT_2 = "stimpy"
\ No newline at end of file
diff --git a/hypobot_system/src/events.py b/hypobot_system/src/events.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d5536feea8991ac5b1fb4d1158a05b99294ca39
--- /dev/null
+++ b/hypobot_system/src/events.py
@@ -0,0 +1,5 @@
+class Events():
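+    # priority-ordered queues (red > amber > green) for events awaiting a free robot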
+    def __init__(self):
+        self.red_events = []
+        self.amber_events = []
+        self.green_events = []
\ No newline at end of file
diff --git a/hypobot_system/src/events.pyc b/hypobot_system/src/events.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..10073312f34335c8354c96b1be049dc26fe2d35f
Binary files /dev/null and b/hypobot_system/src/events.pyc differ
diff --git a/hypobot_system/src/find_location_via_class.py b/hypobot_system/src/find_location_via_class.py
new file mode 100644
index 0000000000000000000000000000000000000000..5332c0241e03b9e9d4a073151a32dce8806d9d5a
--- /dev/null
+++ b/hypobot_system/src/find_location_via_class.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python
+
+import torch
+import torch.nn as nn
+import torchvision.transforms as transforms
+from PIL import Image
+import sys
+
+sys.path.insert(1, "/home/george/aloe-catkin/src/hypobot_system/models/classification")
+
+from google_model import GoogLeNet
+
+class FindViaClassification():
+    def __init__(self):
+        self.model = GoogLeNet()
+
+    def get_location(self):
+        self.model.load_state_dict(torch.load('/home/george/aloe-catkin/src/hypobot_system/models/classification/best_cls_net.pth', map_location='cpu'))
+        self.model.eval()  # switch to inference mode (affects dropout/batch-norm)
+
+        map_label = {
+            0: (40.9, 38.5), #'Robot Lab',
+            1: (40.5, 40), #'Stationary Room',
+            2: (41.4, 30.8), #'Corridor',
+            3: (39, 26) #'PhD Lab'
+        }
+        room_label = {
+            0: 'Robot Lab',
+            1: 'Stationery Room',
+            2: 'Corridor',
+            3: 'PhD Lab'
+        }
+
+        image = Image.open('/home/george/aloe-catkin/src/hypobot_system/location.png')
+        softmax = nn.Softmax(dim=1)
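+        # GoogLeNet takes 224x224 input, hence the centre crop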
+        convert_tensor = transforms.Compose([transforms.CenterCrop(224),transforms.ToTensor()])
+        image = convert_tensor(image)
+        image = image.unsqueeze(0)
+        
+        with torch.no_grad():
+            output, _, _ = self.model(image)  # GoogLeNet's two auxiliary outputs are unused at inference
+            output = softmax(output)
+            index = torch.argmax(output)
+        
+            label = map_label[index.item()]
+            print(room_label[index.item()])
+            print(label)
+
+        return label
+
+def main():
+    FindViaClassification().get_location()
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file
diff --git a/hypobot_system/src/find_location_via_class.pyc b/hypobot_system/src/find_location_via_class.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..79e4c954f8ff920f81e4b417f8f50ad7da22d9a5
Binary files /dev/null and b/hypobot_system/src/find_location_via_class.pyc differ
diff --git a/hypobot_system/src/find_location_via_reg.py b/hypobot_system/src/find_location_via_reg.py
new file mode 100644
index 0000000000000000000000000000000000000000..afe4193fff44324182b68e1d2bd214ac8b208d46
--- /dev/null
+++ b/hypobot_system/src/find_location_via_reg.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+
+import torch
+import numpy as np
+
+import rospy
+import tf
+from geometry_msgs.msg import PoseStamped
+import sys
+
+sys.path.insert(1, "/home/george/aloe-catkin/src/hypobot_system/models/regression")
+
+from data_loader import get_loader
+from model import model_parser
+
+class FindViaRegression():
+    def __init__(self):
+        self.model = model_parser('Resnet', False, 0.5, False)
+
+    def find_location(self):
+        self.data_loader = get_loader('Resnet', '/home/george/aloe-catkin/src/hypobot_system', '/home/george/aloe-catkin/src/hypobot_system/models/regression/dummy_metadata.csv', 'test', 'True')
+        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+
+        self.model = self.model.to(self.device)
+        self.model.load_state_dict(torch.load("/home/george/aloe-catkin/src/hypobot_system/models/regression/150_best_net.pth", map_location=self.device))
+        self.model.eval()
+
+        pose = np.zeros((1,7))
+        with torch.no_grad():
+            for i, (inputs, poses) in enumerate(self.data_loader):
+                inputs = inputs.to(self.device)
+                pos_out, ori_out, _ = self.model(inputs)
+                pose = torch.cat((pos_out, ori_out), dim=1)
+                pose = pose.cpu().numpy()  # move off the device before converting to numpy
+                new_pose = self.transform(pose[0])
+                print(new_pose)
+                return new_pose
+
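+    # re-express a pose given in the training frame (map-gt, presumably the ground-truth map) in the live navigation map frame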
+    def transformPoint(self,x,y,z,rx,ry,rz,rw, listener):
+        ps = PoseStamped()
+        ps.header.frame_id = "/butthead/map-gt"
+        ps.header.stamp = rospy.Time(0)
+        ps.pose.position.x = x
+        ps.pose.position.y = y
+        ps.pose.position.z = z
+        ps.pose.orientation.x = rx
+        ps.pose.orientation.y = ry
+        ps.pose.orientation.z = rz
+        ps.pose.orientation.w = rw
+        new_pose = listener.transformPose('/butthead/map', ps)
+
+        return new_pose.pose.position.x, new_pose.pose.position.y
+
+    def transform(self, pose):
+        listener = tf.TransformListener()
+        x,y,z,rx,ry,rz,rw = pose[:]
+        listener.waitForTransform('/butthead/map', '/butthead/map-gt', rospy.Time(), rospy.Duration(5.0))
+        new_x, new_y = self.transformPoint(x,y,z,rx,ry,rz,rw, listener)
+        return new_x,new_y
+
+def main():
+    rospy.init_node('find_location_via_reg')  # the tf listener needs a node when run standalone
+    FindViaRegression().find_location()
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file
diff --git a/hypobot_system/src/find_location_via_reg.pyc b/hypobot_system/src/find_location_via_reg.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6cd27dcbf5f4ddefa949007b65b911e0dfaa80f0
Binary files /dev/null and b/hypobot_system/src/find_location_via_reg.pyc differ
diff --git a/hypobot_system/src/find_location_via_tf.py b/hypobot_system/src/find_location_via_tf.py
new file mode 100644
index 0000000000000000000000000000000000000000..4b41861cbc476af5c4fbb38b25d86da0b6349823
--- /dev/null
+++ b/hypobot_system/src/find_location_via_tf.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+
+import torch
+import torch.nn as nn
+import torchvision.transforms as transforms
+from PIL import Image
+import sys
+import rospy
+import tf
+from geometry_msgs.msg import PoseStamped
+
+sys.path.insert(1, "/home/george/aloe-catkin/src/hypobot_system/models/transformer/model")
+
+from vit import VIT
+
+class FindViaTransformer():
+    def __init__(self):
+        self.model = VIT(img_size=256, patch_size=32, token=True)
+
+    def find_location(self):
+        self.model.load_state_dict(torch.load('/home/george/aloe-catkin/src/hypobot_system/models/transformer/best_cls_net.pth', map_location='cpu')['model_state_dict'])
+        self.model.eval()  # switch to inference mode
+
+        image = Image.open('/home/george/aloe-catkin/src/hypobot_system/location.png')
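+        # the ViT was constructed with img_size=256, hence the centre crop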
+        convert_tensor = transforms.Compose([transforms.CenterCrop(256),transforms.ToTensor()])
+        image = convert_tensor(image)
+        image = image.unsqueeze(0)
+
+        with torch.no_grad():
+            pos, ori = self.model(image)
+            pose = torch.cat((pos, ori),1)
+            new_pose = self.transform(pose)
+            print(new_pose)
+
+        return new_pose
+
+    def transformPoint(self,x,y,z,rx,ry,rz,rw, listener):
+        ps = PoseStamped()
+        ps.header.frame_id = "/butthead/map-gt"
+        ps.header.stamp = rospy.Time(0)
+        ps.pose.position.x = x
+        ps.pose.position.y = y
+        ps.pose.position.z = z
+        ps.pose.orientation.x = rx
+        ps.pose.orientation.y = ry
+        ps.pose.orientation.z = rz
+        ps.pose.orientation.w = rw
+        new_pose = listener.transformPose('/butthead/map', ps)
+
+        return new_pose.pose.position.x, new_pose.pose.position.y
+
+    def transform(self, pose):
+        listener = tf.TransformListener()
+        x, y, z, rx, ry, rz, rw = pose[0].tolist()  # plain floats for the PoseStamped fields
+        listener.waitForTransform('/butthead/map', '/butthead/map-gt', rospy.Time(), rospy.Duration(5.0))
+        new_x, new_y = self.transformPoint(x,y,z,rx,ry,rz,rw, listener)
+        return new_x,new_y
+
+def main():
+    rospy.init_node('find_location_via_tf')  # the tf listener needs a node when run standalone
+    FindViaTransformer().find_location()
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file
diff --git a/hypobot_system/src/find_location_via_tf.pyc b/hypobot_system/src/find_location_via_tf.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2fae5efcc5457fe2484291b4c066c6c2f8b04005
Binary files /dev/null and b/hypobot_system/src/find_location_via_tf.pyc differ
diff --git a/hypobot_system/src/manage_inventory.py b/hypobot_system/src/manage_inventory.py
new file mode 100644
index 0000000000000000000000000000000000000000..6de9a452c259e8b601fa44e65ee15ac066592392
--- /dev/null
+++ b/hypobot_system/src/manage_inventory.py
@@ -0,0 +1,49 @@
+import ast
+
+import rospy
+from hypobot_system.srv import GetInventory, UpdateInventory  # services defined in this package
+
+class ManageInventory():
+    def __init__(self, robot_name):
+        self.robot_name = robot_name
+        self.get_inventory = rospy.ServiceProxy('{0}/get_inventory'.format(robot_name), GetInventory)
+        self.incr_inventory = rospy.ServiceProxy('{0}/incr_inventory'.format(robot_name), UpdateInventory)
+        self.decr_inventory = rospy.ServiceProxy('{0}/decr_inventory'.format(robot_name), UpdateInventory)
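+        # the proxies are created eagerly; each method below blocks on wait_for_service before calling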
+
+    def has_inventory(self, req_payload):
+        rospy.wait_for_service('{0}/get_inventory'.format(self.robot_name))
+        try:
+            inventory = ast.literal_eval(self.get_inventory().available_inventory)
+            return inventory[req_payload] > 0, inventory
+        except rospy.ServiceException as e:
+            self.handle_service_failure(e)
+            return False, {}  # callers unpack index 0, so never return None
+
+    def increase_inventory(self):
+        rospy.wait_for_service('{0}/get_inventory'.format(self.robot_name))
+        try:
+            inventory = ast.literal_eval(self.get_inventory().available_inventory)
+        except rospy.ServiceException as e:
+            self.handle_service_failure(e)
+            return False
+
+        # restock every item type that has run out
+        depleted_items = [key for (key, value) in inventory.items() if value == 0]
+        rospy.wait_for_service('{0}/incr_inventory'.format(self.robot_name))
+        try:
+            for item in depleted_items:
+                self.incr_inventory(item, 1)
+            return True
+        except rospy.ServiceException as e:
+            self.handle_service_failure(e)
+            return False
+
+    def decrease_inventory(self, payload):
+        rospy.wait_for_service('{0}/decr_inventory'.format(self.robot_name))
+        try:
+            self.decr_inventory(payload, 1)
+        except rospy.ServiceException as e:
+            self.handle_service_failure(e)
+
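+    # Hedged usage sketch ("medkit" is an illustrative payload name):
+    #   inv = ManageInventory("butthead")
+    #   available, stock = inv.has_inventory("medkit")
+    #   if available:
+    #       inv.decrease_inventory("medkit")
+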
+    def handle_service_failure(self, e):
+        rospy.logerr("Service call failed: %s", e)
\ No newline at end of file
diff --git a/hypobot_system/src/manage_inventory.pyc b/hypobot_system/src/manage_inventory.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2699da519d0a4bbed1134b962c764bf191c210ef
Binary files /dev/null and b/hypobot_system/src/manage_inventory.pyc differ
diff --git a/hypobot_system/src/navigate.py b/hypobot_system/src/navigate.py
new file mode 100644
index 0000000000000000000000000000000000000000..b62f54c44e269d1dee648e3eba6a1e7982262dca
--- /dev/null
+++ b/hypobot_system/src/navigate.py
@@ -0,0 +1,105 @@
+import rospy
+import actionlib
+import math
+from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
+from geometry_msgs.msg import Pose, Point, Quaternion, PoseStamped, PoseWithCovarianceStamped
+from nav_msgs.srv import GetPlan, GetPlanRequest
+
+class Navigate():
+    def __init__(self, robot_name):
+        self.robot_name = robot_name
+        self.client = actionlib.SimpleActionClient('/{0}/move_base'.format(self.robot_name), MoveBaseAction)
+        self.make_plan = rospy.ServiceProxy("/{0}/move_base/make_plan".format(self.robot_name), GetPlan)
+        self.pose_sub = rospy.Subscriber("/{0}/amcl_pose".format(self.robot_name), PoseWithCovarianceStamped, self.update_current_pos)
+
+        # TODO: Use the Pose type
+        self.current_pose = { # estimated current pose from amcl
+            'x': None,
+            'y': None
+        }
+
+        wait = self.client.wait_for_server(rospy.Duration(60))
+        if not wait:
+            rospy.logerr("Action server not available!")
+            rospy.signal_shutdown("Action server not available!")
+            return
+
+        rospy.loginfo("Connected to Action server for %s", robot_name)
+
+    def update_current_pos(self, result):
+        self.current_pose = {
+            'x':result.pose.pose.position.x,
+            'y': result.pose.pose.position.y
+        }
+
+    def send_goal(self, msg):
+        self.client.wait_for_server()
+        goal = MoveBaseGoal()
+        goal.target_pose.header.frame_id = "/{0}/map".format(self.robot_name)
+        goal.target_pose.header.stamp = rospy.Time.now()
+        goal.target_pose.pose = msg.position
+        self.client.send_goal(goal)
+        wait = self.client.wait_for_result(rospy.Duration(200))
+        if not wait:
+            rospy.logerr("Timed out waiting for a navigation result")
+        else:
+            return self.client.get_state()  # actionlib GoalStatus; SUCCEEDED == 3
+
+    def get_plan(self, event):
+        frame_id = "{0}/map".format(self.robot_name)
+
+        start = PoseStamped()
+        start.header.seq = 0
+        start.header.frame_id = frame_id
+        start.header.stamp = rospy.Time(0)
+        start.pose.position.x = self.current_pose['x']
+        start.pose.position.y = self.current_pose['y']
+
+        goal = PoseStamped()
+        goal.header.seq = 0
+        goal.header.frame_id = frame_id
+        goal.header.stamp = rospy.Time.now()
+        goal.pose.position.x = event['position']['x']
+        goal.pose.position.y = event['position']['y']
+
+        req = GetPlanRequest()  # the request type that matches nav_msgs/GetPlan
+        req.start = start
+        req.goal = goal
+        req.tolerance = 1.5
+
+        service = "/{0}/move_base/make_plan".format(self.robot_name)
+        try:
+            rospy.wait_for_service(service, 2)
+            return self.make_plan(req.start, req.goal, req.tolerance)
+        except (rospy.ServiceException, rospy.ROSException) as e:
+            rospy.logerr("make_plan failed for %s: %s", self.robot_name, e)
+            return None
+
+    # Euclidean (L2) distance between two poses in the xy plane
+    def distance_between_pose(self, pose1, pose2):
+        return math.sqrt(pow(pose2.position.x-pose1.position.x, 2) +
+                         pow(pose2.position.y-pose1.position.y, 2))
+
+    def compute_path_length(self, plan):
+        poses = plan.poses
+        path_length = 0
+        # iterate along consecutive poses and accumulate the segment lengths
+        for index in range(1, len(poses)):
+            path_length += self.distance_between_pose(poses[index-1].pose, poses[index].pose)
+        rospy.loginfo("Path Length %s", str(path_length))
+        return path_length
+
+    def calculate_dist(self, msg):
+        event = {
+            'position': {
+                'x': msg.position.position.x,
+                'y': msg.position.position.y
+            }
+        }
+
+        plan = self.get_plan(event)
+        if plan is None:
+            return float('inf')  # no plan could be made; treat this robot as farthest
+        return self.compute_path_length(plan.plan)
\ No newline at end of file
diff --git a/hypobot_system/src/navigate.pyc b/hypobot_system/src/navigate.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..426b5dc1ca775b65ce5908074a13f4d12a0631ef
Binary files /dev/null and b/hypobot_system/src/navigate.pyc differ
diff --git a/hypobot_system/src/register_manager.py b/hypobot_system/src/register_manager.py
new file mode 100644
index 0000000000000000000000000000000000000000..2334e8b54acff2fa1915146c0669e38fde291648
--- /dev/null
+++ b/hypobot_system/src/register_manager.py
@@ -0,0 +1,55 @@
+import rospy
+from manage_inventory import ManageInventory
+from navigate import Navigate
+
+class Robot():
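+    # lightweight record for one online robot: name, inventory manager, navigation controller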
+    def __init__(self):
+        self.name = None
+        self.inv_manager = None
+        self.nav_controller = None
+        self.attending_event = False
+
+class RobotManager():
+    def __init__(self):
+        self.online_robots = rospy.get_param('online_robots')
+        self.num_online_robots = len(self.online_robots)
+        if len(self.online_robots) == 0:
+            rospy.logerr("FAILURE: No Robots Configured - Check parameters")
+        else:
+            self.agent_1 = Robot()
+            self.agent_1.name = self.online_robots[0]
+            self.agent_1.inv_manager = ManageInventory(self.agent_1.name)
+            self.agent_1.nav_controller = Navigate(self.agent_1.name)
+            rospy.loginfo("{0} is ready".format(self.agent_1.name))
+
+        if len(self.online_robots) == 2:
+            self.agent_2 = Robot()
+            self.agent_2.name = self.online_robots[1]
+            self.agent_2.inv_manager = ManageInventory(self.agent_2.name)
+            self.agent_2.nav_controller = Navigate(self.agent_2.name)
+            rospy.loginfo("{0} is ready".format(self.agent_2.name))
+
+    # number of robots available, i.e. those not currently attending an event
+    def number_robots_available(self):
+        if len(self.online_robots) > 1:
+            return int(not self.agent_1.attending_event) + int(not self.agent_2.attending_event)
+        else:
+            return int(not self.agent_1.attending_event)
+
+    # works for single- and multi-robot systems; returns an available robot
+    # (with two robots online, agent_2 is returned whenever agent_1 is busy)
+    def get_available_robot(self):
+        if len(self.online_robots) > 1:
+            if not self.agent_1.attending_event:
+                return self.agent_1
+            else:
+                return self.agent_2
+        else:
+            return self.agent_1
+
+    # only applies to multi-robot systems
+    def get_available_robots(self):
+        return self.agent_1, self.agent_2
\ No newline at end of file
diff --git a/hypobot_system/src/register_manager.pyc b/hypobot_system/src/register_manager.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2853d21c9c6bc3d06c4d68fb9e188b9d0b08b645
Binary files /dev/null and b/hypobot_system/src/register_manager.pyc differ
diff --git a/hypobot_system/srv/GetInventory.srv b/hypobot_system/srv/GetInventory.srv
new file mode 100644
index 0000000000000000000000000000000000000000..0cd87112f0ef48da0bef75e0dc55a2b396f741eb
--- /dev/null
+++ b/hypobot_system/srv/GetInventory.srv
@@ -0,0 +1,4 @@
+
+---
+
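+# a dict literal string of payload type -> remaining count (parsed with ast.literal_eval by the client)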
+string available_inventory
\ No newline at end of file
diff --git a/hypobot_system/srv/UpdateInventory.srv b/hypobot_system/srv/UpdateInventory.srv
new file mode 100644
index 0000000000000000000000000000000000000000..977fd7adc82931d667eb55b7234b110f9c9f4f9d
--- /dev/null
+++ b/hypobot_system/srv/UpdateInventory.srv
@@ -0,0 +1,6 @@
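+# request: which payload type to adjust and by how much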
+string payload
+int64 count
+
+---
+
+string available_inventory
\ No newline at end of file