diff --git a/binarycpython/utils/functions.py b/binarycpython/utils/functions.py
index 1d06b1660d62780d66b4c579d83ccb5682b634b5..35d293cacb835770bfba183ad0c4502b7e0b9884 100644
--- a/binarycpython/utils/functions.py
+++ b/binarycpython/utils/functions.py
@@ -15,6 +15,9 @@ import numpy as np
 
 import binary_c_python_api
 
+########################################################
+# utility functions
+########################################################
 
 def remove_file(file, verbose=0):
     """
@@ -27,11 +30,8 @@ def remove_file(file, verbose=0):
                 print("Removed {}".format(file))
             os.remove(file)
 
-        # TODO: Put correct exception here.
-        except:
-            print("Error while deleting file {}".format(file))
-            raise FileNotFoundError
-
+        except FileNotFoundError as inst:
+            print("Error while deleting file {}: {}".format(file, inst))
 
 def temp_dir():
     """
@@ -49,13 +49,72 @@ def temp_dir():
 
     return path
 
-
-def output_lines(output):
+def create_hdf5(data_dir, name):
     """
-    Function that outputs the lines that were recieved from the binary_c run.
+    Function to create an hdf5 file from the contents of a directory:
+     - settings file is selected by checking on files ending on settings
+     - data files are selected by checking on files ending with .dat
+
+    TODO: fix missing settings files
     """
-    return output.splitlines()
 
+    # Make HDF5:
+    # Create the file
+    hdf5_filename = os.path.join(data_dir, "{}".format(name))
+    print("Creating {}".format(hdf5_filename))
+    hdf5_file = h5py.File(hdf5_filename, "w")
+
+    # Get content of data_dir
+    content_data_dir = os.listdir(data_dir)
+
+    # Settings
+    if any([file.endswith("_settings.json") for file in content_data_dir]):
+        print("Adding settings to HDF5 file")
+        settings_file = os.path.join(
+            data_dir,
+            [file for file in content_data_dir if file.endswith("_settings.json")][0],
+        )
+
+        with open(settings_file, "r") as settings_file:
+            settings_json = json.load(settings_file)
+
+        # Create settings group
+        settings_grp = hdf5_file.create_group("settings")
+
+        # Write version_string to settings_group
+        settings_grp.create_dataset("used_settings", data=json.dumps(settings_json))
+
+    # Get data files
+    data_files = [el for el in content_data_dir if el.endswith(".dat")]
+    if data_files:
+        print("Adding data to HDF5 file")
+
+        # Create the data group
+        data_grp = hdf5_file.create_group("data")
+
+        # Write the data to the file:
+        # Make sure:
+        for data_file in data_files:
+            # filename stuff
+            filename = data_file
+            full_path = os.path.join(data_dir, filename)
+            base_name = os.path.splitext(os.path.basename(filename))[0]
+
+            # Get header info
+            header_name = "{base_name}_header".format(base_name=base_name)
+            data_headers = np.genfromtxt(full_path, dtype="str", max_rows=1)
+            data_headers = np.char.encode(data_headers)
+            data_grp.create_dataset(header_name, data=data_headers)
+
+            # Add data
+            data = np.loadtxt(full_path, skiprows=1)
+            data_grp.create_dataset(base_name, data=data)
+
+        hdf5_file.close()
+
+########################################################
+# version_info functions
+########################################################
 
 def parse_binary_c_version_info(version_info_string):
     """
@@ -150,229 +209,113 @@ def parse_binary_c_version_info(version_info_string):
 
     return version_info_dict
 
+########################################################
+# binary_c output functions
+########################################################
 
-def create_hdf5(data_dir, name):
+def output_lines(output):
     """
-    Function to create an hdf5 file from the contents of a directory:
-     - settings file is selected by checking on files ending on settings
-     - data files are selected by checking on files ending with .dat
-
-    TODO: fix missing settingsfiles
+    Function that outputs the lines that were received from the binary_c run.
     """
+    return output.splitlines()
 
-    # Make HDF5:
-    # Create the file
-    hdf5_filename = os.path.join(data_dir, "{}".format(name))
-    print("Creating {}".format(hdf5_filename))
-    hdf5_file = h5py.File(hdf5_filename, "w")
-
-    # Get content of data_dir
-    content_data_dir = os.listdir(data_dir)
-
-    # Settings
-    if any([file.endswith("_settings.json") for file in content_data_dir]):
-        print("Adding settings to HDF5 file")
-        settings_file = os.path.join(
-            data_dir,
-            [file for file in content_data_dir if file.endswith("_settings.json")][0],
-        )
-
-        with open(settings_file, "r") as settings_file:
-            settings_json = json.load(settings_file)
-
-        # Create settings group
-        settings_grp = hdf5_file.create_group("settings")
-
-        # Write version_string to settings_group
-        settings_grp.create_dataset("used_settings", data=json.dumps(settings_json))
-
-    # Get data files
-    data_files = [el for el in content_data_dir if el.endswith(".dat")]
-    if data_files:
-        print("Adding data to HDF5 file")
-
-        # Create the data group
-        data_grp = hdf5_file.create_group("data")
-
-        # Write the data to the file:
-        # Make sure:
-        for data_file in data_files:
-            # filename stuff
-            filename = data_file
-            full_path = os.path.join(data_dir, filename)
-            base_name = os.path.splitext(os.path.basename(filename))[0]
-
-            # Get header info
-            header_name = "{base_name}_header".format(base_name=base_name)
-            data_headers = np.genfromtxt(full_path, dtype="str", max_rows=1)
-            data_headers = np.char.encode(data_headers)
-            data_grp.create_dataset(header_name, data=data_headers)
-
-            # Add data
-            data = np.loadtxt(full_path, skiprows=1)
-            data_grp.create_dataset(base_name, data=data)
-
-        hdf5_file.close()
-
-
-def get_help_super(print_help=False, fail_silently=True):
-    """
-    Function that first runs get_help_all, and then per argument also run
-    the help function to get as much information as possible.
+def parse_output(output, selected_header):
     """
+    Function that parses output of binary_c:
 
-    # Get help_all information
-    help_all_dict = get_help_all(print_help=False, return_dict=True)
-
-    help_all_super_dict = help_all_dict.copy()
-
-    # Loop over all sections and stuff
-    for section_name in help_all_dict:
-        section = help_all_dict[section_name]
-
-        for parameter_name in section["parameters"].keys():
-            parameter = section["parameters"][parameter_name]
-
-            # Get detailed help info
-            detailed_help = get_help(
-                parameter_name,
-                print_help=False,
-                return_dict=True,
-                fail_silently=fail_silently,
-            )
-
-            if detailed_help:
-                # check whether the descriptions of help_all and detailed help are the same
-                if not fail_silently:
-                    if not parameter["description"] == detailed_help["description"]:
-                        print(json.dumps(parameter, indent=4))
-
-                ## put values into help all super dict
-                # input type
-                parameter["parameter_value_input_type"] = detailed_help[
-                    "parameter_value_input_type"
-                ]
-
-                # default
-                parameter["default"] = detailed_help["default"]
-
-                # macros
-                if "macros" in detailed_help.keys():
-                    parameter["macros"] = detailed_help["macros"]
-
-    if print_help:
-        # TODO: make a pretty print
-        print(json.dumps(help_all_super_dict, indent=4))
+    This function works in two cases:
+    if the caught line contains output like 'example_header time=12.32 mass=0.94 ..'
+    or if the line contains output like 'example_header 12.32 0.94'
 
-    return help_all_super_dict
+    You can give a 'selected_header' to catch any line that starts with that.
+    Then the values will be put into a dictionary.
 
+    TODO: Think about exporting to numpy array or pandas instead of a defaultdict
 
-def get_help_all(print_help=True):
+    TODO: rethink whether this function is necessary at all
     """
-    Function that reads out the output of the help_all api call to binary_c
 
-    print_help: bool, prints all the parameters and their descriptions.
-
-    return_dict:  returns a dictionary
-    """
+    value_dicts = []
 
-    # Call function
-    help_all = binary_c_python_api.return_help_all()
+    # split output on newlines
+    for line in output.split("\n"):
+        # Skip any blank lines
+        if not line == "":
+            split_line = line.split()
 
-    # String manipulation
-    split = help_all.split(
-        "############################################################\n"
-    )
-    cleaned = [el for el in split if not el == "\n"]
+            # Select parts
+            header = split_line[0]
+            values_list = split_line[1:]
 
-    section_nums = [i for i in range(len(cleaned)) if cleaned[i].startswith("#####")]
+            # print(values_list)
+            # Catch line starting with selected header
+            if header == selected_header:
+                # Check if the line contains '=' symbols:
+                value_dict = {}
+                if all("=" in value for value in values_list):
+                    for value in values_list:
+                        key, val = value.split("=")
+                        value_dict[key.strip()] = val.strip()
+                    value_dicts.append(value_dict)
+                else:
+                    if any("=" in value for value in values_list):
+                        raise ValueError(
+                            "Caught line contains some = symbols but not \
+                            all of them do. aborting run"
+                        )
 
-    # Create dicts
-    help_all_dict = {}
+                    for j, val in enumerate(values_list):
+                        value_dict[j] = val
+                    value_dicts.append(value_dict)
 
-    # Select the section name and the contents of that section. Note, not all sections have content!
-    for i in range(len(section_nums)):
-        if not i == len(section_nums) - 1:
-            params = cleaned[section_nums[i] + 1 : section_nums[i + 1]]
-        else:
-            params = cleaned[section_nums[i] + 1 : len(cleaned)]
-        section_name = (
-            cleaned[section_nums[i]]
-            .lstrip("#####")
-            .strip()
-            .replace("Section ", "")
-            .lower()
+    if len(value_dicts) == 0:
+        print(
+            "Sorry, didnt find any line matching your header {}".format(selected_header)
         )
+        return None
 
-        #
-        params_dict = {}
-
-        if params:
+    keys = value_dicts[0].keys()
 
-            # Clean it, replace in-text newlines with a space and then split on newlines.
-            split_params = params[0].strip().replace("\n ", " ").split("\n")
+    # Construct final dict.
+    final_values_dict = defaultdict(list)
+    for value_dict in value_dicts:
+        for key in keys:
+            final_values_dict[key].append(value_dict[key])
 
-            # Process params and descriptions per section
-            for split_param in split_params:
-                split_param_info = split_param.split(" : ")
-                if not len(split_param_info) == 3:
-                    # there are ocassions where the semicolon
-                    # is used in the description text itself.
-                    if len(split_param_info) == 4:
-                        split_param_info = [
-                            split_param_info[0],
-                            ": ".join([split_param_info[1], split_param_info[2]]),
-                            split_param_info[3],
-                        ]
+    return final_values_dict
 
-                    # other occassions?
+########################################################
+# Argument and default value functions
+########################################################
 
-                # Put the information in a dict
-                param_name = split_param_info[0]
-                param_description = split_param_info[1]
-                rest = split_param_info[2]
+def get_defaults(filter_values=False):
+    """
+    Function that calls the binaryc get args function and casts it into a dictionary.
 
-                params_dict[param_name] = {
-                    "param_name": param_name,
-                    "description": param_description,
-                    "rest": rest,
-                }
+    All the values are strings
 
-            # make section_dict
-            section_dict = {
-                "section_name": section_name,
-                "parameters": params_dict.copy(),
-            }
+    filter_values: whether to filter out NULL and Function defaults.
+    """
 
-            # Put in the total dict
-            help_all_dict[section_name] = section_dict.copy()
+    default_output = binary_c_python_api.return_arglines()
+    default_dict = {}
 
-    # Print things
-    if print_help:
-        for section in sorted(help_all_dict.keys()):
-            print(
-                "##################\n###### Section {}\n##################".format(
-                    section
-                )
-            )
-            section_dict = help_all_dict[section]
-            for param_name in sorted(section_dict["parameters"].keys()):
-                param = section_dict["parameters"][param_name]
-                print(
-                    "\n{}:\n\t{}: {}".format(
-                        param["param_name"], param["description"], param["rest"]
-                    )
-                )
+    for default in default_output.split("\n"):
+        if not default in ["__ARG_BEGIN", "__ARG_END", ""]:
+            key, value = default.split(" = ")
+            default_dict[key] = value
 
-    # # Loop over all the parameters an call the help() function on it.
-    # # Takes a long time but this is for testing
-    # for section in help_all_dict.keys():
-    #     section_dict = help_all_dict[section]
-    #     for param in section_dict['parameters'].keys():
-    #         get_help(param)
+    if filter_values:
+        default_dict = filter_arg_dict(default_dict)
 
-    return help_all_dict
+    return default_dict
+
+def get_arg_keys():
+    """
+    Function that returns the list of possible keys to give in the arg string
+    """
 
+    return get_defaults().keys()
 
 def filter_arg_dict(arg_dict):
     """
@@ -389,7 +332,6 @@ def filter_arg_dict(arg_dict):
 
     return new_dict
 
-
 def create_arg_string(arg_dict, sort=False, filter_values=False):
     """
     Function that creates the arg string for binary_c.
@@ -410,37 +352,9 @@ def create_arg_string(arg_dict, sort=False, filter_values=False):
     arg_string = arg_string.strip()
     return arg_string
 
-
-def get_defaults(filter_values=False):
-    """
-    Function that calls the binaryc get args function and cast it into a dictionary.
-
-    All the values are strings
-
-    filter_values: whether to filter out NULL and Function defaults.
-    """
-
-    default_output = binary_c_python_api.return_arglines()
-    default_dict = {}
-
-    for default in default_output.split("\n"):
-        if not default in ["__ARG_BEGIN", "__ARG_END", ""]:
-            key, value = default.split(" = ")
-            default_dict[key] = value
-
-    if filter_values:
-        default_dict = filter_arg_dict(default_dict)
-
-    return default_dict
-
-
-def get_arg_keys():
-    """
-    Function that return the list of possible keys to give in the arg string
-    """
-
-    return get_defaults().keys()
-
+########################################################
+# Help functions
+########################################################
 
 def get_help(param_name="", print_help=True, fail_silently=False):
     """
@@ -540,70 +454,174 @@ def get_help(param_name="", print_help=True, fail_silently=False):
             )
         return None
 
+def get_help_all(print_help=True):
+    """
+    Function that reads out the output of the help_all api call to binary_c
 
-def parse_output(output, selected_header):
+    print_help: bool, prints all the parameters and their descriptions.
+
+    return_dict:  returns a dictionary
     """
-    Function that parses output of binary_c:
 
-    This function works in two cases:
-    if the caught line contains output like 'example_header time=12.32 mass=0.94 ..'
-    or if the line contains output like 'example_header 12.32 0.94'
+    # Call function
+    help_all = binary_c_python_api.return_help_all()
 
-    You can give a 'selected_header' to catch any line that starts with that.
-    Then the values will be put into a dictionary.
+    # String manipulation
+    split = help_all.split(
+        "############################################################\n"
+    )
+    cleaned = [el for el in split if not el == "\n"]
 
-    TODO: Think about exporting to numpy array or pandas instead of a defaultdict
+    section_nums = [i for i in range(len(cleaned)) if cleaned[i].startswith("#####")]
+
+    # Create dicts
+    help_all_dict = {}
+
+    # Select the section name and the contents of that section. Note, not all sections have content!
+    for i in range(len(section_nums)):
+        if not i == len(section_nums) - 1:
+            params = cleaned[section_nums[i] + 1 : section_nums[i + 1]]
+        else:
+            params = cleaned[section_nums[i] + 1 : len(cleaned)]
+        section_name = (
+            cleaned[section_nums[i]]
+            .lstrip("#####")
+            .strip()
+            .replace("Section ", "")
+            .lower()
+        )
+
+        #
+        params_dict = {}
+
+        if params:
+
+            # Clean it, replace in-text newlines with a space and then split on newlines.
+            split_params = params[0].strip().replace("\n ", " ").split("\n")
+
+            # Process params and descriptions per section
+            for split_param in split_params:
+                split_param_info = split_param.split(" : ")
+                if not len(split_param_info) == 3:
+                    # there are occasions where the colon
+                    # is used in the description text itself.
+                    if len(split_param_info) == 4:
+                        split_param_info = [
+                            split_param_info[0],
+                            ": ".join([split_param_info[1], split_param_info[2]]),
+                            split_param_info[3],
+                        ]
+
+                    # other occasions?
+
+                # Put the information in a dict
+                param_name = split_param_info[0]
+                param_description = split_param_info[1]
+                rest = split_param_info[2]
+
+                params_dict[param_name] = {
+                    "param_name": param_name,
+                    "description": param_description,
+                    "rest": rest,
+                }
+
+            # make section_dict
+            section_dict = {
+                "section_name": section_name,
+                "parameters": params_dict.copy(),
+            }
+
+            # Put in the total dict
+            help_all_dict[section_name] = section_dict.copy()
+
+    # Print things
+    if print_help:
+        for section in sorted(help_all_dict.keys()):
+            print(
+                "##################\n###### Section {}\n##################".format(
+                    section
+                )
+            )
+            section_dict = help_all_dict[section]
+            for param_name in sorted(section_dict["parameters"].keys()):
+                param = section_dict["parameters"][param_name]
+                print(
+                    "\n{}:\n\t{}: {}".format(
+                        param["param_name"], param["description"], param["rest"]
+                    )
+                )
+
+    # # Loop over all the parameters and call the help() function on it.
+    # # Takes a long time but this is for testing
+    # for section in help_all_dict.keys():
+    #     section_dict = help_all_dict[section]
+    #     for param in section_dict['parameters'].keys():
+    #         get_help(param)
+
+    return help_all_dict
+
+def get_help_super(print_help=False, fail_silently=True):
+    """
+    Function that first runs get_help_all, and then per argument also runs
+    the help function to get as much information as possible.
     """
 
-    value_dicts = []
+    # Get help_all information
+    help_all_dict = get_help_all(print_help=False)
+    for section_name in help_all_dict:
+        section = help_all_dict[section_name]
+        print(section_name)
+        for parameter_name in section["parameters"].keys():
+            print("\t",parameter_name)
 
-    # split output on newlines
-    for line in output.split("\n"):
-        # Skip any blank lines
-        if not line == "":
-            split_line = line.split()
+    help_all_super_dict = help_all_dict.copy()
 
-            # Select parts
-            header = split_line[0]
-            values_list = split_line[1:]
+    # Loop over all sections and stuff
+    for section_name in help_all_dict:
+        # Skipping the section i/o because that one shouldn't be available to python anyway
+        if not section_name == "i/o":
+            section = help_all_dict[section_name]
+
+            for parameter_name in section["parameters"].keys():
+                parameter = section["parameters"][parameter_name]
+
+                # Get detailed help info
+                detailed_help = get_help(
+                    parameter_name,
+                    print_help=False,
+                    fail_silently=fail_silently,
+                )
 
-            # print(values_list)
-            # Catch line starting with selected header
-            if header == selected_header:
-                # Check if the line contains '=' symbols:
-                value_dict = {}
-                if all("=" in value for value in values_list):
-                    for value in values_list:
-                        key, val = value.split("=")
-                        value_dict[key.strip()] = val.strip()
-                    value_dicts.append(value_dict)
-                else:
-                    if any("=" in value for value in values_list):
-                        raise ValueError(
-                            "Caught line contains some = symbols but not \
-                            all of them do. aborting run"
-                        )
+                if detailed_help:
+                    # check whether the descriptions of help_all and detailed help are the same
+                    if not fail_silently:
+                        if not parameter["description"] == detailed_help["description"]:
+                            print(json.dumps(parameter, indent=4))
 
-                    for j, val in enumerate(values_list):
-                        value_dict[j] = val
-                    value_dicts.append(value_dict)
+                    ## put values into help all super dict
+                    # input type
+                    parameter["parameter_value_input_type"] = detailed_help[
+                        "parameter_value_input_type"
+                    ]
 
-    if len(value_dicts) == 0:
-        print(
-            "Sorry, didnt find any line matching your header {}".format(selected_header)
-        )
-        return None
+                    # default
+                    parameter["default"] = detailed_help["default"]
 
-    keys = value_dicts[0].keys()
+                    # macros
+                    if "macros" in detailed_help.keys():
+                        parameter["macros"] = detailed_help["macros"]
 
-    # Construct final dict.
-    final_values_dict = defaultdict(list)
-    for value_dict in value_dicts:
-        for key in keys:
-            final_values_dict[key].append(value_dict[key])
+                section["parameters"][parameter_name] = parameter
 
-    return final_values_dict
 
+    if print_help:
+        print(json.dumps(help_all_super_dict, indent=4))
+
+    return help_all_super_dict
+
+########################################################
+# logfile functions
+########################################################
 
 def load_logfile(logfile):
     """
@@ -643,3 +661,130 @@ def load_logfile(logfile):
         event_list.append(" ".join(split_line[9:]))
 
     print(event_list)
+
+########################################################
+# Ensemble dict functions
+########################################################
+
+def inspect_dict(dict_1, indent=0):
+    """
+    Function to inspect a dict.
+
+    Works recursively if there is a nested dict.
+
+    Prints out keys and their value types
+    """
+
+    for key, value in dict_1.items():
+        print("\t"*indent, key, type(value))
+        if isinstance(value, dict):
+            inspect_dict(value, indent=indent+1)
+
+def merge_dicts(dict_1, dict_2):
+    """
+    Function to merge two dictionaries.
+
+    Behaviour:
+
+    When dict keys are present in both, we decide based on the value types how to combine them:
+    - dictionaries will be merged by calling recursively calling this function again
+    - numbers will be added
+    - (opt) lists will be appended
+
+    - In the case that the instances do not match: for now I will raise an error
+
+    When dict keys are only present in one of either, we just add the content to the new dict
+    """
+
+    # Set up new dict
+    new_dict = {}
+
+    #
+    keys_1 = dict_1.keys()
+    keys_2 = dict_2.keys()
+
+    # Find overlapping keys of both dicts
+    overlapping_keys = set(keys_1).intersection(set(keys_2))
+
+    # Find the keys that are unique
+    unique_to_dict_1 = set(keys_1).difference(set(keys_2))
+    unique_to_dict_2 = set(keys_2).difference(set(keys_1))
+
+    # Add the unique keys to the new dict
+    for key in unique_to_dict_1:
+        if isinstance(dict_1[key], (float, int)):
+            new_dict[key] = dict_1[key]
+        else:
+            new_dict[key] = dict_1[key].deepcopy()
+
+    for key in unique_to_dict_2:
+        if isinstance(dict_2[key], (float, int)):
+            new_dict[key] = dict_2[key]
+        else:
+            new_dict[key] = dict_2[key].deepcopy()
+
+    # Go over the common keys:
+    for key in overlapping_keys:
+        # See whether the types are actually similar
+        if not type(dict_1[key]) is type(dict_2[key]):
+            print("Error {} and {} are not of the same type and cannot be merged".format(
+                dict_1[key], dict_2[key]))
+            raise ValueError
+
+        # TODO: Create a matrix of combinations here.
+        # TODO: Could maybe be more compact
+        else:
+            # ints
+            if isinstance(dict_1[key], int) and isinstance(dict_2[key], int):
+                new_dict[key] = dict_1[key] + dict_2[key]
+
+            # floats
+            elif isinstance(dict_1[key], float) and isinstance(dict_2[key], float):
+                new_dict[key] = dict_1[key] + dict_2[key]
+
+            # lists
+            elif isinstance(dict_1[key], list) and isinstance(dict_2[key], list):
+                new_dict[key] = dict_1[key] + dict_2[key]
+
+            # dicts
+            elif isinstance(dict_1[key], dict) and isinstance(dict_2[key], dict):
+                new_dict[key] = merge_dicts(dict_1[key], dict_2[key])
+
+            else:
+                print("Object types {},{} not supported".format(
+                    type(dict_1[key]), type(dict_2[key])))
+    #
+    return new_dict
+
+class binarycDecoder(json.JSONDecoder):
+    """
+    Custom decoder to transform the numbers that are strings to actual floats
+    """
+
+    def decode(self, s):
+        result = super().decode(s)  # result = super(Decoder, self).decode(s) for Python 2.x
+        return self._decode(result)
+
+    def _decode(self, o):
+        """
+        Depending on the type of object, it will determine whether to loop over the elements,
+        or try to change the type of the object from string to float
+
+        The try except might be a somewhat rough solution but it catches all cases.
+        """
+
+
+        # Check if we can turn it into a float
+        # if isinstance(o, str) or isinstance(o, unicode):
+        if isinstance(o, str):
+            try:
+                return float(o)
+            except ValueError:
+                return o
+        elif isinstance(o, dict):
+            return {k: self._decode(v) for k, v in o.items()}
+        elif isinstance(o, list):
+            return [self._decode(v) for v in o]
+        else:
+            return o
+
diff --git a/tests/test_persistent_data.py b/tests/test_persistent_data.py
index 40b3cef17ab799593217dfc25dec04554fe1af83..b1ecc360444a1ce4deb977fb870ca86a0873da67 100644
--- a/tests/test_persistent_data.py
+++ b/tests/test_persistent_data.py
@@ -8,46 +8,7 @@ import json
 import textwrap
 import binary_c_python_api
 
-from mergedict import ConfigDict
-from mergedict import MergeDict
-
-class SumDict(MergeDict):
-      @MergeDict.dispatch(float)
-      def merge_float(this, other):
-          return this + other
-
-
-class Decoder(json.JSONDecoder):
-    """
-    Custom decoder to transform the numbers that are strings to actual floats
-    """
-
-    def decode(self, s):
-        result = super().decode(s)  # result = super(Decoder, self).decode(s) for Python 2.x
-        return self._decode(result)
-
-    def _decode(self, o):
-        """
-        Depending on the type of object is will determine whether to loop over the elements,
-        or try to change the type of the object from string to float
-
-        The try except might be a somewhat rough solution but it catches all cases.
-        """
-
-
-        # Check if we can turn it into a float
-        # if isinstance(o, str) or isinstance(o, unicode):
-        if isinstance(o, str):
-            try:
-                return float(o)
-            except ValueError:
-                return o
-        elif isinstance(o, dict):
-            return {k: self._decode(v) for k, v in o.items()}
-        elif isinstance(o, list):
-            return [self._decode(v) for v in o]
-        else:
-            return o
+from binarycpython.utils.functions import binarycDecoder
 
 ####
 
@@ -108,7 +69,6 @@ def test_passing_persistent_data_to_run_system():
     # printf("combined double system vs deferred double system?:")
 
 def ensemble_output():
-
     m1 = 15.0  # Msun
     m2 = 14.0  # Msun
     separation = 0  # 0 = ignored, use period
@@ -130,7 +90,7 @@ def ensemble_output():
     ensemble_jsons_1 = [line for line in output_1.splitlines() if line.startswith("ENSEMBLE_JSON")]
 
     start = time.time()
-    json_1 = json.loads(ensemble_jsons_1[0][len("ENSEMBLE_JSON "):], cls=Decoder)
+    json_1 = json.loads(ensemble_jsons_1[0][len("ENSEMBLE_JSON "):], cls=binarycDecoder)
     stop = time.time()
 
     print(json.dumps(json_1, indent=4))
@@ -169,8 +129,8 @@ def adding_ensemble_output():
     ensemble_jsons_1 = [line for line in output_1.splitlines() if line.startswith("ENSEMBLE_JSON")]
     ensemble_jsons_2 = [line for line in output_2.splitlines() if line.startswith("ENSEMBLE_JSON")]
 
-    json_1 = json.loads(ensemble_jsons_1[0][len("ENSEMBLE_JSON "):], cls=Decoder)
-    json_2 = json.loads(ensemble_jsons_2[0][len("ENSEMBLE_JSON "):], cls=Decoder)
+    json_1 = json.loads(ensemble_jsons_1[0][len("ENSEMBLE_JSON "):], cls=binarycDecoder)
+    json_2 = json.loads(ensemble_jsons_2[0][len("ENSEMBLE_JSON "):], cls=binarycDecoder)
 
     # test_1_total_dict = SumDict(json_1)
     # test_1_total_dict.merge(json_2)
@@ -211,7 +171,7 @@ def adding_ensemble_output():
 
     ensemble_jsons_deferred = [line for line in output_total_deferred.splitlines() if line.startswith("ENSEMBLE_JSON")]
 
-    json_deferred = json.loads(ensemble_jsons_deferred[0][len("ENSEMBLE_JSON "):], cls=Decoder)
+    json_deferred = json.loads(ensemble_jsons_deferred[0][len("ENSEMBLE_JSON "):], cls=binarycDecoder)
 
     with open("json_deferred.json", 'w') as f:
         f.write(json.dumps(json_deferred, indent=4))
@@ -237,7 +197,7 @@ def adding_ensemble_output():
 
     ensemble_jsons_deferred_and_output = [line for line in output_2_deferred_and_output.splitlines() if line.startswith("ENSEMBLE_JSON")]
 
-    json_deferred_and_output = json.loads(ensemble_jsons_deferred_and_output[0][len("ENSEMBLE_JSON "):], cls=Decoder)
+    json_deferred_and_output = json.loads(ensemble_jsons_deferred_and_output[0][len("ENSEMBLE_JSON "):], cls=binarycDecoder)
 
     with open("json_deferred_and_output.json", 'w') as f:
         f.write(json.dumps(json_deferred_and_output, indent=4))
@@ -245,6 +205,10 @@ def adding_ensemble_output():
     print("Single deferred done\n")
 
 def test_free_and_json_output():
+    """
+    Function that tests the freeing of the memory address and the output of the json
+    """
+
     m1 = 2  # Msun
     m2 = 0.1  # Msun
     separation = 0  # 0 = ignored, use period
@@ -263,12 +227,14 @@ def test_free_and_json_output():
     argstring_1 = argstring_template.format(
         m1, m2, separation, orbital_period, eccentricity, metallicity, max_evolution_time, "1")
 
+    # Evolve and defer output
     print("evolving")
     output_1_deferred = binary_c_python_api.run_system(argstring=argstring_1, persistent_data_memaddr=persistent_data_memaddr)
     print("Evolved")
     print("Output:")
     print(textwrap.indent(str(output_1_deferred), "\t"))
 
+    # Free memory address
     print("freeing")
     json_output_by_freeing = binary_c_python_api.free_persistent_data_memaddr_and_return_json_output(persistent_data_memaddr)
     print("Freed")
@@ -298,7 +264,7 @@ def full_output():
     ensemble_jsons_1 = [line for line in output_1.splitlines() if line.startswith("ENSEMBLE_JSON")]
 
     start = time.time()
-    json_1 = json.loads(ensemble_jsons_1[0][len("ENSEMBLE_JSON "):], cls=Decoder)
+    json_1 = json.loads(ensemble_jsons_1[0][len("ENSEMBLE_JSON "):], cls=binarycDecoder)
     stop = time.time()
 
     print("took {}s to decode".format(stop-start))
@@ -306,6 +272,10 @@ def full_output():
     with open("json_full_ensemble.json", 'w') as f:
         f.write(json.dumps(json_1, indent=4))
 
+
+
+
+
 ####
 if __name__ == "__main__":
     # test_return_persistent_data_memaddr()
@@ -313,4 +283,4 @@ if __name__ == "__main__":
     # ensemble_output()
     # adding_ensemble_output()
     # test_free_and_json_output()
-    full_output()
\ No newline at end of file
+    full_output()