diff --git a/binarycpython/tests/test_functions.py b/binarycpython/tests/test_functions.py
index 86c24698c9575feb330ef3ac62f738c1924e39df..069f6094b07bf3279fc29964847220cd5ba0c703 100644
--- a/binarycpython/tests/test_functions.py
+++ b/binarycpython/tests/test_functions.py
@@ -13,6 +13,7 @@ from binarycpython.utils.functions import *
 
 TMP_DIR = temp_dir("tests", "test_functions")
 
+
 class dummy:
     """
     Dummy class to be used in the merge_dicts
@@ -111,7 +112,9 @@ class test_temp_dir(unittest.TestCase):
         general_temp_dir = tempfile.gettempdir()
 
         self.assertTrue(
-            os.path.isdir(os.path.join(general_temp_dir, "binary_c_python-{}".format(username)))
+            os.path.isdir(
+                os.path.join(general_temp_dir, "binary_c_python-{}".format(username))
+            )
         )
         self.assertTrue(
             os.path.join(general_temp_dir, "binary_c_python-{}".format(username))
diff --git a/binarycpython/utils/dicts.py b/binarycpython/utils/dicts.py
index 77b47f8c83905ed7cb9f6a4cc4b511e15c1cb96e..d0c046dd08b0e7b46ccb582f7d6ac6dc67cddd95 100644
--- a/binarycpython/utils/dicts.py
+++ b/binarycpython/utils/dicts.py
@@ -17,20 +17,24 @@ from collections import (
 # the float conversion, nothing else.
 def keys_to_floats(json_data):
     # assumes nested dicts ...
-    #new_data = {}
+    # new_data = {}
 
     # but this copies the variable type, but has some
     # pointless copying
-    #new_data = copy.copy(json_data)
-    #new_data.clear()
+    # new_data = copy.copy(json_data)
+    # new_data.clear()
 
     # this adopts the type correctly *and* is fast
     new_data = type(json_data)()
 
-    for k,v in json_data.items():
+    for k, v in json_data.items():
         if isinstance(v, list):
-            v = [ keys_to_floats(item) if isinstance(item, collections.abc.Mapping) \
-                          else item for item in v ]
+            v = [
+                keys_to_floats(item)
+                if isinstance(item, collections.abc.Mapping)
+                else item
+                for item in v
+            ]
         elif isinstance(v, collections.abc.Mapping):
             # dict, ordereddict, etc.
             v = keys_to_floats(v)
@@ -41,6 +45,7 @@ def keys_to_floats(json_data):
             new_data[k] = v
     return new_data
 
+
 def recursive_change_key_to_float(input_dict):
     """
     Function to recursively change the key to float
@@ -99,6 +104,7 @@ def recursive_change_key_to_string(input_dict):
 
     return new_dict
 
+
 ############################################################
 # code to walk a dictionary recursively based on
 # https://stackoverflow.com/questions/13687924/setting-a-value-in-a-nested-python-dictionary-given-a-list-of-indices-and-value
@@ -106,24 +112,27 @@ def _nested_set(dic, keys, value):
     for key in keys[:-1]:
         dic = dic.setdefault(key, {})
     dic[keys[-1]] = value
+
+
 def _nested_get(dic, keys):
     for key in keys[:-1]:
         dic = dic.setdefault(key, {})
     return dic[keys[-1]]
 
+
 # function to walk through the dictionary, multiplying
 # only float values by a const
-def _recursive_normalize_floats(path,d,const,parent=None,ignore=None):
+def _recursive_normalize_floats(path, d, const, parent=None, ignore=None):
     if not parent:
         parent = d
-    for k,v in d.items():
+    for k, v in d.items():
         if ignore and k in ignore:
             continue
 
-        if isinstance(v,float):
+        if isinstance(v, float):
             path.append(k)
             # must be a float, multiply by the constant
-            _nested_set(parent,path,v * const)
+            _nested_set(parent, path, v * const)
             path.pop()
         elif isinstance(v, str) or isinstance(v, int):
             path.append(k)
@@ -134,16 +143,20 @@ def _recursive_normalize_floats(path,d,const,parent=None,ignore=None):
             path.pop()
         # dicts
         # note: isinstance isn't enough, we need to check the Mapping
-        elif isinstance(v,collections.abc.Mapping):
+        elif isinstance(v, collections.abc.Mapping):
             path.append(k)
             # nested dict
-            _recursive_normalize_floats(path,v,const,parent=parent)
+            _recursive_normalize_floats(path, v, const, parent=parent)
             path.pop()
         else:
-            print ("###Type {} not recognized: {}.{}={}".format(type(v), ".".join(path),k, v))
+            print(
+                "###Type {} not recognized: {}.{}={}".format(
+                    type(v), ".".join(path), k, v
+                )
+            )
 
 
-def multiply_float_values(d,const,ignore=None):
+def multiply_float_values(d, const, ignore=None):
     """
     multiply_float_values : A function to recursively multiply values of a (nested) dictionary that are floats by a constant. Nested dictionaries call this function recursively.
 
@@ -151,8 +164,9 @@ def multiply_float_values(d,const,ignore=None):
         d = the dictionary
         const = the constant that multiplies float values
     """
-    path=[]
-    _recursive_normalize_floats(path,d,const,parent=d,ignore=ignore)
+    path = []
+    _recursive_normalize_floats(path, d, const, parent=d, ignore=ignore)
+
 
 def subtract_dicts(dict_1: dict, dict_2: dict) -> dict:
     """
@@ -293,6 +307,7 @@ def subtract_dicts(dict_1: dict, dict_2: dict) -> dict:
     #
     return new_dict
 
+
 class AutoVivificationDict(dict):
     """
     Implementation of perl's autovivification feature, by overriding the
@@ -322,6 +337,7 @@ class AutoVivificationDict(dict):
             self = other
         return self
 
+
 def inspect_dict(
     input_dict: dict, indent: int = 0, print_structure: bool = True
 ) -> dict:
@@ -535,7 +551,7 @@ def update_dicts(dict_1: dict, dict_2: dict) -> dict:
     # so instead we take a (shallow) copy of dict_1 which will
     # have the same type as dict_1, then clear it. (There must
     # be a better way to do this...)
-    new_dict = dict_1.copy() # OrderedDict()  # TODO: check if this still works
+    new_dict = dict_1.copy()  # OrderedDict()  # TODO: check if this still works
     new_dict.clear()
 
     keys_1 = dict_1.keys()
@@ -673,8 +689,8 @@ def custom_sort_dict(input_dict):
         return new_dict
     return input_dict
 
-def filter_dict(arg_dict: dict,
-                filter_list: list) -> dict:
+
+def filter_dict(arg_dict: dict, filter_list: list) -> dict:
     """
     Function to filter out keys that contain values included in
     filter_list
diff --git a/binarycpython/utils/distribution_functions.py b/binarycpython/utils/distribution_functions.py
index 9e6658e771d0ac16047bac4e87c741212667297c..d6a99452af87c021707bd06012bae4695ca087f9 100644
--- a/binarycpython/utils/distribution_functions.py
+++ b/binarycpython/utils/distribution_functions.py
@@ -138,6 +138,7 @@ def const(
     prob = 1.0 / (max_bound - min_bound)
     return prob
 
+
 @functools.lru_cache(maxsize=16)
 def powerlaw_constant(
     min_val: Union[int, float], max_val: Union[int, float], k: Union[int, float]
@@ -206,6 +207,7 @@ def powerlaw(
     # )
     return prob
 
+
 @functools.lru_cache(maxsize=16)
 def calculate_constants_three_part_powerlaw(
     m0: Union[int, float],
@@ -326,6 +328,7 @@ def three_part_powerlaw(
 
     return prob
 
+
 @functools.lru_cache(maxsize=16)
 def gaussian_normalizing_const(
     mean: Union[int, float],
@@ -830,17 +833,17 @@ def Izzard2012_period_distribution(
         N = 200.0  # Resolution for normalisation. I hope 1000 is enough
         dlP = (10.0 - log10Pmin) / N
         C = 0  # normalisation constant.
-        #print("LOOP",log10Pmin)
+        # print("LOOP",log10Pmin)
         for lP in np.arange(log10Pmin, 10, dlP):
             C += dlP * Izzard2012_period_distribution(10 ** lP, M1, log10Pmin)
 
         distribution_constants["Izzard2012"][M1][log10Pmin] = 1.0 / C
-        #print(
+        # print(
         # "Normalisation constant for Izzard2012 M={} (log10Pmin={}) is\
         # {}\n".format(
         #     M1, log10Pmin, distribution_constants["Izzard2012"][M1][log10Pmin]
         # )
-        #)
+        # )
 
     lP = math.log10(P)
     # log period
@@ -1001,7 +1004,9 @@ def poisson(lambda_val, n, nmax=None, verbosity=0):
     distribution_constants["poisson_cache"][cachekey] = p_val
 
     verbose_print(
-        "\tMoe and di Stefano 2017: Poisson({}, {}, {}): {}".format(lambda_val, n, nmax, p_val),
+        "\tMoe and di Stefano 2017: Poisson({}, {}, {}): {}".format(
+            lambda_val, n, nmax, p_val
+        ),
         verbosity,
         _MOE2017_VERBOSITY_LEVEL,
     )
@@ -1227,7 +1232,9 @@ def Moe_di_Stefano_2017_multiplicity_fractions(options, verbosity=0):
         )
 
     verbose_print(
-        "\tMoe and di Stefano 2017: Moe_di_Stefano_2017_multiplicity_fractions: {}".format(str(result)),
+        "\tMoe and di Stefano 2017: Moe_di_Stefano_2017_multiplicity_fractions: {}".format(
+            str(result)
+        ),
         verbosity,
         _MOE2017_VERBOSITY_LEVEL,
     )
@@ -1771,7 +1778,13 @@ def calc_e_integral(
 
 
 def calc_P_integral(
-        options, min_logP, max_logP, integrals_string, interpolator_name, mass_string, verbosity=0
+    options,
+    min_logP,
+    max_logP,
+    integrals_string,
+    interpolator_name,
+    mass_string,
+    verbosity=0,
 ):
     """
     Function to calculate the P integral
@@ -1855,9 +1868,7 @@ def Moe_di_Stefano_2017_pdf(options, verbosity=0):
     """
 
     verbose_print(
-        "\tMoe_di_Stefano_2017_pdf with options:\n\t\t{}".format(
-            json.dumps(options)
-        ),
+        "\tMoe_di_Stefano_2017_pdf with options:\n\t\t{}".format(json.dumps(options)),
         verbosity,
         _MOE2017_VERBOSITY_LEVEL,
     )
@@ -1984,7 +1995,8 @@ def Moe_di_Stefano_2017_pdf(options, verbosity=0):
                         ],  # Contains the table of data
                         nparams=3,  # log10M, log10P, e
                         ndata=1,  #
-                        verbosity=verbosity - (_MOE2017_VERBOSITY_INTERPOLATOR_LEVEL - 1),
+                        verbosity=verbosity
+                        - (_MOE2017_VERBOSITY_INTERPOLATOR_LEVEL - 1),
                     )
                     verbose_print(
                         "\tMoe_di_Stefano_2017_pdf: Created new e interpolator: {}".format(
@@ -2010,11 +2022,17 @@ def Moe_di_Stefano_2017_pdf(options, verbosity=0):
         # Calculate P integral or use cached value
 
         # get the periods from the Moecahe
-        min_logP = float(Moecache['logperiods'][0])
-        max_logP = float(Moecache['logperiods'][-1])
+        min_logP = float(Moecache["logperiods"][0])
+        max_logP = float(Moecache["logperiods"][-1])
 
         calc_P_integral(
-            options, min_logP, max_logP, "P_integrals", "rinterpolator_log10P", "M_1", verbosity
+            options,
+            min_logP,
+            max_logP,
+            "P_integrals",
+            "rinterpolator_log10P",
+            "M_1",
+            verbosity,
         )
 
         # Set probabilty for P1
@@ -2132,13 +2150,11 @@ def Moe_di_Stefano_2017_pdf(options, verbosity=0):
 
             # TODO: Is this a correct assumption?
             max_sep = 10.0 * options["sep"] * (1.0 + options["ecc"])
-            min_P2 = calc_period_from_sep(options["M_1+M_2"],
-                                          options["mmin"],
-                                          max_sep)
+            min_P2 = calc_period_from_sep(options["M_1+M_2"], options["mmin"], max_sep)
             min_logP2 = math.log10(min_P2)
-            #max_logP2 = 10.0
-            #min_logP = Moecache['logperiods'][0]
-            max_logP2 = float(Moecache['logperiods'][-1])
+            # max_logP2 = 10.0
+            # min_logP = Moecache['logperiods'][0]
+            max_logP2 = float(Moecache["logperiods"][-1])
 
             if options["P2"] < min_P2:
                 # period is too short : system is not hierarchical
@@ -2332,9 +2348,7 @@ def Moe_di_Stefano_2017_pdf(options, verbosity=0):
 
     # check for input of multiplicity
     elif multiplicity not in range(1, 5):
-        msg = "\tMoe_di_Stefano_2017_pdf: Unknown multiplicity {}".format(
-            multiplicity
-        )
+        msg = "\tMoe_di_Stefano_2017_pdf: Unknown multiplicity {}".format(multiplicity)
         verbose_print(
             msg,
             verbosity,
diff --git a/binarycpython/utils/ensemble.py b/binarycpython/utils/ensemble.py
index 5a2ff362b91a1dc045fb0ddc53925a8b9b4254f8..6b2a0ab3b5ab4bb2b86ba7f988c288f4bb0e7964 100644
--- a/binarycpython/utils/ensemble.py
+++ b/binarycpython/utils/ensemble.py
@@ -7,7 +7,12 @@ population ensemble using the binarycpython package
 import astropy.units as u
 import binarycpython.utils.moe_di_stefano_2017_data as moe_di_stefano_2017_data
 from binarycpython import _binary_c_bindings
-from binarycpython.utils.dicts import keys_to_floats,recursive_change_key_to_float,custom_sort_dict,recursive_change_key_to_string
+from binarycpython.utils.dicts import (
+    keys_to_floats,
+    recursive_change_key_to_float,
+    custom_sort_dict,
+    recursive_change_key_to_string,
+)
 import bz2
 import collections
 from colorama import Fore, Back, Style
@@ -36,85 +41,95 @@ import types
 from typing import Union, Any
 
 import simplejson
-#import orjson
 
+# import orjson
 
-def ensemble_setting(ensemble,parameter_name):
+
+def ensemble_setting(ensemble, parameter_name):
     """
     Function to get the setting of parameter_name in the given ensemble, or return the default value.
     """
     value = None
 
     try:
-        value = ensemble['metadata']['settings']['population_settings']['bse_options'][parameter_name]
+        value = ensemble["metadata"]["settings"]["population_settings"]["bse_options"][
+            parameter_name
+        ]
     except KeyError:
         value = None
 
     if value is None:
         try:
-            value = ensemble['metadata']['settings']['population_settings']['grid_options'][parameter_name]
+            value = ensemble["metadata"]["settings"]["population_settings"][
+                "grid_options"
+            ][parameter_name]
         except KeyError:
             value = None
 
     if value is None:
         try:
-            value = ensemble['metadata']['settings']['population_settings']['custom_options'][parameter_name]
+            value = ensemble["metadata"]["settings"]["population_settings"][
+                "custom_options"
+            ][parameter_name]
         except KeyError:
             value = None
 
     # not found, try the default
     if value is None:
         try:
-            value = ensemble['metadata']['settings']['binary_c_defaults'][parameter_name]
+            value = ensemble["metadata"]["settings"]["binary_c_defaults"][
+                parameter_name
+            ]
         except KeyError:
             value = None
 
     return value
 
+
 def open_ensemble(filename):
     """
     Function to open an ensemble at filename for reading and decompression if required.
     """
     compression = ensemble_compression(filename)
-    if ensemble_file_type(filename) is 'msgpack':
-        flags = 'rb'
+    if ensemble_file_type(filename) is "msgpack":
+        flags = "rb"
     else:
-        flags = 'rt'
-    if compression is 'bzip2':
-        file_object = bz2.open(filename,flags)
-    elif compression is 'gzip':
-        file_object = gzip.open(filename,flags)
+        flags = "rt"
+    if compression is "bzip2":
+        file_object = bz2.open(filename, flags)
+    elif compression is "gzip":
+        file_object = gzip.open(filename, flags)
     else:
-        file_object = open(filename,flags)
+        file_object = open(filename, flags)
     return file_object
 
+
 def ensemble_compression(filename):
     """
     Return the compression type of the ensemble file, based on its filename extension.
     """
-    if filename.endswith('.bz2'):
-        return 'bzip2'
-    elif filename.endswith('.gz'):
-        return 'gzip'
+    if filename.endswith(".bz2"):
+        return "bzip2"
+    elif filename.endswith(".gz"):
+        return "gzip"
     else:
         return None
 
+
 def ensemble_file_type(filename):
     """
     Returns the file type of an ensemble file.
     """
-    if '.json' in filename:
-        filetype = 'JSON'
-    elif '.msgpack' in filename:
-        filetype = 'msgpack'
+    if ".json" in filename:
+        filetype = "JSON"
+    elif ".msgpack" in filename:
+        filetype = "msgpack"
     else:
         filetype = None
     return filetype
 
-def load_ensemble(filename,
-                  convert_float_keys=True,
-                  select_keys=None,
-                  timing=False):
+
+def load_ensemble(filename, convert_float_keys=True, select_keys=None, timing=False):
     """
     Function to load an ensemble file, even if it is compressed,
     and return its contents as a Python dictionary.
@@ -134,60 +149,67 @@ def load_ensemble(filename,
     filetype = ensemble_file_type(filename)
 
     if not filetype or not file_object:
-        print("Unknown filetype : your ensemble should be saved either as JSON or msgpack data.")
+        print(
+            "Unknown filetype : your ensemble should be saved either as JSON or msgpack data."
+        )
         sys.exit()
 
-    with Halo(text='Loading', interval=250, spinner='moon',color='yellow'):
+    with Halo(text="Loading", interval=250, spinner="moon", color="yellow"):
         tstart = time.time()
         _loaded = False
+
         def _hook(obj):
             nonlocal _loaded
             if _loaded == False:
                 _loaded = True
-                print("\nLoaded {} data, now putting in a dictionary".format(filetype),
-                      flush=True)
+                print(
+                    "\nLoaded {} data, now putting in a dictionary".format(filetype),
+                    flush=True,
+                )
             return obj
 
-        if filetype is 'JSON':
+        if filetype is "JSON":
             # orjson promises to be fast, but it doesn't seem to be
             # and fails on "Infinity"... oops
-            #data = orjson.loads(file_object.read())
+            # data = orjson.loads(file_object.read())
 
             # simplejson is faster than standard json and "just works"
             # on the big Moe set in 37s
-            data = simplejson.load(file_object,
-                                   object_hook=_hook)
+            data = simplejson.load(file_object, object_hook=_hook)
 
             # standard json module
             # on the big Moe set takes 42s
-            #data = json.load(file_object,
+            # data = json.load(file_object,
             #                 object_hook=_hook)
-        elif filetype is 'msgpack':
-            data = msgpack.load(file_object,
-                                object_hook=_hook)
+        elif filetype is "msgpack":
+            data = msgpack.load(file_object, object_hook=_hook)
 
         if timing:
-            print("\n\nTook {} s to load the data\n\n".format(time.time() - tstart),
-                  flush=True)
+            print(
+                "\n\nTook {} s to load the data\n\n".format(time.time() - tstart),
+                flush=True,
+            )
 
     # strip non-selected keys, if a list is given in select_keys
     if select_keys:
-        keys = list(data['ensemble'].keys())
+        keys = list(data["ensemble"].keys())
         for key in keys:
             if not key in select_keys:
-                del data['ensemble'][key]
+                del data["ensemble"][key]
 
     # perhaps convert floats?
     tstart = time.time()
     if convert_float_keys:
         # timings are for 100 iterations on the big Moe data set
-        #data = format_ensemble_results(data) # 213s
-        #data = recursive_change_key_to_float(data) # 61s
-        data = keys_to_floats(data) # 6.94s
+        # data = format_ensemble_results(data) # 213s
+        # data = recursive_change_key_to_float(data) # 61s
+        data = keys_to_floats(data)  # 6.94s
 
     if timing:
-        print("\n\nTook {} s to convert floats\n\n".format(time.time() - tstart),
-              flush=True)
+        print(
+            "\n\nTook {} s to convert floats\n\n".format(time.time() - tstart),
+            flush=True,
+        )
 
     # return data
     return data
@@ -334,6 +356,7 @@ class BinaryCEncoder(json.JSONEncoder):
         # Let the base class default method raise the TypeError
         return json.JSONEncoder.default(self, o)
 
+
 def format_ensemble_results(ensemble_dictionary):
     """
     Function to handle all the steps of formatting the ensemble output again.
diff --git a/binarycpython/utils/functions.py b/binarycpython/utils/functions.py
index c02e199ea0ea564558871ef7fd3129e3055f3b26..746e62fc5a700cd31632b4babcf61c41b165db13 100644
--- a/binarycpython/utils/functions.py
+++ b/binarycpython/utils/functions.py
@@ -41,7 +41,8 @@ import types
 from typing import Union, Any
 
 import simplejson
-#import orjson
+
+# import orjson
 
 ########################################################
 # Unsorted
@@ -70,22 +71,28 @@ def check_if_in_shell():
 
     return in_shell
 
+
 def timedelta(delta):
     """
     Function to convert a length of time (float, seconds) to a string for
     human-readable output.
     """
     # currently use the humanize module to do this
-    t = humanize.time.precisedelta(dt.timedelta(seconds=delta),format='%0.2f',minimum_unit='milliseconds',suppress=['milliseconds'])
+    t = humanize.time.precisedelta(
+        dt.timedelta(seconds=delta),
+        format="%0.2f",
+        minimum_unit="milliseconds",
+        suppress=["milliseconds"],
+    )
     # and use more compact units
-    t = t.replace(" days and","d")
-    t = t.replace(" hours and","h")
-    t = t.replace(" minutes and","m")
-    t = t.replace(" seconds and","s")
-    t = t.replace(" days","d")
-    t = t.replace(" hours","h")
-    t = t.replace(" minutes","m")
-    t = t.replace(" seconds","s")
+    t = t.replace(" days and", "d")
+    t = t.replace(" hours and", "h")
+    t = t.replace(" minutes and", "m")
+    t = t.replace(" seconds and", "s")
+    t = t.replace(" days", "d")
+    t = t.replace(" hours", "h")
+    t = t.replace(" minutes", "m")
+    t = t.replace(" seconds", "s")
     return t
 
 
@@ -157,6 +164,7 @@ def conv_time_units(t):
         units = "h"
     return (t, units)
 
+
 def bin_data(value, binwidth):
     """
     Function that bins the data
@@ -206,7 +214,6 @@ def get_size(obj, seen=None):
     return size
 
 
-
 def get_moe_di_stefano_dataset(options, verbosity=0):
     """
     Function to get the default Moe and di Stefano dataset or accept a user input.
@@ -406,6 +413,7 @@ def verbose_print(message: str, verbosity: int, minimal_verbosity: int) -> None:
         print(message)
         sys.stdout.flush()
 
+
 def remove_file(file: str, verbosity: int = 0) -> None:
     """
     Function to remove files but with verbosity
@@ -1017,7 +1025,7 @@ def filter_arg_dict(arg_dict: dict) -> dict:
         filtered dictionary (pairs with NULL and Function values are removed)
     """
 
-    return filter_dict(arg_dict.copy(),["NULL", "Function"])
+    return filter_dict(arg_dict.copy(), ["NULL", "Function"])
 
 
 def create_arg_string(
diff --git a/binarycpython/utils/grid.py b/binarycpython/utils/grid.py
index f6897a7c588f09c46e50c571b58d30503be779a1..68b4cb80155cb67ad50a16c86f0cc0ec2660c866 100644
--- a/binarycpython/utils/grid.py
+++ b/binarycpython/utils/grid.py
@@ -40,6 +40,7 @@ import strip_ansi
 import sys
 import time
 import uuid
+
 _count = 0
 from typing import Union, Any
 from collections import (
@@ -62,7 +63,6 @@ from binarycpython.utils.custom_logging_functions import (
     autogen_C_logging_code,
     binary_c_log_code,
     create_and_load_logging_function,
-
 )
 
 from binarycpython.utils.functions import (
@@ -79,7 +79,7 @@ from binarycpython.utils.functions import (
     get_ANSI_colours,
     check_if_in_shell,
     format_number,
-    timedelta
+    timedelta,
 )
 from binarycpython.utils.ensemble import (
     binaryc_json_serializer,
@@ -160,7 +160,9 @@ class Population:
         self.set(**kwargs)
 
         # Load Moe and di Stefano options
-        self.grid_options["Moe2017_options"] = copy.deepcopy(moe_di_stefano_default_options)
+        self.grid_options["Moe2017_options"] = copy.deepcopy(
+            moe_di_stefano_default_options
+        )
         self.custom_options["Moe2017_JSON_data"] = None
 
         # Write MOE2017 options to a file. NOTE: not sure why i put this here anymore
@@ -481,23 +483,22 @@ class Population:
             raise ValueError(msg)
 
     def add_grid_variable(
-            self,
-            name: str,
-            parameter_name: str,
-            longname: str,
-            valuerange: Union[list, str],
-            samplerfunc: str,
-            probdist: str,
-            dphasevol: Union[str, int],
-            gridtype: str = "centred",
-            branchpoint: int = 0,
-            branchcode: Union[str, None] = None,
-            precode: Union[str, None] = None,
-            postcode: Union[str, None] = None,
-            topcode: Union[str, None] = None,
-            bottomcode: Union[str, None] = None,
-            condition: Union[str, None] = None,
-
+        self,
+        name: str,
+        parameter_name: str,
+        longname: str,
+        valuerange: Union[list, str],
+        samplerfunc: str,
+        probdist: str,
+        dphasevol: Union[str, int],
+        gridtype: str = "centred",
+        branchpoint: int = 0,
+        branchcode: Union[str, None] = None,
+        precode: Union[str, None] = None,
+        postcode: Union[str, None] = None,
+        topcode: Union[str, None] = None,
+        bottomcode: Union[str, None] = None,
+        condition: Union[str, None] = None,
     ) -> None:
         """
         Function to add grid variables to the grid_options.
@@ -587,10 +588,10 @@ class Population:
         # Add grid_variable
         grid_variable = {
             "name": name,
-            "parameter_name":parameter_name,
+            "parameter_name": parameter_name,
             "longname": longname,
             "valuerange": valuerange,
-            #"resolution": 0,
+            # "resolution": 0,
             "samplerfunc": samplerfunc,
             "precode": precode,
             "postcode": postcode,
@@ -600,13 +601,22 @@ class Population:
             "gridtype": gridtype,
             "branchpoint": branchpoint,
             "branchcode": branchcode,
-            "topcode" : topcode,
-            "bottomcode" : bottomcode,
+            "topcode": topcode,
+            "bottomcode": bottomcode,
             "grid_variable_number": len(self.grid_options["_grid_variables"]),
         }
 
         # Check for gridtype input
-        if not gridtype in ["edge", "right", "right edge", "left", "left edge", "centred", "centre", "center"]:
+        if not gridtype in [
+            "edge",
+            "right",
+            "right edge",
+            "left",
+            "left edge",
+            "centred",
+            "centre",
+            "center",
+        ]:
             msg = "Unknown gridtype value. Please start another one"
             raise ValueError(msg)
 
@@ -619,7 +629,6 @@ class Population:
             1,
         )
 
-
     ###################################################
     # Return functions
     ###################################################
@@ -816,7 +825,7 @@ class Population:
                 )
             return outfile
 
-    def _boxed(self,*list,colour='yellow on black',boxchar='*',separator="\n"):
+    def _boxed(self, *list, colour="yellow on black", boxchar="*", separator="\n"):
         """
         Function to output a list of strings in a single box.
 
@@ -854,11 +863,11 @@ class Population:
         out = self.ANSI_colours[colour] + header + "\n"
 
         # loop over strings to output, padding as required
-        for n,string in enumerate(strings):
-            if lengths[n]%2 == 1:
-                string = ' ' + string
-            pad = ' ' * int((strlen - lengths[n])/2)
-            out = out + boxchar + ' ' + pad + string + pad + ' ' + boxchar +"\n"
+        for n, string in enumerate(strings):
+            if lengths[n] % 2 == 1:
+                string = " " + string
+            pad = " " * int((strlen - lengths[n]) / 2)
+            out = out + boxchar + " " + pad + string + pad + " " + boxchar + "\n"
         # close output and return
         out = out + header + "\n" + self.ANSI_colours["reset"]
         return out
@@ -1064,7 +1073,6 @@ class Population:
 
         return analytics_dict
 
-
     def _evolve_population(self):
         """
         Function to evolve populations. This handles the setting up, evolving
@@ -1106,29 +1114,32 @@ class Population:
         self.grid_options["_end_time_evolution"] = time.time()
 
         # Log and print some information
-        dtsecs = self.grid_options["_end_time_evolution"] - self.grid_options["_start_time_evolution"]
+        dtsecs = (
+            self.grid_options["_end_time_evolution"]
+            - self.grid_options["_start_time_evolution"]
+        )
         string1 = "Population-{} finished!\nThe total probability is {:g}.".format(
-            self.grid_options["_population_id"],
-            self.grid_options["_probtot"])
+            self.grid_options["_population_id"], self.grid_options["_probtot"]
+        )
         string2 = "It took a total of {dtsecs} to run {starcount} systems on {ncores} cores\n = {totaldtsecs} of CPU time.\nMaximum memory use {memuse:.3f} MB".format(
             dtsecs=timedelta(dtsecs),
             starcount=self.grid_options["_total_starcount"],
             ncores=self.grid_options["num_cores"],
             totaldtsecs=timedelta(dtsecs * self.grid_options["num_cores"]),
-            memuse=sum(self.shared_memory["max_memory_use_per_thread"])
+            memuse=sum(self.shared_memory["max_memory_use_per_thread"]),
         )
-        verbose_print(self._boxed(string1,string2),
-                      self.grid_options["verbosity"],
-                      0)
+        verbose_print(self._boxed(string1, string2), self.grid_options["verbosity"], 0)
 
         if self.grid_options["_errors_found"]:
             # Some information afterwards
             verbose_print(
-                self._boxed("During the run {} failed systems were found\nwith a total probability of {:g}\nwith the following unique error codes: {} ".format(
-                    self.grid_options["_failed_count"],
-                    self.grid_options["_failed_prob"],
-                    self.grid_options["_failed_systems_error_codes"],
-                )),
+                self._boxed(
+                    "During the run {} failed systems were found\nwith a total probability of {:g}\nwith the following unique error codes: {} ".format(
+                        self.grid_options["_failed_count"],
+                        self.grid_options["_failed_prob"],
+                        self.grid_options["_failed_systems_error_codes"],
+                    )
+                ),
                 self.grid_options["verbosity"],
                 0,
             )
@@ -1193,8 +1204,12 @@ class Population:
         for system_number, system_dict in enumerate(generator):
 
             # skip systems before start_at, and apply modulo
-            if(not(system_number >= self.grid_options['start_at'] and
-                   (system_number - self.grid_options['start_at']) % self.grid_options['modulo'] == 0)):
+            if not (
+                system_number >= self.grid_options["start_at"]
+                and (system_number - self.grid_options["start_at"])
+                % self.grid_options["modulo"]
+                == 0
+            ):
                 continue
 
             # Put job in queue
@@ -1380,8 +1395,8 @@ class Population:
             # print("thread {}: persistent_data_memaddr: ".format(self.process_ID), persistent_data_memaddr)
 
         # vb2 logging
-        if self.grid_options['verbosity'] >= 2:
-            self.vb2print(full_system_dict,binary_cmdline_string)
+        if self.grid_options["verbosity"] >= 2:
+            self.vb2print(full_system_dict, binary_cmdline_string)
 
         # Get results binary_c
         # print("running: {}".format(binary_cmdline_string))
@@ -1623,12 +1638,12 @@ class Population:
             # Each new system overrides the previous
             if self.grid_options["log_args"]:
                 with open(
-                        os.path.join(
-                            self.grid_options["log_args_dir"],
-                            "current_system",
-                            "process_{}.txt".format(self.process_ID),
-                        ),
-                        "w",
+                    os.path.join(
+                        self.grid_options["log_args_dir"],
+                        "current_system",
+                        "process_{}.txt".format(self.process_ID),
+                    ),
+                    "w",
                 ) as f:
                     binary_cmdline_string = self._return_argline(full_system_dict)
                     f.write(binary_cmdline_string)
@@ -1664,12 +1679,12 @@ class Population:
             # Debug line: logging all the lines
             if self.grid_options["log_runtime_systems"] == 1:
                 with open(
-                        os.path.join(
-                            self.grid_options["tmp_dir"],
-                            "runtime_systems",
-                            "process_{}.txt".format(self.process_ID),
-                        ),
-                        "a+",
+                    os.path.join(
+                        self.grid_options["tmp_dir"],
+                        "runtime_systems",
+                        "process_{}.txt".format(self.process_ID),
+                    ),
+                    "a+",
                 ) as f:
                     binary_cmdline_string = self._return_argline(full_system_dict)
                     f.write(
@@ -1703,12 +1718,12 @@ class Population:
 
         # Set status to finishing
         with open(
-                os.path.join(
-                    self.grid_options["tmp_dir"],
-                    "process_status",
-                    "process_{}.txt".format(self.process_ID),
-                ),
-                "w",
+            os.path.join(
+                self.grid_options["tmp_dir"],
+                "process_status",
+                "process_{}.txt".format(self.process_ID),
+            ),
+            "w",
         ) as f:
             f.write("FINISHING")
             f.close()
@@ -1744,7 +1759,10 @@ class Population:
                 )
 
             # save the ensemble chunk to a file
-            if self.grid_options["save_ensemble_chunks"] is True or self.grid_options["combine_ensemble_with_thread_joining"] is False:
+            if (
+                self.grid_options["save_ensemble_chunks"] is True
+                or self.grid_options["combine_ensemble_with_thread_joining"] is False
+            ):
 
                 output_file = os.path.join(
                     self.custom_options["data_dir"],
@@ -1754,14 +1772,12 @@ class Population:
                 )
                 verbose_print(
                     "Writing process {} JSON ensemble chunk output to {} ".format(
-                        ID,
-                        output_file
+                        ID, output_file
                     ),
                     self.grid_options["verbosity"],
                     1,
                 )
-                self.write_ensemble(output_file,
-                                    ensemble_raw_output)
+                self.write_ensemble(output_file, ensemble_raw_output)
 
             # combine ensemble chunks
             if self.grid_options["combine_ensemble_with_thread_joining"] is True:
@@ -1778,9 +1794,7 @@ class Population:
         ##########################
         # Clean up and return
         verbose_print(
-            "process {} free memory and return ".format(
-                ID
-            ),
+            "process {} free memory and return ".format(ID),
             self.grid_options["verbosity"],
             1,
         )
@@ -1808,7 +1822,7 @@ class Population:
         end_process_time = datetime.datetime.now()
 
         # thread end message
-        colour = 'cyan on black'
+        colour = "cyan on black"
         verbose_print(
             self._boxed(
                 "{colour}Process {ID} finished:\ngenerator started at {start}\ngenerator finished at {end}\ntotal: {timesecs}\nof which {binary_c_secs} with binary_c\nRan {nsystems} systems\nwith a total probability of {psystems:g}\n{failcolour}This thread had {nfail} failing systems{colour}\n{failcolour}with a total failed probability of {pfail}{colour}\n{zerocolour}Skipped a total of {nzero} zero-probability systems{zeroreset}\n".format(
@@ -1816,18 +1830,30 @@ class Population:
                     ID=ID,
                     start=start_process_time.isoformat(),
                     end=end_process_time.isoformat(),
-                    timesecs=timedelta((end_process_time - start_process_time).total_seconds()),
+                    timesecs=timedelta(
+                        (end_process_time - start_process_time).total_seconds()
+                    ),
                     binary_c_secs=timedelta(total_time_calling_binary_c),
                     nsystems=number_of_systems_run,
                     psystems=probability_of_systems_run,
-                    failcolour=self.ANSI_colours['red'] if self.grid_options["_failed_count"]>0 else '',
-                    failreset=self.ANSI_colours[colour] if self.grid_options["_failed_count"]>0 else '',
+                    failcolour=self.ANSI_colours["red"]
+                    if self.grid_options["_failed_count"] > 0
+                    else "",
+                    failreset=self.ANSI_colours[colour]
+                    if self.grid_options["_failed_count"] > 0
+                    else "",
                     nfail=self.grid_options["_failed_count"],
                     pfail=self.grid_options["_failed_prob"],
                     nzero=zero_prob_stars_skipped,
-                    zerocolour=self.ANSI_colours["yellow"] if zero_prob_stars_skipped>0 else '',
-                    zeroreset=self.ANSI_colours[colour] if zero_prob_stars_skipped>0 else ''
-                ),colour=colour),
+                    zerocolour=self.ANSI_colours["yellow"]
+                    if zero_prob_stars_skipped > 0
+                    else "",
+                    zeroreset=self.ANSI_colours[colour]
+                    if zero_prob_stars_skipped > 0
+                    else "",
+                ),
+                colour=colour,
+            ),
             self.grid_options["verbosity"],
             1,
         )
@@ -1849,32 +1875,30 @@ class Population:
             "zero_prob_stars_skipped": zero_prob_stars_skipped,
         }
         with open(
-                os.path.join(
-                    self.grid_options["tmp_dir"],
-                    "process_summary",
-                    "process_{}.json".format(self.process_ID),
-                ),
-                "w",
+            os.path.join(
+                self.grid_options["tmp_dir"],
+                "process_summary",
+                "process_{}.json".format(self.process_ID),
+            ),
+            "w",
         ) as f:
             f.write(json.dumps(summary_dict, indent=4))
             f.close()
 
         # Set status to finished
         with open(
-                os.path.join(
-                    self.grid_options["tmp_dir"],
-                    "process_status",
-                    "process_{}.txt".format(self.process_ID),
-                ),
-                "w",
+            os.path.join(
+                self.grid_options["tmp_dir"],
+                "process_status",
+                "process_{}.txt".format(self.process_ID),
+            ),
+            "w",
         ) as f:
             f.write("FINISHED")
             f.close()
 
         verbose_print(
-            "process {} queue put output_dict ".format(
-                ID
-            ),
+            "process {} queue put output_dict ".format(ID),
             self.grid_options["verbosity"],
             1,
         )
@@ -1888,9 +1912,7 @@ class Population:
         # TODO: make a cleanup function for the individual threads
         # TODO: make sure this is necessary. Actually its probably not, because we have a centralised queue
         verbose_print(
-            "process {} return ".format(
-                ID
-            ),
+            "process {} return ".format(ID),
             self.grid_options["verbosity"],
             1,
         )
@@ -1948,7 +1970,6 @@ class Population:
             msg = "No actual evolution options passed to the evolve call. Aborting"
             raise ValueError(msg)
 
-
     ############################################################
     def _setup(self):
         """
@@ -2004,7 +2025,7 @@ class Population:
                 raise ValueError
 
             if not any(
-                    [key.startswith("ensemble_filter_") for key in self.bse_options]
+                [key.startswith("ensemble_filter_") for key in self.bse_options]
             ):
                 verbose_print(
                     "Warning: Running the ensemble without any filter requires a lot of available RAM",
@@ -2052,11 +2073,16 @@ class Population:
 
                 verbose_print(
                     self._boxed(
-                        "Total starcount for this run is {starcount}".format(starcount=self.grid_options["_total_starcount"]),
-                        "Total probability is {probtot:g}".format(probtot=self.grid_options["_probtot"])
+                        "Total starcount for this run is {starcount}".format(
+                            starcount=self.grid_options["_total_starcount"]
+                        ),
+                        "Total probability is {probtot:g}".format(
+                            probtot=self.grid_options["_probtot"]
+                        ),
                     ),
                     self.grid_options["verbosity"],
-                    0)
+                    0,
+                )
 
                 if self.grid_options["exit_after_dry_run"]:
                     sys.exit()
@@ -2148,7 +2174,6 @@ class Population:
         # Unload/free custom_logging_code
         # TODO: cleanup custom logging code.
 
-
     ###################################################
     # Grid code functions
     #
@@ -2235,12 +2260,14 @@ class Population:
             # Set some values in the generated code:
             "# Setting initial values\n",
             "_total_starcount = 0\n",
-            "starcounts = [0 for i in range({})]\n".format(total_grid_variables+1),
+            "starcounts = [0 for i in range({})]\n".format(total_grid_variables + 1),
             "probabilities = {}\n",
             "probabilities_list = [0 for i in range({})]\n".format(
-                total_grid_variables+1
+                total_grid_variables + 1
+            ),
+            "probabilities_sum = [0 for i in range({})]\n".format(
+                total_grid_variables + 1
             ),
-            "probabilities_sum = [0 for i in range({})]\n".format(total_grid_variables+1),
             "parameter_dict = {}\n",
             "phasevol = 1\n",
         )
@@ -2263,14 +2290,12 @@ class Population:
         )
 
         for grid_variable_el in sorted(
-                self.grid_options["_grid_variables"].items(),
-                key=lambda x: x[1]["grid_variable_number"],
+            self.grid_options["_grid_variables"].items(),
+            key=lambda x: x[1]["grid_variable_number"],
         ):
             # Make probabilities dict
             grid_variable = grid_variable_el[1]
-            self._add_code(
-                'probabilities["{}"] = 0\n'.format(grid_variable["name"])
-            )
+            self._add_code('probabilities["{}"] = 0\n'.format(grid_variable["name"]))
 
         #################################################################################
         # Start of code generation
@@ -2283,23 +2308,22 @@ class Population:
         # Generate code
         print("Generating grid code")
         for loopnr, grid_variable_el in enumerate(
-                sorted(
-                    self.grid_options["_grid_variables"].items(),
-                    key=lambda x: x[1]["grid_variable_number"],
-                )
+            sorted(
+                self.grid_options["_grid_variables"].items(),
+                key=lambda x: x[1]["grid_variable_number"],
+            )
         ):
             verbose_print(
                 "Constructing/adding: {}".format(grid_variable_el[0]),
-                self.grid_options['verbosity'],
-                2)
+                self.grid_options["verbosity"],
+                2,
+            )
             grid_variable = grid_variable_el[1]
 
             ####################
             # top code
             if grid_variable["topcode"]:
-                self._add_code(
-                    grid_variable["topcode"]
-                )
+                self._add_code(grid_variable["topcode"])
 
             #########################
             # Setting up the for loop
@@ -2343,7 +2367,7 @@ class Population:
                     + "\n"
                 )
 
-            #if grid_variable["resolution"] == 0:
+            # if grid_variable["resolution"] == 0:
             # sample from the sampled_values
             self._add_code(
                 "for {}_sample_number in range(max(0,len(sampled_values_{})-1)):".format(
@@ -2353,7 +2377,7 @@ class Population:
             )
 
             # old code: use resolution passed in
-            #else:
+            # else:
             #    # use resolution passed in
             #    self._add_code(
             #        "for {}_sample_number in range({} if {} != 0 else max(0,len(sampled_values_{})-1)):".format(
@@ -2374,16 +2398,14 @@ class Population:
             self._add_code("else:\n")
             self._add_code(
                 "{}_this = {}_sample_number; ".format(
-                    grid_variable["name"],
-                    grid_variable["name"]
+                    grid_variable["name"], grid_variable["name"]
                 ),
                 indent=1,
             )
             self._add_code(
                 "\n",
                 "{}_next = {}_this + 1".format(
-                    grid_variable["name"],
-                    grid_variable["name"]
+                    grid_variable["name"], grid_variable["name"]
                 ),
                 "\n",
             )
@@ -2393,7 +2415,7 @@ class Population:
             #   if you sample linearly in that thing.
             # if phasevol is <= 0 then we SKIP that whole loop. Its not working then.
             if (
-                    not grid_variable["dphasevol"] == -1
+                not grid_variable["dphasevol"] == -1
             ):  # A method to turn off this calculation and allow a phasevol = 1
                 self._add_code(
                     "phasevol_{} = sampled_values_{}[{}_next]-sampled_values_{}[{}_this]".format(
@@ -2423,7 +2445,7 @@ class Population:
                     grid_variable["name"],
                     grid_variable["name"],
                     grid_variable["name"],
-                    grid_variable["name"]
+                    grid_variable["name"],
                 )
                 + "\n",
                 "continue\n",
@@ -2443,9 +2465,9 @@ class Population:
 
             # select point location based on gridtype (left, centre or right)
             if (
-                    grid_variable["gridtype"] == "edge"
-                    or grid_variable["gridtype"] == "left"
-                    or grid_variable["gridtype"] == "left edge"
+                grid_variable["gridtype"] == "edge"
+                or grid_variable["gridtype"] == "left"
+                or grid_variable["gridtype"] == "left edge"
             ):
                 self._add_code(
                     "{} = sampled_values_{}[{}_this]".format(
@@ -2456,8 +2478,8 @@ class Population:
                     + "\n"
                 )
             elif (
-                    grid_variable["gridtype"] == "right"
-                    or grid_variable["gridtype"] == "right edge"
+                grid_variable["gridtype"] == "right"
+                or grid_variable["gridtype"] == "right edge"
             ):
                 self._add_code(
                     +"{} = sampled_values_{}[{}_next]".format(
@@ -2468,9 +2490,9 @@ class Population:
                     + "\n"
                 )
             elif (
-                    grid_variable["gridtype"] == "centred"
-                    or grid_variable["gridtype"] == "centre"
-                    or grid_variable["gridtype"] == "center"
+                grid_variable["gridtype"] == "centred"
+                or grid_variable["gridtype"] == "centre"
+                or grid_variable["gridtype"] == "center"
             ):
                 self._add_code(
                     "{} = 0.5 * (sampled_values_{}[{}_next] + sampled_values_{}[{}_this])".format(
@@ -2508,7 +2530,7 @@ class Population:
                 )
 
                 # Add condition failed action:
-                if self.grid_options['verbosity'] >= 3:
+                if self.grid_options["verbosity"] >= 3:
                     self._add_code(
                         'print("Grid generator: Condition for {} not met!")'.format(
                             grid_variable["name"]
@@ -2577,7 +2599,6 @@ class Population:
                 + "\n",
             )
 
-
             if grid_variable["grid_variable_number"] == 0:
                 self._add_code(
                     "probabilities_list[0] = d{}".format(grid_variable["name"]) + "\n"
@@ -2608,17 +2629,14 @@ class Population:
             # Increment starcount for this parameter
             self._add_code(
                 "\n",
-                "# Increment starcount for {}\n".format(
-                    grid_variable["name"]
-                ),
+                "# Increment starcount for {}\n".format(grid_variable["name"]),
                 "starcounts[{}] += 1".format(
                     grid_variable["grid_variable_number"],
                 )
                 + "\n",
                 # Add value to dict
                 'parameter_dict["{}"] = {}'.format(
-                    grid_variable["parameter_name"],
-                    grid_variable["parameter_name"]
+                    grid_variable["parameter_name"], grid_variable["parameter_name"]
                 )
                 + "\n",
                 "\n",
@@ -2644,9 +2662,7 @@ class Population:
             ####################
             # bottom code
             if grid_variable["bottomcode"]:
-                self._add_code(
-                    grid_variable["bottomcode"]
-                )
+                self._add_code(grid_variable["bottomcode"])
 
         self._increment_indent_depth(-1)
         self._add_code("\n")
@@ -2741,10 +2757,12 @@ class Population:
         self.grid_options["gridcode_filename"] = gridcode_filename
 
         verbose_print(
-            "{}Writing grid code to {} [dry_run = {}]{}".format(self.ANSI_colours['blue'],
-                                                                gridcode_filename,
-                                                                dry_run,
-                                                                self.ANSI_colours['reset']),
+            "{}Writing grid code to {} [dry_run = {}]{}".format(
+                self.ANSI_colours["blue"],
+                gridcode_filename,
+                dry_run,
+                self.ANSI_colours["reset"],
+            ),
             self.grid_options["verbosity"],
             1,
         )
@@ -2753,28 +2771,30 @@ class Population:
             file.write(self.code_string)
 
         # perhaps create symlink
-        if self.grid_options['symlink latest gridcode']:
+        if self.grid_options["symlink latest gridcode"]:
             global _count
-            symlink = os.path.join(self.grid_options['tmp_dir'],
-                                   'binary_c_grid-latest' + str(_count))
+            symlink = os.path.join(
+                self.grid_options["tmp_dir"], "binary_c_grid-latest" + str(_count)
+            )
             _count += 1
             if os.path.exists(symlink):
                 os.unlink(symlink)
 
             try:
-                os.symlink(gridcode_filename,symlink)
+                os.symlink(gridcode_filename, symlink)
                 verbose_print(
-                    "{}Symlinked grid code to {} {}".format(self.ANSI_colours['blue'],
-                                                            symlink,
-                                                            self.ANSI_colours['reset']),
+                    "{}Symlinked grid code to {} {}".format(
+                        self.ANSI_colours["blue"], symlink, self.ANSI_colours["reset"]
+                    ),
                     self.grid_options["verbosity"],
                     1,
                 )
             except OSError:
                 print("symlink failed")
 
-
-    def _write_gridcode_system_call(self, grid_variable, dry_run, branchpoint, branchcode):
+    def _write_gridcode_system_call(
+        self, grid_variable, dry_run, branchpoint, branchcode
+    ):
         #################################################################################
         # Here are the calls to the queuing or other solution. this part is for every system
         # Add comment
@@ -2782,9 +2802,7 @@ class Population:
         self._add_code("#" * 40 + "\n")
 
         if branchcode:
-            self._add_code(
-                "# Branch code\nif {}:\n",
-                branchcode)
+            self._add_code("# Branch code\nif {}:\n", branchcode)
 
         if branchpoint:
             self._add_code(
@@ -3479,30 +3497,30 @@ class Population:
     # Functions that aren't ordered yet
     ###################################################
 
-    def write_ensemble(self,output_file,data=None,sort_keys=True,indent=4):
+    def write_ensemble(self, output_file, data=None, sort_keys=True, indent=4):
         """
-        write_ensemble : Write ensemble results to a file.
+        write_ensemble : Write ensemble results to a file.
 
-    Args:
-        output_file : the output filename.
+        Args:
+            output_file : the output filename.
 
-                      If the filename has an extension that we recognise,
-                      e.g. .gz or .bz2, we compress the output appropriately.
+                          If the filename has an extension that we recognise,
+                          e.g. .gz or .bz2, we compress the output appropriately.
 
-                      The filename should contain .json or .msgpack, the two
-                      currently-supported formats.
+                          The filename should contain .json or .msgpack, the two
+                          currently-supported formats.
 
-                      Usually you'll want to output to JSON, but we can
-                      also output to msgpack.
+                          Usually you'll want to output to JSON, but we can
+                          also output to msgpack.
 
-        data :   the data dictionary to be converted and written to the file.
-                 If not set, this defaults to self.grid_ensemble_results.
+            data :   the data dictionary to be converted and written to the file.
+                     If not set, this defaults to self.grid_ensemble_results.
 
-        sort_keys : if True, and output is to JSON, the keys will be sorted.
-                    (default: True, passed to json.dumps)
+            sort_keys : if True, and output is to JSON, the keys will be sorted.
+                        (default: True, passed to json.dumps)
 
-        indent : number of space characters used in the JSON indent. (Default: 4,
-                 passed to json.dumps)
+            indent : number of space characters used in the JSON indent. (Default: 4,
+                     passed to json.dumps)
         """
         # TODO: consider writing this in a formatted structure
 
@@ -3517,40 +3535,40 @@ class Population:
             data = self.grid_ensemble_results
 
         if not file_type:
-            print("Unable to determine file type from ensemble filename {} : it should be .json or .msgpack.").format(output_file)
+            print(
+                "Unable to determine file type from ensemble filename {}: it should be .json or .msgpack.".format(
+                    output_file
+                )
+            )
             sys.exit()
-        elif file_type is 'JSON':
+        elif file_type == "JSON":
             # JSON output
-            if compression == 'gzip':
+            if compression == "gzip":
                 # gzip
                 f = gzip.open(output_file, "wt")
-            elif compression == 'bzip2':
+            elif compression == "bzip2":
                 # bzip2
                 f = bz2.open(output_file, "wt")
             else:
                 # raw output (not compressed)
                 f = open(output_file, "wt")
-            f.write(json.dumps(data,
-                               sort_keys=sort_keys,
-                               indent=indent))
+            f.write(json.dumps(data, sort_keys=sort_keys, indent=indent))
 
-        elif file_type is 'msgpack':
+        elif file_type == "msgpack":
             # msgpack output
-            if compression == 'gzip':
+            if compression == "gzip":
                 f = gzip.open(output_file, "wb")
-            elif compression == 'bzip2':
+            elif compression == "bzip2":
                 f = bz2.open(output_file, "wb")
             else:
                 f = open(output_file, "wb")
-            msgpack.dump(data,f)
+            msgpack.dump(data, f)
         f.close()
 
         print(
             "Thread {thread}: Wrote ensemble results to file: {colour}{file}{reset} (file type {file_type}, compression {compression})".format(
                 thread=self.process_ID,
                 file=output_file,
-                colour=self.ANSI_colours['green'],
-                reset=self.ANSI_colours['reset'],
+                colour=self.ANSI_colours["green"],
+                reset=self.ANSI_colours["reset"],
                 file_type=file_type,
                 compression=compression,
             )
@@ -3558,10 +3576,10 @@ class Population:
 
     ############################################################
     def write_binary_c_calls_to_file(
-            self,
-            output_dir: Union[str, None] = None,
-            output_filename: Union[str, None] = None,
-            include_defaults: bool = False,
+        self,
+        output_dir: Union[str, None] = None,
+        output_filename: Union[str, None] = None,
+        include_defaults: bool = False,
     ) -> None:
         """
         Function that loops over the grid code and writes the generated parameters to a file.
@@ -3829,10 +3847,11 @@ class Population:
                     if not self.grid_options["_errors_exceeded"]:
                         verbose_print(
                             self._boxed(
-                            "Process {} exceeded the maximum ({}) number of failing systems. Stopped logging them to files now".format(
-                                self.process_ID,
-                                self.grid_options["failed_systems_threshold"],
-                            )),
+                                "Process {} exceeded the maximum ({}) number of failing systems. Stopped logging them to files now".format(
+                                    self.process_ID,
+                                    self.grid_options["failed_systems_threshold"],
+                                )
+                            ),
                             self.grid_options["verbosity"],
                             1,
                         )
@@ -3869,7 +3888,6 @@ class Population:
         if not options:
             options = {}
 
-
         # Take the option dictionary that was given and override.
         options = update_dicts(self.grid_options["Moe2017_options"], options)
         self.grid_options["Moe2017_options"] = copy.deepcopy(options)
@@ -3880,11 +3898,11 @@ class Population:
             exist_ok=True,
         )
         with open(
-                os.path.join(
-                    os.path.join(self.grid_options["tmp_dir"], "moe_distefano"),
-                    "moeopts.dat",
-                ),
-                "w",
+            os.path.join(
+                os.path.join(self.grid_options["tmp_dir"], "moe_distefano"),
+                "moeopts.dat",
+            ),
+            "w",
         ) as f:
             f.write(json.dumps(self.grid_options["Moe2017_options"], indent=4))
             f.close()
@@ -3895,7 +3913,7 @@ class Population:
         """
 
         # Only if the grid is loaded and Moecache contains information
-        if (not self.grid_options["_loaded_Moe2017_data"]): # and not Moecache:
+        if not self.grid_options["_loaded_Moe2017_data"]:  # and not Moecache:
 
             if self.custom_options["Moe2017_JSON_data"]:
                 # Use the existing (perhaps modified) JSON data
@@ -3950,7 +3968,12 @@ class Population:
 
                 current_logperiods = sorted(json_data["log10M1"][logmass]["logP"])
                 if not (logperiods == current_logperiods):
-                    msg = "Period values are not consistent throughout the dataset\logperiods = " + ' '.join(str(x) for x in logperiods) + "\nCurrent periods = " + ' '.join(str(x) for x in current_logperiods)
+                    msg = (
+                        "Period values are not consistent throughout the dataset\logperiods = "
+                        + " ".join(str(x) for x in logperiods)
+                        + "\nCurrent periods = "
+                        + " ".join(str(x) for x in current_logperiods)
+                    )
                     verbose_print(
                         "\tMoe_di_Stefano_2017: {}".format(msg),
                         self.grid_options["verbosity"],
@@ -3975,7 +3998,7 @@ class Population:
 
             # save the logperiods list in the cache:
             # this is used in the renormalization integration
-            Moecache['logperiods'] = logperiods
+            Moecache["logperiods"] = logperiods
 
             # Write to file
             os.makedirs(
@@ -4231,15 +4254,19 @@ class Population:
             name="lnm1",
             parameter_name="M_1",
             longname="Primary mass",
-            samplerfunc=self.grid_options["Moe2017_options"]['samplerfuncs']['M'][0] or
-            "const(np.log({}), np.log({}), {})".format(
+            samplerfunc=self.grid_options["Moe2017_options"]["samplerfuncs"]["M"][0]
+            or "const(np.log({}), np.log({}), {})".format(
                 self.grid_options["Moe2017_options"]["ranges"]["M"][0],
                 self.grid_options["Moe2017_options"]["ranges"]["M"][1],
                 self.grid_options["Moe2017_options"]["resolutions"]["M"][0],
             ),
             valuerange=[
-                "np.log({})".format(self.grid_options["Moe2017_options"]["ranges"]["M"][0]),
-                "np.log({})".format(self.grid_options["Moe2017_options"]["ranges"]["M"][1]),
+                "np.log({})".format(
+                    self.grid_options["Moe2017_options"]["ranges"]["M"][0]
+                ),
+                "np.log({})".format(
+                    self.grid_options["Moe2017_options"]["ranges"]["M"][1]
+                ),
             ],
             gridtype="centred",
             dphasevol="dlnm1",
@@ -4269,8 +4296,10 @@ class Population:
                     self.grid_options["Moe2017_options"]["ranges"]["logP"][0],
                     self.grid_options["Moe2017_options"]["ranges"]["logP"][1],
                 ],
-                samplerfunc=self.grid_options["Moe2017_options"]['samplerfuncs']["logP"][0] or
-                "const({}, {}, {})".format(
+                samplerfunc=self.grid_options["Moe2017_options"]["samplerfuncs"][
+                    "logP"
+                ][0]
+                or "const({}, {}, {})".format(
                     self.grid_options["Moe2017_options"]["ranges"]["logP"][0],
                     self.grid_options["Moe2017_options"]["ranges"]["logP"][1],
                     self.grid_options["Moe2017_options"]["resolutions"]["logP"][0],
@@ -4290,10 +4319,14 @@ qmax=maximum_mass_ratio_for_RLOF(M_1, orbital_period)
                 longname="Mass ratio",
                 valuerange=[
                     self.grid_options["Moe2017_options"]["ranges"]["q"][0]
-                    if self.grid_options["Moe2017_options"].get("ranges", {}).get("q", None)
+                    if self.grid_options["Moe2017_options"]
+                    .get("ranges", {})
+                    .get("q", None)
                     else "options['Mmin']/M_1",
                     self.grid_options["Moe2017_options"]["ranges"]["q"][1]
-                    if self.grid_options["Moe2017_options"].get("ranges", {}).get("q", None)
+                    if self.grid_options["Moe2017_options"]
+                    .get("ranges", {})
+                    .get("q", None)
                     else "qmax",
                 ],
                 probdist=1,
@@ -4303,8 +4336,8 @@ qmax=maximum_mass_ratio_for_RLOF(M_1, orbital_period)
 M_2 = q * M_1
 sep = calc_sep_from_period(M_1, M_2, orbital_period)
     """,
-                samplerfunc=self.grid_options["Moe2017_options"]["samplerfuncs"]["M"][1] or
-                "const({}, {}, {})".format(
+                samplerfunc=self.grid_options["Moe2017_options"]["samplerfuncs"]["M"][1]
+                or "const({}, {}, {})".format(
                     self.grid_options["Moe2017_options"]["ranges"]["q"][0]
                     if self.grid_options["Moe2017_options"]
                     .get("ranges", {})
@@ -4335,8 +4368,10 @@ sep = calc_sep_from_period(M_1, M_2, orbital_period)
                         ],  # Just fail if not defined.
                         self.grid_options["Moe2017_options"]["ranges"]["ecc"][1],
                     ],
-                    samplerfunc=self.grid_options["Moe2017_options"]["samplerfuncs"]['ecc'][0] or
-                    "const({}, {}, {})".format(
+                    samplerfunc=self.grid_options["Moe2017_options"]["samplerfuncs"][
+                        "ecc"
+                    ][0]
+                    or "const({}, {}, {})".format(
                         self.grid_options["Moe2017_options"]["ranges"]["ecc"][
                             0
                         ],  # Just fail if not defined.
@@ -4363,8 +4398,10 @@ sep = calc_sep_from_period(M_1, M_2, orbital_period)
                         self.grid_options["Moe2017_options"]["ranges"]["logP"][0],
                         self.grid_options["Moe2017_options"]["ranges"]["logP"][1],
                     ],
-                    samplerfunc=self.grid_options["Moe2017_options"]["samplerfuncs"]["logP"][1] or
-                    "const({}, {}, {})".format(
+                    samplerfunc=self.grid_options["Moe2017_options"]["samplerfuncs"][
+                        "logP"
+                    ][1]
+                    or "const({}, {}, {})".format(
                         self.grid_options["Moe2017_options"]["ranges"]["logP"][0],
                         self.grid_options["Moe2017_options"]["ranges"]["logP"][1],
                         self.grid_options["Moe2017_options"]["resolutions"]["logP"][1],
@@ -4403,8 +4440,10 @@ M_3 = q2 * (M_1 + M_2)
 sep2 = calc_sep_from_period((M_1+M_2), M_3, orbital_period_triple)
 eccentricity2=0
 """,
-                    samplerfunc=self.grid_options["Moe2017_options"]["samplerfuncs"]["M"][2] or
-                    "const({}, {}, {})".format(
+                    samplerfunc=self.grid_options["Moe2017_options"]["samplerfuncs"][
+                        "M"
+                    ][2]
+                    or "const({}, {}, {})".format(
                         self.grid_options["Moe2017_options"]["ranges"]["q"][0]
                         if self.grid_options["Moe2017_options"]
                         .get("ranges", {})
@@ -4435,13 +4474,17 @@ eccentricity2=0
                             ],  # Just fail if not defined.
                             self.grid_options["Moe2017_options"]["ranges"]["ecc"][1],
                         ],
-                        samplerfunc=self.grid_options["Moe2017_options"]["samplerfuncs"]["ecc"][1] or
-                        "const({}, {}, {})".format(
+                        samplerfunc=self.grid_options["Moe2017_options"][
+                            "samplerfuncs"
+                        ]["ecc"][1]
+                        or "const({}, {}, {})".format(
                             self.grid_options["Moe2017_options"]["ranges"]["ecc"][
                                 0
                             ],  # Just fail if not defined.
                             self.grid_options["Moe2017_options"]["ranges"]["ecc"][1],
-                            self.grid_options["Moe2017_options"]["resolutions"]["ecc"][1],
+                            self.grid_options["Moe2017_options"]["resolutions"]["ecc"][
+                                1
+                            ],
                         ),
                     )
 
@@ -4462,11 +4505,15 @@ eccentricity2=0
                             self.grid_options["Moe2017_options"]["ranges"]["logP"][0],
                             self.grid_options["Moe2017_options"]["ranges"]["logP"][1],
                         ],
-                        samplerfunc=self.grid_options["Moe2017_options"]["samplerfuncs"]["logP"][2] or
-                        "const({}, {}, {})".format(
+                        samplerfunc=self.grid_options["Moe2017_options"][
+                            "samplerfuncs"
+                        ]["logP"][2]
+                        or "const({}, {}, {})".format(
                             self.grid_options["Moe2017_options"]["ranges"]["logP"][0],
                             self.grid_options["Moe2017_options"]["ranges"]["logP"][1],
-                            self.grid_options["Moe2017_options"]["resolutions"]["logP"][2],
+                            self.grid_options["Moe2017_options"]["resolutions"]["logP"][
+                                2
+                            ],
                         ),
                         precode="""orbital_period_quadruple = 10.0**log10per3
 q3min={}/(M_3)
@@ -4501,8 +4548,10 @@ M_4 = q3 * M_3
 sep3 = calc_sep_from_period((M_3), M_4, orbital_period_quadruple)
 eccentricity3=0
 """,
-                        samplerfunc=self.grid_options["Moe2017_options"]["samplerfuncs"]["M"][3] or
-                        "const({}, {}, {})".format(
+                        samplerfunc=self.grid_options["Moe2017_options"][
+                            "samplerfuncs"
+                        ]["M"][3]
+                        or "const({}, {}, {})".format(
                             self.grid_options["Moe2017_options"]["ranges"]["q"][0]
                             if self.grid_options["Moe2017_options"]
                             .get("ranges", {})
@@ -4518,7 +4567,10 @@ eccentricity3=0
                     )
 
                     # (optional) triples: eccentricity
-                    if self.grid_options["Moe2017_options"]["resolutions"]["ecc"][2] > 0:
+                    if (
+                        self.grid_options["Moe2017_options"]["resolutions"]["ecc"][2]
+                        > 0
+                    ):
                         self.add_grid_variable(
                             name="ecc3",
                             parameter_name="eccentricity3",
@@ -4531,15 +4583,23 @@ eccentricity3=0
                                 self.grid_options["Moe2017_options"]["ranges"]["ecc"][
                                     0
                                 ],  # Just fail if not defined.
-                                self.grid_options["Moe2017_options"]["ranges"]["ecc"][1],
+                                self.grid_options["Moe2017_options"]["ranges"]["ecc"][
+                                    1
+                                ],
                             ],
-                            samplerfunc=self.grid_options["Moe2017_options"]["samplerfuncs"]["ecc"][2] or
-                            "const({}, {}, {})".format(
+                            samplerfunc=self.grid_options["Moe2017_options"][
+                                "samplerfuncs"
+                            ]["ecc"][2]
+                            or "const({}, {}, {})".format(
                                 self.grid_options["Moe2017_options"]["ranges"]["ecc"][
                                     0
                                 ],  # Just fail if not defined.
-                                self.grid_options["Moe2017_options"]["ranges"]["ecc"][1],
-                                self.grid_options["Moe2017_options"]["resolutions"]["ecc"][2],
+                                self.grid_options["Moe2017_options"]["ranges"]["ecc"][
+                                    1
+                                ],
+                                self.grid_options["Moe2017_options"]["resolutions"][
+                                    "ecc"
+                                ][2],
                             ),
                         )
 
@@ -4593,34 +4653,34 @@ eccentricity3=0
         """
 
         default_options = {
-            'apply settings' : True,
-            'setup grid' : True,
-            'load data' : True,
-            'clean cache' : False,
-            'clean load flag' : False,
-            'clean all' : False,
-            }
+            "apply settings": True,
+            "setup grid": True,
+            "load data": True,
+            "clean cache": False,
+            "clean load flag": False,
+            "clean all": False,
+        }
         if not options:
             options = {}
-        options = update_dicts(default_options,options)
+        options = update_dicts(default_options, options)
 
         # clean cache?
-        if options['clean all'] or options['clean cache']:
+        if options["clean all"] or options["clean cache"]:
             Moecache.clear()
 
-        if options['clean all'] or options['clean load flag']:
+        if options["clean all"] or options["clean load flag"]:
             self.grid_options["_loaded_Moe2017_data"] = False
 
         # Set the user input
-        if options['apply settings']:
+        if options["apply settings"]:
             self.set_moe_di_stefano_settings(options=options)
 
         # Load the data
-        if options['load data']:
+        if options["load data"]:
             self._load_moe_di_stefano_data()
 
         # construct the grid here
-        if options['setup grid']:
+        if options["setup grid"]:
             self._set_moe_di_stefano_distributions()
 
     def _clean_interpolators(self):
@@ -4667,7 +4727,7 @@ eccentricity3=0
             raise ValueError(msg)
 
         # Go over the chosen options
-        if self.grid_options["multiplicity_fraction_function"] in [1, 'Arenou2010']:
+        if self.grid_options["multiplicity_fraction_function"] in [1, "Arenou2010"]:
             # Arenou 2010 will be used
             verbose_print(
                 "_calculate_multiplicity_fraction: Using Arenou 2010 to calculate multiplicity fractions",
@@ -4683,7 +4743,7 @@ eccentricity3=0
                 4: 0,
             }
 
-        elif self.grid_options["multiplicity_fraction_function"] in [2, 'Raghavan2010']:
+        elif self.grid_options["multiplicity_fraction_function"] in [2, "Raghavan2010"]:
             # Raghavan 2010 will be used
             verbose_print(
                 "_calculate_multiplicity_fraction: Using Raghavan (2010) to calculate multiplicity fractions",
@@ -4699,7 +4759,7 @@ eccentricity3=0
                 4: 0,
             }
 
-        elif self.grid_options["multiplicity_fraction_function"] in [3, 'Moe2017']:
+        elif self.grid_options["multiplicity_fraction_function"] in [3, "Moe2017"]:
             # We need to check several things now here:
 
             # First, are the options for the MOE2017 grid set? On start it is filled with the default settings
@@ -4796,7 +4856,7 @@ eccentricity3=0
         )
 
         # compensate for multithreading and modulo
-        tpr *= self.grid_options['num_cores'] * self.grid_options['modulo']
+        tpr *= self.grid_options["num_cores"] * self.grid_options["modulo"]
 
         if eta_secs < secs_per_day:
             fintime = time.localtime(now + eta_secs)
@@ -4831,7 +4891,7 @@ eccentricity3=0
         # masses
         for i in range(nmult):
             i1 = str(i + 1)
-            if "M_"+i1 in system_dict:
+            if "M_" + i1 in system_dict:
                 system_string += (
                     "M{}=".format(i1) + format_number(system_dict["M_" + i1]) + " "
                 )
@@ -4876,8 +4936,11 @@ eccentricity3=0
         )
 
     def vb2print(self, system_dict, cmdline_string):
-        print("Running this system now on thread {ID}\n{blue}{cmdline}{reset}\n".format(
-            ID=self.process_ID,
-            blue=self.ANSI_colours["blue"],
-            cmdline=cmdline_string,
-            reset=self.ANSI_colours["reset"]))
+        print(
+            "Running this system now on thread {ID}\n{blue}{cmdline}{reset}\n".format(
+                ID=self.process_ID,
+                blue=self.ANSI_colours["blue"],
+                cmdline=cmdline_string,
+                reset=self.ANSI_colours["reset"],
+            )
+        )
diff --git a/binarycpython/utils/grid_options_defaults.py b/binarycpython/utils/grid_options_defaults.py
index 6489876263a95d186c8c973140fd25530d823cab..5567bb76aa7d1d8d3ecfe5e9656039165631cf42 100644
--- a/binarycpython/utils/grid_options_defaults.py
+++ b/binarycpython/utils/grid_options_defaults.py
@@ -34,7 +34,7 @@ grid_options_defaults_dict = {
     "multiplicity_fraction_function": 0,  # Which multiplicity fraction function to use. 0: None, 1: Arenou 2010, 2: Rhagavan 2010, 3: Moe and di Stefano 2017
     "tmp_dir": temp_dir(),  # Setting the temp dir of the program
     "_main_pid": -1,  # Placeholder for the main process id of the run.
-    "save_ensemble_chunks" : True, # Force the ensemble chunk to be saved even if we are joining a thread (just in case the joining fails)
+    "save_ensemble_chunks": True,  # Force the ensemble chunk to be saved even if we are joining a thread (just in case the joining fails)
     "combine_ensemble_with_thread_joining": True,  # Flag on whether to combine everything and return it to the user or if false: write it to data_dir/ensemble_output_{population_id}_{thread_id}.json
     "_commandline_input": "",
     "log_runtime_systems": 0,  # whether to log the runtime of the systems (1 file per thread. stored in the tmp_dir)
@@ -120,11 +120,11 @@ grid_options_defaults_dict = {
     "_total_mass_run": 0,  # To count the total mass that thread/process has ran
     "_total_probability_weighted_mass_run": 0,  # To count the total mass * probability for each system that thread/process has ran
     "modulo": 1,  # run modulo n of the grid.
-    "start_at": 0, # start at the first model
+    "start_at": 0,  # start at the first model
     ## Grid type evolution
     "_grid_variables": {},  # grid variables
     "gridcode_filename": None,  # filename of gridcode
-    "symlink latest gridcode":True, # symlink to latest gridcode
+    "symlink latest gridcode": True,  # symlink to latest gridcode
     ## Monte carlo type evolution
     # TODO: make MC options
     ## Evolution from source file
@@ -515,8 +515,7 @@ MIN_MASS_BINARY_C = float(
 moe_di_stefano_default_options = {
     # place holder for the JSON data to be used if a file
     # isn't specified
-    "JSON" : None,
-
+    "JSON": None,
     # resolution data
     "resolutions": {
         "M": [
@@ -536,10 +535,10 @@ moe_di_stefano_default_options = {
             0,  # e3 (quadruple eccentricity) currently unused
         ],
     },
-    'samplerfuncs': {
-        'M' : [None, None, None, None],
-        'logP' : [None, None, None],
-        'ecc' : [None, None, None],
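+    # optional per-parameter sampler overrides: a None entry means the default
+    # const(...) sampler is built from the ranges and resolutions in this dict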
+    "samplerfuncs": {
+        "M": [None, None, None, None],
+        "logP": [None, None, None],
+        "ecc": [None, None, None],
     },
     "ranges": {
         # stellar masses (Msun)
diff --git a/binarycpython/utils/spacing_functions.py b/binarycpython/utils/spacing_functions.py
index ac444b31f8c2b36f4f470713a864dcdbf812638a..ac4b7554e9354d491dec9fa92c90c49a76d8db72 100644
--- a/binarycpython/utils/spacing_functions.py
+++ b/binarycpython/utils/spacing_functions.py
@@ -16,9 +16,7 @@ from binarycpython.utils.grid import Population
 
 @functools.lru_cache(maxsize=16)
 def const(
-        min_bound: Union[int, float],
-        max_bound: Union[int, float],
-        steps: int
+    min_bound: Union[int, float], max_bound: Union[int, float], steps: int
 ) -> list:
     """
     Samples a range linearly. Uses numpy linspace.
@@ -33,6 +31,7 @@ def const(
     """
     return np.linspace(min_bound, max_bound, steps + 1)
 
+
 ############################################################
 @functools.lru_cache(maxsize=16)
 def const_ranges(ranges) -> list:
@@ -59,9 +58,10 @@ def const_ranges(ranges) -> list:
 
     masses = np.empty(0)
     for range in ranges:
-        masses = np.append(masses,const(*range))
+        masses = np.append(masses, const(*range))
     return np.unique(masses)
 
+
 ############################################################
 def peak_normalized_gaussian_func(
     x: Union[int, float], mean: Union[int, float], sigma: Union[int, float]
@@ -132,24 +132,27 @@ def gaussian_zoom(
 
     return np.unique(array)
 
+
 @functools.lru_cache(maxsize=16)
-def const_dt(self,
-             dt=1000.0,
-             dlogt=0.1,
-             mmin=0.07,
-             mmax=100.0,
-             nres=1000,
-             logspacing=False,
-             tmin=3.0, # start at 3Myr
-             tmax=None, # use max_evolution_time by default
-             mindm=None, # tuple of tuples
-             maxdm=((0.07,1.0,0.1),(1.0,300.0,1.0)), # tuple of tuples
-             fsample=1.0,
-             factor=1.0,
-             logmasses=False,
-             log10masses=False,
-             showlist=False,
-             showtable=False):
+def const_dt(
+    self,
+    dt=1000.0,
+    dlogt=0.1,
+    mmin=0.07,
+    mmax=100.0,
+    nres=1000,
+    logspacing=False,
+    tmin=3.0,  # start at 3Myr
+    tmax=None,  # use max_evolution_time by default
+    mindm=None,  # tuple of tuples
+    maxdm=((0.07, 1.0, 0.1), (1.0, 300.0, 1.0)),  # tuple of tuples
+    fsample=1.0,
+    factor=1.0,
+    logmasses=False,
+    log10masses=False,
+    showlist=False,
+    showtable=False,
+):
     """
     const_dt returns a list of masses spaced at a constant age difference
 
@@ -197,39 +200,41 @@ def const_dt(self,
     # we don't want a dry run of the grid
     # we want to use the right number of CPU cores
     lifetime_population.set(
-        do_dry_run = False,
-        num_cores = self.grid_options['num_cores'],
+        do_dry_run=False,
+        num_cores=self.grid_options["num_cores"],
         max_stellar_type_1=10,
         save_ensemble_chunks=False,
-        )
+    )
 
     # make a grid in M1
     lifetime_population.add_grid_variable(
         name="lnM_1",
         parameter_name="M_1",
-        longname="log Primary mass", # == single-star mass
-        valuerange=[math.log(mmin),math.log(mmax)],
-        samplerfunc="const(math.log({mmin}),math.log({mmax}),{nres})".format(mmin=mmin,mmax=mmax,nres=nres),
-        probdist="1", # dprob/dm1 : we don't care, so just set it to 1
+        longname="log Primary mass",  # == single-star mass
+        valuerange=[math.log(mmin), math.log(mmax)],
+        samplerfunc="const(math.log({mmin}),math.log({mmax}),{nres})".format(
+            mmin=mmin, mmax=mmax, nres=nres
+        ),
+        probdist="1",  # dprob/dm1 : we don't care, so just set it to 1
         dphasevol="dlnM_1",
         precode="M_1=math.exp(lnM_1)",
         condition="",  # Impose a condition on this grid variable. Mostly for a check for yourself
-        gridtype="edge"
+        gridtype="edge",
     )
 
     # set up the parse function
-    def _parse_function(self,output):
+    def _parse_function(self, output):
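+        # expects binary_c log lines of the form
+        #     SINGLE_STAR_LIFETIME <mass> <lifetime>
+        # (units assumed to be Msun and Myr)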
         if output:
             for line in output.splitlines():
                 data = line.split()
-                if data[0] == 'SINGLE_STAR_LIFETIME':
+                if data[0] == "SINGLE_STAR_LIFETIME":
                     # append (log10(mass), log10(lifetime)) tuples
                     logm = math.log10(float(data[1]))
                     logt = math.log10(float(data[2]))
-                    #print(line)
-                    #print("logM=",logm,"M=",10.0**logm," -> logt=",logt)
-                    self.grid_results['interpolation table m->t'][logm] = logt
-                    self.grid_results['interpolation table t->m'][logt] = logm
+                    # print(line)
+                    # print("logM=",logm,"M=",10.0**logm," -> logt=",logt)
+                    self.grid_results["interpolation table m->t"][logm] = logt
+                    self.grid_results["interpolation table t->m"][logt] = logm
 
     lifetime_population.set(
         parse_function=_parse_function,
@@ -238,47 +243,43 @@ def const_dt(self,
     # run to build the interpolation table
     print("Running population to make lifetime interpolation table, please wait")
     lifetime_population.evolve()
-    #print("Data table",lifetime_population.grid_results['interpolation table t->m'])
+    # print("Data table",lifetime_population.grid_results['interpolation table t->m'])
 
     # convert to nested lists for the interpolator
     #
     # make time -> mass table
     data_table_time_mass = []
-    times = sorted(lifetime_population.grid_results['interpolation table t->m'].keys())
+    times = sorted(lifetime_population.grid_results["interpolation table t->m"].keys())
     for time in times:
-        mass = lifetime_population.grid_results['interpolation table t->m'][time]
+        mass = lifetime_population.grid_results["interpolation table t->m"][time]
         # we have to make sure the time is monotonic (not guaranteed at high mass)
-        if len(data_table_time_mass)==0:
-            data_table_time_mass.append( [ time, mass ] )
+        if len(data_table_time_mass) == 0:
+            data_table_time_mass.append([time, mass])
         elif mass < data_table_time_mass[-1][1]:
-            data_table_time_mass.append( [ time, mass ] )
+            data_table_time_mass.append([time, mass])
 
     # make mass -> time table
     data_table_mass_time = []
-    masses = sorted(lifetime_population.grid_results['interpolation table m->t'].keys())
+    masses = sorted(lifetime_population.grid_results["interpolation table m->t"].keys())
     for mass in masses:
-        time = lifetime_population.grid_results['interpolation table m->t'][mass]
-        data_table_mass_time.append( [ mass, time ] )
+        time = lifetime_population.grid_results["interpolation table m->t"][mass]
+        data_table_mass_time.append([mass, time])
 
     # set up interpolators
     interpolator_time_mass = py_rinterpolate.Rinterpolate(
-        table = data_table_time_mass,
-        nparams = 1, # mass
-        ndata = 1, # lifetime
-        verbosity = 0)
+        table=data_table_time_mass,
+        nparams=1,  # mass
+        ndata=1,  # lifetime
+        verbosity=0,
+    )
     interpolator_mass_time = py_rinterpolate.Rinterpolate(
-        table = data_table_mass_time,
-        nparams = 1, # lifetime
-        ndata = 1, # mass
-        verbosity = 0)
+        table=data_table_mass_time,
+        nparams=1,  # lifetime
+        ndata=1,  # mass
+        verbosity=0,
+    )
 
     # function to get a mass given a time (Myr)
     def _mass_from_time(linear_time):
-        return 10.0**interpolator_time_mass.interpolate([math.log10(linear_time)])[0]
+        return 10.0 ** interpolator_time_mass.interpolate([math.log10(linear_time)])[0]
 
     # function to get a time given a mass (Msun)
     def _time_from_mass(mass):
-        return 10.0**interpolator_mass_time.interpolate([math.log10(mass)])[0]
+        return 10.0 ** interpolator_mass_time.interpolate([math.log10(mass)])[0]
 
     # return a unique list
     def _uniq(_list):
@@ -286,10 +287,10 @@ def const_dt(self,
 
     # format a whole list like %g
     def _format(_list):
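+        # e.g. 1.23456789 -> 1.23457 (6 significant digits, like C's %g)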
-        return [ float("{x:g}".format(x=x)) for x in _list]
+        return [float("{x:g}".format(x=x)) for x in _list]
 
     # construct mass list, always include the min and max
-    mass_list = [mmin,mmax]
+    mass_list = [mmin, mmax]
 
     # first, make sure the stars are separated by only
     # maxdm
@@ -306,33 +307,33 @@ def const_dt(self,
                 logrange_min = math.log(range_min)
                 logrange_max = math.log(range_max)
                 while logm <= logmmax:
-                    if logm>=logrange_min and logm<=logrange_max:
+                    if logm >= logrange_min and logm <= logrange_max:
                         mass_list.append(math.exp(logm))
                     logm += dlogm
             else:
                 # use linear scale
                 m = mmin
                 while m <= mmax:
-                    if m>=range_min and m<=range_max:
+                    if m >= range_min and m <= range_max:
                         mass_list.append(m)
                     m += dm
 
     # start time loop at tmax or max_evolution_time
-    t = tmax if tmax else self.bse_options['max_evolution_time']
+    t = tmax if tmax else self.bse_options["max_evolution_time"]
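+
+    # step t backwards from this starting value down to tmin in steps of dt (or
+    # dlogt in log space), converting each time to the mass whose lifetime matches
+    # it, so that consecutive masses are separated by a roughly constant age step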
 
     # set default mass list
     if logspacing:
         logt = math.log10(t)
         logtmin = math.log10(tmin)
         while logt > logtmin:
-            m = _mass_from_time(10.0**logt)
+            m = _mass_from_time(10.0 ** logt)
             mass_list.append(m)
             logt = max(logtmin, logt - dlogt * fsample)
     else:
         while t > tmin:
             m = _mass_from_time(t)
             mass_list.append(m)
-            t = max(tmin,t - dt * fsample)
+            t = max(tmin, t - dt * fsample)
 
     # make mass list unique
     mass_list = _uniq(mass_list)
@@ -344,17 +345,17 @@ def const_dt(self,
             mindm = x[2]
             # impose a minimum dm: if two masses in the list
             # are separated by < this, remove the second
-            for index,mass in enumerate(mass_list):
-                if index > 0 and mass>=range_min and mass<=range_max:
-                    dm = mass_list[index] - mass_list[index-1]
+            for index, mass in enumerate(mass_list):
+                if index > 0 and mass >= range_min and mass <= range_max:
+                    dm = mass_list[index] - mass_list[index - 1]
                     if dm < mindm:
-                        mass_list[index-1] = 0.0
+                        mass_list[index - 1] = 0.0
             mass_list = _uniq(mass_list)
             if mass_list[0] == 0.0:
                 mass_list.remove(0.0)
 
     # apply multiplication factor if given
-    if factor and factor!=1.0:
+    if factor and factor != 1.0:
         mass_list = [m * factor for m in mass_list]
 
     # reformat numbers
@@ -364,13 +365,21 @@ def const_dt(self,
     if showtable:
         twas = 0.0
         logtwas = 0.0
-        for i,m in enumerate(mass_list):
+        for i, m in enumerate(mass_list):
             t = _time_from_mass(m)
             logt = math.log10(t)
             if twas > 0.0:
-                print("{i:4d} m={m:13g} t={t:13g} log10(t)={logt:13g} dt={dt:13g} dlog10(t)={dlogt:13g}".format(i=i,m=m,t=t,logt=logt,dt=twas-t,dlogt=logtwas-logt))
+                print(
+                    "{i:4d} m={m:13g} t={t:13g} log10(t)={logt:13g} dt={dt:13g} dlog10(t)={dlogt:13g}".format(
+                        i=i, m=m, t=t, logt=logt, dt=twas - t, dlogt=logtwas - logt
+                    )
+                )
             else:
-                print("{i:4d} m={m:13g} t={t:13g} log10(t)={logt:13g}".format(i=i,m=m,t=t,logt=logt))
+                print(
+                    "{i:4d} m={m:13g} t={t:13g} log10(t)={logt:13g}".format(
+                        i=i, m=m, t=t, logt=logt
+                    )
+                )
             twas = t
             logtwas = logt
         exit()
@@ -385,6 +394,6 @@ def const_dt(self,
         mass_list = np.log10(mass_list)
 
     if showlist:
-        print("const_dt mass list ({} masses)\n".format(len(mass_list)),mass_list)
+        print("const_dt mass list ({} masses)\n".format(len(mass_list)), mass_list)
 
     return mass_list
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 2ed92c07178bdba851ec0d59af3ad0e8e01c7e0d..c023df6e2c4b22c124d133b2fa41d8970f979bda 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -98,6 +98,8 @@ html_static_path = ["_static"]
 import m2r2
 
 current_m2r2_setup = m2r2.setup
+
+
 def patched_m2r2_setup(app):
 
     try:
@@ -111,6 +113,7 @@ def patched_m2r2_setup(app):
         parallel_write_safe=True,
     )
 
+
 m2r2.setup = patched_m2r2_setup
 
 # Generate some custom documentations for this version of binarycpython and binary_c
@@ -129,6 +132,7 @@ print("Done")
 
 # Generate a custom footer
 from functions import write_custom_footer
+
 print("Generating custom footer")
 write_custom_footer()
 print("Done")
diff --git a/docs/source/functions.py b/docs/source/functions.py
index 791b8aa9f84b96ff1aed22c020774227b0f4293a..ca960dbdf465e0b77d861a47751cb0588a4e8454 100644
--- a/docs/source/functions.py
+++ b/docs/source/functions.py
@@ -6,24 +6,26 @@ from binarycpython.utils.functions import call_binary_c_config
 
 from git import Repo
 
+
 def generate_browser_url(ssh_url, branch=None):
     """
     Function to generate the browser url for a git repo
     """
 
-    base = ssh_url.split('@')[-1].replace('.git', '').replace(':','/').strip()
+    base = ssh_url.split("@")[-1].replace(".git", "").replace(":", "/").strip()
 
     if not branch:
-        return 'https://{}'.format(base)
+        return "https://{}".format(base)
     else:
-        return 'https://{}/-/tree/{}'.format(base, branch)
+        return "https://{}/-/tree/{}".format(base, branch)
+
 
 def write_custom_footer():
     """
     Function to write the custom footer to the template file
     """
 
-    TARGET_DIR = './_templates'
+    TARGET_DIR = "./_templates"
 
     output_text = """
 {{% extends '!footer.html' %}}
@@ -37,28 +39,33 @@ Using binary_c with bit branch {binary_c_git_branch}: git revision: {binary_c_gi
 {{% endblock %}}
 """
 
-    binary_c_git_branch = call_binary_c_config('git_branch').strip()
-    binary_c_git_revision = call_binary_c_config('git_revision').strip()
-    binary_c_git_url = generate_browser_url(call_binary_c_config('git_url').strip(), binary_c_git_branch)
+    binary_c_git_branch = call_binary_c_config("git_branch").strip()
+    binary_c_git_revision = call_binary_c_config("git_revision").strip()
+    binary_c_git_url = generate_browser_url(
+        call_binary_c_config("git_url").strip(), binary_c_git_branch
+    )
 
     # for binarycpython git
-    base_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+    base_dir = os.path.dirname(
+        os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+    )
     local_repo = Repo(path=base_dir)
 
     binarycpython_git_branch = local_repo.active_branch.name
     binarycpython_git_revision = local_repo.active_branch.commit
-    binarycpython_git_url = generate_browser_url(local_repo.remotes.origin.url, binarycpython_git_branch)
+    binarycpython_git_url = generate_browser_url(
+        local_repo.remotes.origin.url, binarycpython_git_branch
+    )
 
     formatted_text = output_text.format(
-            binarycpython_git_branch=binarycpython_git_branch,
-            binarycpython_git_revision=binarycpython_git_revision,
-            binarycpython_git_url=binarycpython_git_url,
-
-            binary_c_git_branch=binary_c_git_branch,
-            binary_c_git_revision=binary_c_git_revision,
-            binary_c_git_url=binary_c_git_url,
-        ).strip()
+        binarycpython_git_branch=binarycpython_git_branch,
+        binarycpython_git_revision=binarycpython_git_revision,
+        binarycpython_git_url=binarycpython_git_url,
+        binary_c_git_branch=binary_c_git_branch,
+        binary_c_git_revision=binary_c_git_revision,
+        binary_c_git_url=binary_c_git_url,
+    ).strip()
 
     # Write to file
-    with open("_templates/footer.html", 'w') as outfile_filehandle:
+    with open("_templates/footer.html", "w") as outfile_filehandle:
         outfile_filehandle.write(formatted_text)