From 4e78d65fd3fe622abff4ee47cb1ea67e10be97dc Mon Sep 17 00:00:00 2001
From: David Hendriks <davidhendriks93@gmail.com>
Date: Fri, 23 Apr 2021 15:48:49 +0100
Subject: [PATCH] Black profile

---
 binarycpython/tests/test_c_bindings.py        |   6 +-
 binarycpython/tests/test_distributions.py     |  78 +-
 binarycpython/tests/test_functions.py         |   2 -
 binarycpython/tests/test_grid.py              |  15 +-
 binarycpython/tests/test_hpc_functions.py     |   2 +-
 binarycpython/tests/test_plot_functions.py    |   1 +
 .../tests/test_run_system_wrapper.py          |   2 +-
 binarycpython/tests/test_spacing_functions.py |   1 +
 .../utils/custom_logging_functions.py         |   4 +-
 binarycpython/utils/distribution_functions.py | 533 ++++++------
 binarycpython/utils/functions.py              |  30 +-
 binarycpython/utils/grid.py                   | 784 +++++++++---------
 binarycpython/utils/grid_options_defaults.py  | 106 ++-
 binarycpython/utils/useful_funcs.py           |  26 +-
 examples/example_population.py                |  33 +-
 setup.py                                      |   1 +
 16 files changed, 935 insertions(+), 689 deletions(-)

diff --git a/binarycpython/tests/test_c_bindings.py b/binarycpython/tests/test_c_bindings.py
index 579675983..3e9029ad3 100644
--- a/binarycpython/tests/test_c_bindings.py
+++ b/binarycpython/tests/test_c_bindings.py
@@ -21,7 +21,7 @@ from binarycpython.utils.functions import (
     verbose_print,
     extract_ensemble_json_from_string,
     is_capsule,
-    Capturing
+    Capturing,
 )
 
 # https://docs.python.org/3/library/unittest.html
@@ -65,6 +65,7 @@ ensemble_filters_off {8} ensemble_filter_{9} 1 probability 0.1"
 
     return argstring
 
+
 #######################################################################################################################################################
 ### General run_system test
 #######################################################################################################################################################
@@ -115,7 +116,8 @@ class test_run_system(unittest.TestCase):
 ### memaddr test
 #######################################################################################################################################################
 
-# TODO: Make some assertion tests in c 
+# TODO: Make some assertion tests in c
+
 
 class test_return_store_memaddr(unittest.TestCase):
     """
diff --git a/binarycpython/tests/test_distributions.py b/binarycpython/tests/test_distributions.py
index 9a4bd3441..25ab10999 100644
--- a/binarycpython/tests/test_distributions.py
+++ b/binarycpython/tests/test_distributions.py
@@ -8,6 +8,7 @@ from binarycpython.utils.distribution_functions import *
 from binarycpython.utils.useful_funcs import calc_sep_from_period
 from binarycpython.utils.functions import Capturing
 
+
 class TestDistributions(unittest.TestCase):
     """
     Unittest class
@@ -121,7 +122,9 @@ class TestDistributions(unittest.TestCase):
 
         # GO over the results and check whether they are equal (within tolerance)
         for i in range(len(python_results)):
-            msg = "Error: Value perl: {} Value python: {} for mass, per: {}".format(python_results[i], perl_results[i], str(input_lists[i]))
+            msg = "Error: Value perl: {} Value python: {} for mass, per: {}".format(
+                python_results[i], perl_results[i], str(input_lists[i])
+            )
             self.assertLess(np.abs(python_results[i] - perl_results[i]), self.tolerance)
 
         # extra test for k = -1
@@ -155,8 +158,12 @@ class TestDistributions(unittest.TestCase):
 
         # GO over the results and check whether they are equal (within tolerance)
         for i in range(len(python_results)):
-            msg = "Error: Value perl: {} Value python: {} for mass, per: {}".format(python_results[i], perl_results[i], str(input_lists[i]))
-            self.assertLess(np.abs(python_results[i] - perl_results[i]), self.tolerance, msg=msg)
+            msg = "Error: Value perl: {} Value python: {} for mass, per: {}".format(
+                python_results[i], perl_results[i], str(input_lists[i])
+            )
+            self.assertLess(
+                np.abs(python_results[i] - perl_results[i]), self.tolerance, msg=msg
+            )
 
         # Extra test:
         # M < M0
@@ -191,8 +198,12 @@ class TestDistributions(unittest.TestCase):
 
         # GO over the results and check whether they are equal (within tolerance)
         for i in range(len(python_results)):
-            msg = "Error: Value perl: {} Value python: {} for mass: {}".format(python_results[i], perl_results[i], str(input_lists[i]))
-            self.assertLess(np.abs(python_results[i] - perl_results[i]), self.tolerance, msg=msg)
+            msg = "Error: Value perl: {} Value python: {} for mass: {}".format(
+                python_results[i], perl_results[i], str(input_lists[i])
+            )
+            self.assertLess(
+                np.abs(python_results[i] - perl_results[i]), self.tolerance, msg=msg
+            )
 
         # Extra tests:
         self.assertEqual(
@@ -226,8 +237,12 @@ class TestDistributions(unittest.TestCase):
 
         # GO over the results and check whether they are equal (within tolerance)
         for i in range(len(python_results)):
-            msg = "Error: Value perl: {} Value python: {} for mass: {}".format(python_results[i], perl_results[i], str(input_lists[i]))
-            self.assertLess(np.abs(python_results[i] - perl_results[i]), self.tolerance, msg=msg)
+            msg = "Error: Value perl: {} Value python: {} for mass: {}".format(
+                python_results[i], perl_results[i], str(input_lists[i])
+            )
+            self.assertLess(
+                np.abs(python_results[i] - perl_results[i]), self.tolerance, msg=msg
+            )
 
         # extra test:
         self.assertEqual(
@@ -345,8 +360,12 @@ class TestDistributions(unittest.TestCase):
 
         # GO over the results and check whether they are equal (within tolerance)
         for i in range(len(python_results)):
-            msg = "Error: Value perl: {} Value python: {} for logper: {}".format(python_results[i], perl_results[i], str(input_lists[i]))
-            self.assertLess(np.abs(python_results[i] - perl_results[i]), self.tolerance, msg=msg)
+            msg = "Error: Value perl: {} Value python: {} for logper: {}".format(
+                python_results[i], perl_results[i], str(input_lists[i])
+            )
+            self.assertLess(
+                np.abs(python_results[i] - perl_results[i]), self.tolerance, msg=msg
+            )
 
         # Extra test:
         self.assertTrue(
@@ -380,8 +399,12 @@ class TestDistributions(unittest.TestCase):
 
         # GO over the results and check whether they are equal (within tolerance)
         for i in range(len(python_results)):
-            msg = "Error: Value perl: {} Value python: {} for mass: {}".format(python_results[i], perl_results[i], str(input_lists[i]))
-            self.assertLess(np.abs(python_results[i] - perl_results[i]), self.tolerance, msg=msg)
+            msg = "Error: Value perl: {} Value python: {} for mass: {}".format(
+                python_results[i], perl_results[i], str(input_lists[i])
+            )
+            self.assertLess(
+                np.abs(python_results[i] - perl_results[i]), self.tolerance, msg=msg
+            )
 
     def test_raghavan2010_binary_fraction(self):
         with Capturing() as output:
@@ -402,8 +425,12 @@ class TestDistributions(unittest.TestCase):
 
         # GO over the results and check whether they are equal (within tolerance)
         for i in range(len(python_results)):
-            msg = "Error: Value perl: {} Value python: {} for mass: {}".format(python_results[i], perl_results[i], str(input_lists[i]))
-            self.assertLess(np.abs(python_results[i] - perl_results[i]), self.tolerance, msg=msg)
+            msg = "Error: Value perl: {} Value python: {} for mass: {}".format(
+                python_results[i], perl_results[i], str(input_lists[i])
+            )
+            self.assertLess(
+                np.abs(python_results[i] - perl_results[i]), self.tolerance, msg=msg
+            )
 
     def test_Izzard2012_period_distribution(self):
         with Capturing() as output:
@@ -463,8 +490,12 @@ class TestDistributions(unittest.TestCase):
 
         # GO over the results and check whether they are equal (within tolerance)
         for i in range(len(python_results)):
-            msg = "Error: Value perl: {} Value python: {} for mass, per: {}".format(python_results[i], perl_results[i], str(input_lists[i]))
-            self.assertLess(np.abs(python_results[i] - perl_results[i]), self.tolerance, msg=msg)
+            msg = "Error: Value perl: {} Value python: {} for mass, per: {}".format(
+                python_results[i], perl_results[i], str(input_lists[i])
+            )
+            self.assertLess(
+                np.abs(python_results[i] - perl_results[i]), self.tolerance, msg=msg
+            )
 
     def test_flatsections(self):
         with Capturing() as output:
@@ -494,8 +525,12 @@ class TestDistributions(unittest.TestCase):
 
         # GO over the results and check whether they are equal (within tolerance)
         for i in range(len(python_results)):
-            msg = "Error: Value perl: {} Value python: {} for q: {}".format(python_results[i], perl_results[i], str(input_lists[i]))
-            self.assertLess(np.abs(python_results[i] - perl_results[i]), self.tolerance, msg=msg)
+            msg = "Error: Value perl: {} Value python: {} for q: {}".format(
+                python_results[i], perl_results[i], str(input_lists[i])
+            )
+            self.assertLess(
+                np.abs(python_results[i] - perl_results[i]), self.tolerance, msg=msg
+            )
 
     def test_sana12(self):
         with Capturing() as output:
@@ -746,8 +781,13 @@ class TestDistributions(unittest.TestCase):
 
         # GO over the results and check whether they are equal (within tolerance)
         for i in range(len(python_results)):
-            msg = "Error: Value perl: {} Value python: {} for mass, mass2, per: {}".format(python_results[i], perl_results[i], str(input_lists[i]))
-            self.assertLess(np.abs(python_results[i] - perl_results[i]), self.tolerance, msg=msg)
+            msg = "Error: Value perl: {} Value python: {} for mass, mass2, per: {}".format(
+                python_results[i], perl_results[i], str(input_lists[i])
+            )
+            self.assertLess(
+                np.abs(python_results[i] - perl_results[i]), self.tolerance, msg=msg
+            )
+
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/binarycpython/tests/test_functions.py b/binarycpython/tests/test_functions.py
index 5f105d00d..b6cb2fe50 100644
--- a/binarycpython/tests/test_functions.py
+++ b/binarycpython/tests/test_functions.py
@@ -45,7 +45,6 @@ class test_verbose_print(unittest.TestCase):
     Unittests for verbose_print
     """
 
-
     def test_print(self):
         with Capturing() as output:
             self._test_print()
@@ -383,7 +382,6 @@ class test_get_arg_keys(unittest.TestCase):
     Unittests for function get_arg_keys
     """
 
-
     def test_1(self):
         with Capturing() as output:
             self._test_1()
diff --git a/binarycpython/tests/test_grid.py b/binarycpython/tests/test_grid.py
index c846ccd7e..3d86d7c0b 100644
--- a/binarycpython/tests/test_grid.py
+++ b/binarycpython/tests/test_grid.py
@@ -887,12 +887,12 @@ class test_grid_evolve(unittest.TestCase):
 
         analytics = test_pop.evolve()
 
-        self.assertTrue(isinstance(test_pop.grid_ensemble_results['ensemble'], dict))
-        self.assertNotEqual(test_pop.grid_ensemble_results['ensemble'], {})
+        self.assertTrue(isinstance(test_pop.grid_ensemble_results["ensemble"], dict))
+        self.assertNotEqual(test_pop.grid_ensemble_results["ensemble"], {})
 
-        self.assertIn("number_counts", test_pop.grid_ensemble_results['ensemble'])
+        self.assertIn("number_counts", test_pop.grid_ensemble_results["ensemble"])
         self.assertNotEqual(
-            test_pop.grid_ensemble_results['ensemble']["number_counts"], {}
+            test_pop.grid_ensemble_results["ensemble"]["number_counts"], {}
         )
 
     def test_grid_evolve_2_threads_with_ensemble_comparing_two_methods(self):
@@ -1004,17 +1004,20 @@ class test_grid_evolve(unittest.TestCase):
 
                 ensemble_output_2 = merge_dicts(ensemble_output_2, ensemble_json)
 
-        for key in ensemble_output_1['ensemble']["number_counts"]["stellar_type"]["0"]:
+        for key in ensemble_output_1["ensemble"]["number_counts"]["stellar_type"]["0"]:
             self.assertIn(key, ensemble_output_2["number_counts"]["stellar_type"]["0"])
 
             # compare values
             self.assertLess(
                 np.abs(
-                    ensemble_output_1['ensemble']["number_counts"]["stellar_type"]["0"][key]
+                    ensemble_output_1["ensemble"]["number_counts"]["stellar_type"]["0"][
+                        key
+                    ]
                     - ensemble_output_2["number_counts"]["stellar_type"]["0"][key]
                 ),
                 1e-8,
             )
 
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/binarycpython/tests/test_hpc_functions.py b/binarycpython/tests/test_hpc_functions.py
index ddba58b7b..e5fe16c34 100644
--- a/binarycpython/tests/test_hpc_functions.py
+++ b/binarycpython/tests/test_hpc_functions.py
@@ -4,4 +4,4 @@ Unittests for hpc_functions module
 
 from binarycpython.utils.hpc_functions import *
 
-# TODO: write tests for hpc functions
\ No newline at end of file
+# TODO: write tests for hpc functions
diff --git a/binarycpython/tests/test_plot_functions.py b/binarycpython/tests/test_plot_functions.py
index c8e9a1304..43b716e95 100644
--- a/binarycpython/tests/test_plot_functions.py
+++ b/binarycpython/tests/test_plot_functions.py
@@ -17,6 +17,7 @@ from binarycpython.utils.functions import Capturing
 #     def test_1(self):
 #         pass
 
+
 class test_color_by_index(unittest.TestCase):
     """
     Unittests for function color_by_index
diff --git a/binarycpython/tests/test_run_system_wrapper.py b/binarycpython/tests/test_run_system_wrapper.py
index 87f0a5e0d..f237693c9 100644
--- a/binarycpython/tests/test_run_system_wrapper.py
+++ b/binarycpython/tests/test_run_system_wrapper.py
@@ -5,4 +5,4 @@ Unittests for run_system_wrapper
 from binarycpython.utils.run_system_wrapper import *
 from binarycpython.utils.functions import *
 
-# TODO: write tests for run_system_wrapper
\ No newline at end of file
+# TODO: write tests for run_system_wrapper
diff --git a/binarycpython/tests/test_spacing_functions.py b/binarycpython/tests/test_spacing_functions.py
index cd50b86a1..bdfbe253e 100644
--- a/binarycpython/tests/test_spacing_functions.py
+++ b/binarycpython/tests/test_spacing_functions.py
@@ -8,6 +8,7 @@ import numpy as np
 from binarycpython.utils.spacing_functions import *
 from binarycpython.utils.functions import *
 
+
 class test_spacing_functions(unittest.TestCase):
     """
     Unit test for spacing functions
diff --git a/binarycpython/utils/custom_logging_functions.py b/binarycpython/utils/custom_logging_functions.py
index f869835b0..c50562b91 100644
--- a/binarycpython/utils/custom_logging_functions.py
+++ b/binarycpython/utils/custom_logging_functions.py
@@ -389,12 +389,12 @@ def create_and_load_logging_function(
     else:
         tmp_dir = custom_tmp_dir
 
-    custom_logging_dir = os.path.join(tmp_dir, 'custom_logging')
+    custom_logging_dir = os.path.join(tmp_dir, "custom_logging")
 
     # Create the subdir for the custom_logging code
     os.makedirs(custom_logging_dir, exist_ok=True)
 
-    # 
+    #
     library_name = os.path.join(
         custom_logging_dir, "libcustom_logging_{}.so".format(uuid.uuid4().hex)
     )
diff --git a/binarycpython/utils/distribution_functions.py b/binarycpython/utils/distribution_functions.py
index 13cf380f2..4756f125a 100644
--- a/binarycpython/utils/distribution_functions.py
+++ b/binarycpython/utils/distribution_functions.py
@@ -30,6 +30,7 @@ from binarycpython.utils.useful_funcs import calc_period_from_sep, calc_sep_from
 LOG_LN_CONVERTER = 1.0 / math.log(10.0)
 distribution_constants = {}  # To store the constants in
 
+
 def prepare_dict(global_dict: dict, list_of_sub_keys: list) -> None:
     """
     Function that makes sure that the global dict is prepared to have a value set there. This dictionary will store values and factors for the distribution functions, so that they dont have to be calculated each time.
@@ -404,8 +405,8 @@ def gaussian(
 
 def Kroupa2001(m: Union[int, float], newopts: dict = None) -> Union[int, float]:
     """
-    Probability distribution function for kroupa 2001 IMF, where the default values to the 
-    three_part_powerlaw are: 
+    Probability distribution function for kroupa 2001 IMF, where the default values to the
+    three_part_powerlaw are:
         default = {"m0": 0.1, "m1": 0.5, "m2": 1, "mmax": 100, "p1": -1.3, "p2": -2.3,"p3": -2.3}
 
     Args:
@@ -889,6 +890,8 @@ def flatsections(x: float, opts: dict) -> Union[float, int]:
 
     # print("flatsections gives C={}: y={}\n",c,y)
     return y
+
+
 # print(flatsections(1, [{'min': 0, 'max': 2, 'height': 3}]))
 
 ########################################################################
@@ -899,21 +902,23 @@ def flatsections(x: float, opts: dict) -> Union[float, int]:
 # Star formation histories
 ########################################################################
 
+
 def cosmic_SFH_madau_dickinson2014(z):
     """
     Cosmic star formation history distribution from Madau & Dickonson 2014 (https://arxiv.org/pdf/1403.0007.pdf)
-    
+
     Args:
         z: redshift
-    
+
     Returns:
         Cosmic star formation rate in Solarmass year^-1 megaparsec^-3
     """
 
-    CSFH = 0.015 * ((1+z)**2.7)/(1+(((1+z)/2.9)**5.6))
+    CSFH = 0.015 * ((1 + z) ** 2.7) / (1 + (((1 + z) / 2.9) ** 5.6))
 
     return CSFH
 
+
 ########################################################################
 # Metallicity distributions
 ########################################################################
@@ -936,6 +941,7 @@ import py_rinterpolate
 # Global dictionary to store values in
 Moecache = {}
 
+
 def poisson(lambda_val, n, nmax=None):
     """
     Function that calculates the poisson value and normalizes TODO: improve the description
@@ -943,10 +949,14 @@ def poisson(lambda_val, n, nmax=None):
 
     cachekey = "{} {} {}".format(lambda_val, n, nmax)
 
-    if distribution_constants.get('poisson_cache', None):
-        if distribution_constants['poisson_cache'].get(cachekey, None):
-            p_val = distribution_constants['poisson_cache'][cachekey]
-            print("Found cached value: Poisson ({}, {}, {}) = {}\n".format(lambda_val, n, nmax, p_val))
+    if distribution_constants.get("poisson_cache", None):
+        if distribution_constants["poisson_cache"].get(cachekey, None):
+            p_val = distribution_constants["poisson_cache"][cachekey]
+            print(
+                "Found cached value: Poisson ({}, {}, {}) = {}\n".format(
+                    lambda_val, n, nmax, p_val
+                )
+            )
             return p_val
 
     # Poisson distribution : note, n can be zero
@@ -954,21 +964,22 @@ def poisson(lambda_val, n, nmax=None):
     # nmax is the truncation : if set, we normalize
     # correctly.
     p_val = _poisson(lambda_val, n)
-    
+
     if nmax:
         I_poisson = 0
-        for i in range(nmax+1):
+        for i in range(nmax + 1):
             I_poisson += _poisson(lambda_val, i)
-        p_val = p_val/I_poisson
+        p_val = p_val / I_poisson
 
     # Add to cache
-    if not distribution_constants.get('poisson_cache', None):
-        distribution_constants['poisson_cache'] = {}
-    distribution_constants['poisson_cache'][cachekey] = p_val    
+    if not distribution_constants.get("poisson_cache", None):
+        distribution_constants["poisson_cache"] = {}
+    distribution_constants["poisson_cache"][cachekey] = p_val
 
     # print("Poisson ({}, {}, {}) = {}\n".format(lambda_val, n, nmax, p_val))
     return p_val
 
+
 def _poisson(lambda_val, n):
     """
     Function to return the poisson value
@@ -977,7 +988,6 @@ def _poisson(lambda_val, n):
     return (lambda_val ** n) * np.exp(-lambda_val) / (1.0 * math.factorial(n))
 
 
-
 def Moe_de_Stefano_2017_multiplicity_fractions(options):
     # Returns a list of multiplicity fractions for a given input list of masses
     global Moecache
@@ -986,7 +996,7 @@ def Moe_de_Stefano_2017_multiplicity_fractions(options):
 
     result = {}
 
-    # TODO: decide what to do with this. 
+    # TODO: decide what to do with this.
     # if(0)
     # {
     #     # we have these in a table, so could interpolate, but...
@@ -1004,51 +1014,61 @@ def Moe_de_Stefano_2017_multiplicity_fractions(options):
     # use a Poisson distribution to calculate the fractions
     # (this is more accurate)
     # Set up the multiplicity interpolator
-    if not Moecache.get('rinterpolator_multiplicity', None):
-        Moecache['rinterpolator_multiplicity'] = py_rinterpolate.Rinterpolate(
-            table=Moecache['multiplicity_table'],        # Contains the table of data 
-            nparams=1,    # logM1
-            ndata=4       # The amount of datapoints (the parameters that we want to interpolate)
+    if not Moecache.get("rinterpolator_multiplicity", None):
+        Moecache["rinterpolator_multiplicity"] = py_rinterpolate.Rinterpolate(
+            table=Moecache["multiplicity_table"],  # Contains the table of data
+            nparams=1,  # logM1
+            ndata=4,  # The amount of datapoints (the parameters that we want to interpolate)
         )
 
-    if options['multiplicity_model'] == 'Poisson':
-        multiplicity = Moecache['rinterpolator_multiplicity'].interpolate([np.log10(options['M1'])])[0]
+    if options["multiplicity_model"] == "Poisson":
+        multiplicity = Moecache["rinterpolator_multiplicity"].interpolate(
+            [np.log10(options["M1"])]
+        )[0]
 
         for n in range(4):
-            result[n] = options['multiplicity_modulator'][n] * poisson(multiplicity, n, 3) if options['multiplicity_modulator'][n] > 0 else 0
+            result[n] = (
+                options["multiplicity_modulator"][n] * poisson(multiplicity, n, 3)
+                if options["multiplicity_modulator"][n] > 0
+                else 0
+            )
 
-    elif options['multiplicity_model'] == 'data':
+    elif options["multiplicity_model"] == "data":
         # use the fractions calculated from Moe's data directly
         #
         # note that in this case, there are no quadruples: these
         # are combined with triples
 
         for n in range(3):
-            result[n] = Moecache['rinterpolator_multiplicity'].interpolate([np.log10(options['M1'])])[n+1] * options['multiplicity_modulator'][n]
-        result[3] = 0.0 # no quadruples
-
+            result[n] = (
+                Moecache["rinterpolator_multiplicity"].interpolate(
+                    [np.log10(options["M1"])]
+                )[n + 1]
+                * options["multiplicity_modulator"][n]
+            )
+        result[3] = 0.0  # no quadruples
 
     # Normalisation:
-    if options['normalize_multiplicities'] == 'raw':
+    if options["normalize_multiplicities"] == "raw":
         # do nothing : use raw Poisson predictions
         pass
-    elif options['normalize_multiplicities'] == 'norm':
+    elif options["normalize_multiplicities"] == "norm":
         # simply normalize so all multiplicities add to 1
         sum_result = sum([result[key] for key in result.keys()])
         for key in result.keys():
-            result[key] = result[key]/sum_result
-    elif options['normalize_multiplicities'] == 'merge':
+            result[key] = result[key] / sum_result
+    elif options["normalize_multiplicities"] == "merge":
         # if multiplicity == 1, merge binaries, triples and quads into singles
         #    (de facto this is the same as "norm")
         # if multiplicity == 2, merge triples and quads into binaries
         # if multiplicity == 3, merge quads into triples
         # if multiplicity == 4, do nothing, equivalent to 'raw'.
 
-        # TODO: ask rob about this part. Not sure if i understand it. 
+        # TODO: ask rob about this part. Not sure if i understand it.
 
         multiplicitty = 0
         for n in range(4):
-            if options['multiplicity_modulator'][n] > 0:
+            if options["multiplicity_modulator"][n] > 0:
                 multiplicity = n + 1
         print("Multiplicity: {}: {}".format(multiplicity, str(result)))
 
@@ -1070,24 +1090,29 @@ def Moe_de_Stefano_2017_multiplicity_fractions(options):
             # we have singles, binaries and triples:
             # quads are treated as triples
             result[2] += result[3]
-            result[3] = 0   
+            result[3] = 0
 
         elif multiplicity == 4:
             # we have singles, binaries, triples and quads,
             # so do nothing
             pass
         else:
-            print("Error: in the Moe distribution, we seem to want no stars at all (multiplicity == {}).\n".format(multiplicity))
+            print(
+                "Error: in the Moe distribution, we seem to want no stars at all (multiplicity == {}).\n".format(
+                    multiplicity
+                )
+            )
 
         sum_result = sum([result[key] for key in result.keys()])
         for key in result.keys():
-            result[key] = result[key]/sum_result 
+            result[key] = result[key] / sum_result
 
     print("Multiplicity array: {}".format(str(result)))
 
     # return array reference
     return result
 
+
 def build_q_table(options, m, p):
     ############################################################
     #
@@ -1105,29 +1130,31 @@ def build_q_table(options, m, p):
     # mass is in $opts->{$m}
     # period is  $opts->{$p}
     #
-    ############################################################    
+    ############################################################
 
-    # Since the information from the table for M&S is independent of any choice we make, 
+    # Since the information from the table for M&S is independent of any choice we make,
     # we need to take into account that for example our choice of minimum mass leads to a minimum q_min that is not the same as in the table
     # We should ignore those parts of the table and renormalize. If we are below the lowest value of qmin in the table we need to extrapolate the data
 
     # We can check if we have a cached value for this already:
     # TODO: fix this cache check
     incache = False
-    if Moecache.get('rinterpolator_q_metadata', None):
-        if Moecache['rinterpolator_q_metadata'][m] == options[m]:
-            if Moecache['rinterpolator_q_metadata'] == options[p]:
-                incache = True         
+    if Moecache.get("rinterpolator_q_metadata", None):
+        if Moecache["rinterpolator_q_metadata"][m] == options[m]:
+            if Moecache["rinterpolator_q_metadata"] == options[p]:
+                incache = True
 
-    # 
+    #
     if not incache:
         # trim and/or expand the table to the range $qmin to $qmax.
 
         # qmin is set by the minimum stellar mass : below this
         # the companions are planets
-        qmin = options['ranges']['M'][0] # TODO: this lower range must not be lower than Mmin. However, since the q_min is used as a sample range for the M2, it should use this value I think. discuss
+        qmin = options["ranges"]["M"][
+            0
+        ]  # TODO: this lower range must not be lower than Mmin. However, since the q_min is used as a sample range for the M2, it should use this value I think. discuss
 
-        # TODO: fix that this works. Should depend on metallicity too I think. iirc this function does not work now. 
+        # TODO: fix that this works. Should depend on metallicity too I think. iirc this function does not work now.
         # qmax = maximum_mass_ratio_for_RLOF(options[m], options[p])
         # TODO: change this to the above
         qmax = 1
@@ -1137,15 +1164,15 @@ def build_q_table(options, m, p):
         qdata = []
         can_renormalize = 1
 
-        qeps = 1e-8 # small number but such that qeps+1 != 1
-        if (qeps+1==1.0): 
+        qeps = 1e-8  # small number but such that qeps+1 != 1
+        if qeps + 1 == 1.0:
             printf("qeps (= {}) +1 == 1. Make qeps larger".format(qeps))
 
-        if (qmin >= qmax):
+        if qmin >= qmax:
             # there may be NO binaries in this part of the parameter space:
             # in which case, set up a table with lots of zero in it
 
-            qdata = {0:0, 1:0}
+            qdata = {0: 0, 1: 0}
             can_renormalize = 0
 
         else:
@@ -1156,45 +1183,35 @@ def build_q_table(options, m, p):
             # at the low and high ends
             require_extrapolation = {}
 
-
-            if (qmin >= 0.15):
+            if qmin >= 0.15:
                 # qmin is inside Moe's table : this is easy,
                 # we just keep points from qmin at the low
                 # end to qmax at the high end.
-                require_extrapolation['low'] = 0
-                require_extrapolation['high'] = 1 # TODO: shouldnt the extrapolation need to happen if qmax > 0.95
-                qdata[qmin] = Moecache['rinterpolator_q'].interpolate(
-                    [
-                        np.log10(options[m]),
-                        np.log10(options[p])
-                    ]
+                require_extrapolation["low"] = 0
+                require_extrapolation[
+                    "high"
+                ] = 1  # TODO: shouldnt the extrapolation need to happen if qmax > 0.95
+                qdata[qmin] = Moecache["rinterpolator_q"].interpolate(
+                    [np.log10(options[m]), np.log10(options[p])]
                 )[0]
 
                 for q in np.arange(0.15, 0.950001, 0.1):
                     if (q >= qmin) and (q <= qmax):
-                        qdata[q] = Moecache['rinterpolator_q'].interpolate(
-                            [
-                                np.log10(options[m]),
-                                np.log10(options[p]),
-                                q
-                            ]
+                        qdata[q] = Moecache["rinterpolator_q"].interpolate(
+                            [np.log10(options[m]), np.log10(options[p]), q]
                         )[0]
             else:
-                require_extrapolation['low'] = 1
-                require_extrapolation['high'] = 1 
-                if (qmax < 0.15):
+                require_extrapolation["low"] = 1
+                require_extrapolation["high"] = 1
+                if qmax < 0.15:
                     # qmax < 0.15 which is off the edge
                     # of the table. In this case, choose
                     # two points at q=0.15 and 0.16 and interpolate
                     # at these in case we want to extrapolate.
                     for q in [0.15, 0.16]:
-                        qdata[q] = Moecache['rinterpolator_q'].interpolate(
-                            [
-                                np.log10(options[m]),
-                                np.log10(options[p]),
-                                q
-                            ]
-                        )[0]                                            
+                        qdata[q] = Moecache["rinterpolator_q"].interpolate(
+                            [np.log10(options[m]), np.log10(options[p]), q]
+                        )[0]
                 else:
                     # qmin < 0.15 and qmax > 0.15, so we
                     # have to generate Moe's table for
@@ -1203,25 +1220,21 @@ def build_q_table(options, m, p):
                     # to obtain the q distribution data.
 
                     for q in np.arange(0.15, np.min(0.950001, qmax + 0.0001), 0.1):
-                        qdata[q] = Moecache['rinterpolator_q'].interpolate(
-                            [
-                                np.log10(options[m]),
-                                np.log10(options[p]),
-                                q
-                            ]
+                        qdata[q] = Moecache["rinterpolator_q"].interpolate(
+                            [np.log10(options[m]), np.log10(options[p]), q]
                         )[0]
-                
+
                 # just below qmin, if qmin>qeps, we want nothing
-                if (qmin - 0.15 > qeps):
+                if qmin - 0.15 > qeps:
                     q = qmin - qeps
                     qdata[q] = 0
-                    require_extrapolation['low'] = 0
+                    require_extrapolation["low"] = 0
 
             # just above qmax, if qmax<1, we want nothing
-            if (qmax < 0.95):
+            if qmax < 0.95:
                 q = qmax + qeps
                 qdata[q] = 0
-                require_extrapolation['high'] = 0
+                require_extrapolation["high"] = 0
 
             # sorted list of qs
             qs = sorted(qdata.keys())
@@ -1242,17 +1255,16 @@ def build_q_table(options, m, p):
                     qs[1] = 1.0
                     qdata[qs[1]] = qs[0]
 
-
             else:
-                for pre in ['low', 'high']:
-                    if (require_extrapolation[pre]==0):
+                for pre in ["low", "high"]:
+                    if require_extrapolation[pre] == 0:
                         continue
                     else:
-                        sign = -1 if pre == 'low' else 1
-                        end_index = 0 if pre == 'low' else len(qs)-1
-                        indices = [0, 1] if pre == 'low' else [len(qs)-1, len(qs)-2]
-                        method = options.get('q_{}_extrapolation_method', None)
-                        qlimit = qmin if pre == 'log' else qmax
+                        sign = -1 if pre == "low" else 1
+                        end_index = 0 if pre == "low" else len(qs) - 1
+                        indices = [0, 1] if pre == "low" else [len(qs) - 1, len(qs) - 2]
+                        method = options.get("q_{}_extrapolation_method".format(pre), None)
+                        qlimit = qmin if pre == "low" else qmax
 
                         print("Q: {} method: {}".format(pre, method))
                         print("indices: {}".format(indices))
@@ -1262,39 +1274,56 @@ def build_q_table(options, m, p):
                         # truncate the distribution
                         qdata[max(0.0, min(1.0, qlimit + sign * qeps))] = 0
 
-                        if method==None:
+                        if method == None:
                             # no extrapolation : just interpolate between 0.10 and 0.95
                             continue
-                        elif method=='flat':
+                        elif method == "flat":
                             # use the end value value and extrapolate it
                             # with zero slope
                             qdata[qlimit] = qdata[qs[end_index]]
-                        elif method=='linear':
+                        elif method == "linear":
                             # linear extrapolation
                             print("Linear 2 {}".format(pre))
                             dq = qs[indices[1]] - qs[indices[0]]
 
-                            if dq==0:
+                            if dq == 0:
                                 # No change
                                 print("dq = 0")
                                 qdata[qlimit] = qs[end_index]
                             else:
-                                slope = (qdata[qs[indices[1]]] - qdata[qs[indices[0]]])/dq
-                                intercept = qdata[qs[indices[0]]] - slope * qs[indices[0]]
+                                slope = (
+                                    qdata[qs[indices[1]]] - qdata[qs[indices[0]]]
+                                ) / dq
+                                intercept = (
+                                    qdata[qs[indices[0]]] - slope * qs[indices[0]]
+                                )
                                 qdata[qlimit] = max(0.0, slope * qlimit + intercept)
-                                print("Slope: {} intercept: {} dn/dq({}) = {}".format(slope, intercept, qlimit, qdata[qlimit]))
-                        elif method=='plaw2':
-                            newq = 0.05;
+                                print(
+                                    "Slope: {} intercept: {} dn/dq({}) = {}".format(
+                                        slope, intercept, qlimit, qdata[qlimit]
+                                    )
+                                )
+                        elif method == "plaw2":
+                            newq = 0.05
                             # use a power-law extrapolation down to q=0.05, if possible
-                            if (qdata[qs[indices[0]]] == 0) and (qdata[qs[indices[1]]] == 0.0):
+                            if (qdata[qs[indices[0]]] == 0) and (
+                                qdata[qs[indices[1]]] == 0.0
+                            ):
                                 # not possible
                                 qdata[newq] = 0
                             else:
-                                slope = (np.log10(qdata[qs[indices[1]]]) - np.log10(qdata[qs[indices[0]]]))/(np.log10(qs[indices[1]])-np.log10(qs[indices[0]]))
-                                intercept = np.log10(qdata[qs[indices[0]]]) - slope * log10(qs[indices[0]])
+                                slope = (
+                                    np.log10(qdata[qs[indices[1]]])
+                                    - np.log10(qdata[qs[indices[0]]])
+                                ) / (
+                                    np.log10(qs[indices[1]]) - np.log10(qs[indices[0]])
+                                )
+                                intercept = np.log10(
+                                    qdata[qs[indices[0]]]
+                                ) - slope * np.log10(qs[indices[0]])
                                 qdata[newq] = slope * newq + intercept
 
-                        elif method=='nolowq':
+                        elif method == "nolowq":
                             newq = 0.05
                             qdata[newq] = 0
                         else:
@@ -1359,10 +1388,8 @@ def build_q_table(options, m, p):
             tmp_table.append([q, qdata[q]])
 
         # Make an interpolation table to contain our modified data
-        q_interpolator =  py_rinterpolate.Rinterpolate(
-            table=tmp_table,        # Contains the table of data 
-            nparams=1,    # q
-            ndata=1       # 
+        q_interpolator = py_rinterpolate.Rinterpolate(
+            table=tmp_table, nparams=1, ndata=1  # Contains the table of data  # q  #
         )
         # TODO: build a check in here to see if the interpolator build was successful
 
@@ -1375,39 +1402,35 @@ def build_q_table(options, m, p):
 
             # integrate: note that the value of the integral is
             # meaningless to within a factor (which depends on $dq)
-            for q in np.arange(0, 1+ 2e-6, dq):
-                x = q_interpolator.interpolate(
-                    [q]
-                )
-                if len(x)==0:
+            for q in np.arange(0, 1 + 2e-6, dq):
+                x = q_interpolator.interpolate([q])
+                if len(x) == 0:
                     print("Q interpolator table interpolation failed")
                     print("tmp_table = {}".format(str(tmp_table)))
                     print("q_data = {}".format(str(qdata)))
                     raise ValueError
                 else:
-                    I += x[0]*dq
+                    I += x[0] * dq
                     print("dn/dq ({}) = {}".format())
 
             if I > 0:
                 # normalize to 1.0 by dividing the data by 1.0/$I
-                q_interpolator.multiply_table_column(1, 1.0/I)
+                q_interpolator.multiply_table_column(1, 1.0 / I)
 
                 # test this
                 I = 0
-                for q in np.arange(0, 1+ 2e-6, dq):
-                    I += q_interpolator.interpolate(
-                    [q]
-                )[0] * dq
+                for q in np.arange(0, 1 + 2e-6, dq):
+                    I += q_interpolator.interpolate([q])[0] * dq
                 print("Q integral: {}, {}".format(I, q_interpolator))
 
                 # fail if error in integral > 1e-6 (should be ~ machine precision)
-                if (abs(1.0 - I) > 1e-6):
+                if abs(1.0 - I) > 1e-6:
                     print("Error: > 1e-6 in q probability integral: {}".format(I))
 
         # set this new table in the cache
-        Moecache['rinterpolator_q_given_{}_log10{}'.format(m, p)] = q_interpolator
-        Moecache['rinterpolator_q_metadata'][m] = options[m]
-        Moecache['rinterpolator_q_metadata'][p] = options[p]
+        Moecache["rinterpolator_q_given_{}_log10{}".format(m, p)] = q_interpolator
+        Moecache["rinterpolator_q_metadata"][m] = options[m]
+        Moecache["rinterpolator_q_metadata"][p] = options[p]
 
 
 def Moe_de_Stefano_2017_pdf(options):
@@ -1423,24 +1446,24 @@ def Moe_de_Stefano_2017_pdf(options):
     # mmax => maximum allowed stellar mass (default 80.0)
     #
 
-    prob = [] # Value that we will return
+    prob = []  # Value that we will return
 
-    multiplicity = options['multiplicity']
-    if not options.get('multiplicity', None):
+    multiplicity = options.get("multiplicity", None)
+    if not options.get("multiplicity", None):
         multiplicity = 1
         for n in range(2, 5):
-            multiplicity += 1 if options.get('M{}'.format(n), None) else 0
+            multiplicity += 1 if options.get("M{}".format(n), None) else 0
     else:
-        multiplicity = options['multiplicity']
+        multiplicity = options["multiplicity"]
 
     # immediately return 0 if the multiplicity modulator is 0
-    if options['multiplicity_modulator'][multiplicity - 1] == 0:
+    if options["multiplicity_modulator"][multiplicity - 1] == 0:
         print("_pdf ret 0 because of mult mod\n")
         return 0
 
     ############################################################
-    # multiplicity fraction    
-    prob.append(Moe_de_Stefano_2017_multiplicity_fractions(options)[multiplicity-1])
+    # multiplicity fraction
+    prob.append(Moe_de_Stefano_2017_multiplicity_fractions(options)[multiplicity - 1])
 
     ############################################################
     # always require an IMF for the primary star
@@ -1449,22 +1472,24 @@ def Moe_de_Stefano_2017_pdf(options):
     # (dlnM = dM/M, so 1/dlnM = M/dM)
 
     # TODO: Create an n-part-powerlaw method that can have breakpoints and slopes. I'm using a three-part powerlaw now.
-    prob.append(Kroupa2001(options['M1']) * options['M1'])
+    prob.append(Kroupa2001(options["M1"]) * options["M1"])
 
-    if(multiplicity >= 2):
+    if multiplicity >= 2:
 
         # Separation of the inner binary
-        options['sep'] = calc_sep_from_period(options['M1'], options['M2'], options['P'])
+        options["sep"] = calc_sep_from_period(
+            options["M1"], options["M2"], options["P"]
+        )
 
         # Total mass inner binary:
-        options['M1+M2'] = options['M1'] + options['M2']
+        options["M1+M2"] = options["M1"] + options["M2"]
 
         # binary, triple or quadruple system
         if not Moecache.get("rinterpolator_log10P", None):
-            Moecache['rinterpolator_log10P'] = py_rinterpolate.Rinterpolate(
-                table=Moecache['period_distributions'],        # Contains the table of data 
-                nparams=2,    # log10M, log10P
-                ndata=2       # binary, triple
+            Moecache["rinterpolator_log10P"] = py_rinterpolate.Rinterpolate(
+                table=Moecache["period_distributions"],  # Contains the table of data
+                nparams=2,  # log10M, log10P
+                ndata=2,  # binary, triple
             )
 
         # TODO: fix this function
@@ -1497,11 +1522,8 @@ def Moe_de_Stefano_2017_pdf(options):
         # }
 
         prob.append(
-            Moecache['rinterpolator_log10P'].interpolate(
-                 [
-                  np.log10(options['M1']),
-                  np.log10(options['P'])
-                 ]                
+            Moecache["rinterpolator_log10P"].interpolate(
+                [np.log10(options["M1"]), np.log10(options["P"])]
             )[0]
         )
 
@@ -1514,28 +1536,28 @@ def Moe_de_Stefano_2017_pdf(options):
         # Make a table storing Moe's data for q distributions
 
         if not Moecache.get("rinterpolator_q", None):
-            Moecache['rinterpolator_q'] = py_rinterpolate.Rinterpolate(
-                table=Moecache['q_distributions'],        # Contains the table of data 
-                nparams=3,    # log10M, log10P, q
-                ndata=1       # 
+            Moecache["rinterpolator_q"] = py_rinterpolate.Rinterpolate(
+                table=Moecache["q_distributions"],  # Contains the table of data
+                nparams=3,  # log10M, log10P, q
+                ndata=1,  #
             )
 
         # Build the table for q
-        primary_mass = options['M1']
-        secondary_mass = options['M2']
+        primary_mass = options["M1"]
+        secondary_mass = options["M2"]
         m_label = "M1"
         p_label = "P"
 
         build_q_table(options, m_label, p_label)
         prob.append(
-            Moecache['rinterpolator_q_given_{}_log10{}'.format(m_label, p_label)].interpolate(
-                    [secondary_mass/primary_mass]
-                )[0]
+            Moecache[
+                "rinterpolator_q_given_{}_log10{}".format(m_label, p_label)
+            ].interpolate([secondary_mass / primary_mass])[0]
         )
 
         # TODO add eccentricity calculations
 
-        if (multiplicity >= 3):
+        if multiplicity >= 3:
             # triple or quadruple system
 
             ############################################################
@@ -1549,10 +1571,12 @@ def Moe_de_Stefano_2017_pdf(options):
             # stars 1 and 2
 
             # TODO: Is this a correct assumption?
-            max_sep = 10.0 * options['sep'] * (1.0 + options['ecc']) # TODO: isnt this the minimal separation?
-            min_P2 = calc_period_from_sep(options['M1+M2'], options['mmin'], max_sep)
+            max_sep = (
+                10.0 * options["sep"] * (1.0 + options["ecc"])
+            )  # TODO: isnt this the minimal separation?
+            min_P2 = calc_period_from_sep(options["M1+M2"], options["mmin"], max_sep)
 
-            if (options['P2'] < min_P2):
+            if options["P2"] < min_P2:
                 # period is too short : system is not hierarchical
                 prob.append(0)
             else:
@@ -1560,41 +1584,39 @@ def Moe_de_Stefano_2017_pdf(options):
 
                 # hence the separation between the outer star
                 # and inner binary
-                options['sep2'] = calc_sep_from_period(options['M3'], options['M1+M2'], options['P2'])          
+                options["sep2"] = calc_sep_from_period(
+                    options["M3"], options["M1+M2"], options["P2"]
+                )
 
                 if not Moecache.get("rinterpolator_log10P2", None):
-                    Moecache['rinterpolator_log10P2'] = py_rinterpolate.Rinterpolate(
-                        table=Moecache['period_distributions'],        # Contains the table of data 
-                        nparams=2,    # log10(M1+M2), log10P # TODO: Really? Is the first column of that table M1+M2?  
-                        ndata=2       # binary, triple
-                    )       
+                    Moecache["rinterpolator_log10P2"] = py_rinterpolate.Rinterpolate(
+                        table=Moecache[
+                            "period_distributions"
+                        ],  # Contains the table of data
+                        nparams=2,  # log10(M1+M2), log10P # TODO: Really? Is the first column of that table M1+M2?
+                        ndata=2,  # binary, triple
+                    )
 
-                if not Moecache.get('P2_integrals', None):
-                    Moecache['P2_integrals'] = {}
+                if not Moecache.get("P2_integrals", None):
+                    Moecache["P2_integrals"] = {}
 
-                if not Moecache['P2_integrals'].get(options['M1+M2'], None):
+                if not Moecache["P2_integrals"].get(options["M1+M2"], None):
                     # normalize because $min_per > 0 so not all the periods
                     # should be included
                     I_p2 = 0
                     dlogP2 = 1e-3
                     for logP2 in np.arange(np.log10(min_P2), 10, dlogP2):
                         # dp_dlogP2 = dp / dlogP
-                        dp_dlogP2 = Moecache['rinterpolator_log10P2'].interpolate(
-                            [
-                                np.log10(options['M1']),
-                                logP2
-                            ]
+                        dp_dlogP2 = Moecache["rinterpolator_log10P2"].interpolate(
+                            [np.log10(options["M1"]), logP2]
                         )[0]
                         I_p2 += dp_dlogP2 * dlogP2
-                    Moecache['P2_integrals'][options['M1+M2']] = I_p2
+                    Moecache["P2_integrals"][options["M1+M2"]] = I_p2
 
-                p_val = Moecache['rinterpolator_log10P'].interpolate(
-                    [
-                        np.log10(options['M1+M2']),
-                        np.log10(options['P2'])
-                    ]
+                p_val = Moecache["rinterpolator_log10P"].interpolate(
+                    [np.log10(options["M1+M2"]), np.log10(options["P2"])]
                 )[0]
-                p_val = p_val / Moecache['P2_integrals']
+                p_val = p_val / Moecache["P2_integrals"][options["M1+M2"]]
                 prob.append(p_val)
 
                 ############################################################
@@ -1604,31 +1626,31 @@ def Moe_de_Stefano_2017_pdf(options):
                 # subject to qmin = Mmin/(M1+M2)
                 #
                 # Make a table storing Moe's data for q distributions
-                # TODO: Check if this is correct. 
+                # TODO: Check if this is correct.
                 if not Moecache.get("rinterpolator_q", None):
-                    Moecache['rinterpolator_q'] = py_rinterpolate.Rinterpolate(
-                        table=Moecache['q_distributions'],        # Contains the table of data 
-                        nparams=3,    # log10(M1+M2), log10P2, q
-                        ndata=1       # 
+                    Moecache["rinterpolator_q"] = py_rinterpolate.Rinterpolate(
+                        table=Moecache["q_distributions"],  # Contains the table of data
+                        nparams=3,  # log10(M1+M2), log10P2, q
+                        ndata=1,  #
                     )
 
                 # Build the table for q2
-                primary_mass = options['M1+M2']
-                secondary_mass = options['M3']
+                primary_mass = options["M1+M2"]
+                secondary_mass = options["M3"]
                 m_label = "M1+M2"
                 p_label = "P2"
 
                 build_q_table(options, m_label, p_label)
                 prob.append(
-                    Moecache['rinterpolator_q_given_{}_log10{}'.format(m_label, p_label)].interpolate(
-                            [secondary_mass/primary_mass]
-                        )[0]
+                    Moecache[
+                        "rinterpolator_q_given_{}_log10{}".format(m_label, p_label)
+                    ].interpolate([secondary_mass / primary_mass])[0]
                 )
 
                 # TODO: ecc2
 
-                if(multiplicity == 4):
-                    # quadruple system. 
+                if multiplicity == 4:
+                    # quadruple system.
                     # TODO: Ask Rob about the strructure of the quadruple. Is htis only double binary quadrupples?
 
                     ############################################################
@@ -1638,20 +1660,26 @@ def Moe_de_Stefano_2017_pdf(options):
                     # as for any other stars but Pmax must be such that
                     # sep3 < sep2 * 0.2
 
-                    max_sep3 = 0.2 * options['sep2'] * (1.0 + opts['ecc2'])
-                    max_per3 = calc_period_from_sep(options['M1+M2'], options['mmin'], max_sep3)
+                    max_sep3 = 0.2 * options["sep2"] * (1.0 + options["ecc2"])
+                    max_per3 = calc_period_from_sep(
+                        options["M1+M2"], options["mmin"], max_sep3
+                    )
 
                     if not Moecache.get("rinterpolator_log10P2", None):
-                        Moecache['rinterpolator_log10P2'] = py_rinterpolate.Rinterpolate(
-                            table=Moecache['period_distributions'],        # Contains the table of data 
-                            nparams=2,    # log10(M1+M2), log10P3
-                            ndata=2       # binary, triple
+                        Moecache[
+                            "rinterpolator_log10P2"
+                        ] = py_rinterpolate.Rinterpolate(
+                            table=Moecache[
+                                "period_distributions"
+                            ],  # Contains the table of data
+                            nparams=2,  # log10(M1+M2), log10P3
+                            ndata=2,  # binary, triple
                         )
 
-                    if not Moecache.get('P2_integrals', None):
-                        Moecache['P2_integrals'] = {}
+                    if not Moecache.get("P2_integrals", None):
+                        Moecache["P2_integrals"] = {}
 
-                    if not Moecache['P2_integrals'].get(options['M1+M2'], None):
+                    if not Moecache["P2_integrals"].get(options["M1+M2"], None):
                         # normalize because $min_per > 0 so not all the periods
                         # should be included
                         I_p2 = 0
@@ -1659,24 +1687,18 @@ def Moe_de_Stefano_2017_pdf(options):
 
                         for logP2 in np.arange(np.log10(min_P2), 10, dlogP2):
                             # dp_dlogP2 = dp / dlogP
-                            dp_dlogP2 = Moecache['rinterpolator_log10P2'].interpolate(
-                                [
-                                    np.log10(options['M1']),
-                                    logP2
-                                ]
+                            dp_dlogP2 = Moecache["rinterpolator_log10P2"].interpolate(
+                                [np.log10(options["M1"]), logP2]
                             )[0]
                             I_p2 += dp_dlogP2 * dlogP2
-                        Moecache['P2_integrals'][options['M1+M2']] = I_p2
+                        Moecache["P2_integrals"][options["M1+M2"]] = I_p2
 
                     # TODO: should this really be the log10P interpolator?
-                    p_val = Moecache['rinterpolator_log10P'].interpolate(
-                        [
-                            np.log10(options['M1+M2']),
-                            np.log10(options['P2'])
-                        ]
+                    p_val = Moecache["rinterpolator_log10P"].interpolate(
+                        [np.log10(options["M1+M2"]), np.log10(options["P2"])]
                     )[0]
 
-                    p_val = p_val / Moecache['P2_integrals']
+                    p_val = p_val / Moecache["P2_integrals"][options["M1+M2"]]
                     prob.append(p_val)
 
                     ############################################################
@@ -1685,65 +1707,94 @@ def Moe_de_Stefano_2017_pdf(options):
                     # we need to construct the q table for the given M1
                     # subject to qmin = Mmin/(M1+M2)
                     # Make a table storing Moe's data for q distributions
-                    # TODO: Check if this is correct. 
+                    # TODO: Check if this is correct.
                     if not Moecache.get("rinterpolator_q", None):
-                        Moecache['rinterpolator_q'] = py_rinterpolate.Rinterpolate(
-                            table=Moecache['q_distributions'],        # Contains the table of data 
-                            nparams=3,    # log10(M1+M2), log10P2, q
-                            ndata=1       # 
+                        Moecache["rinterpolator_q"] = py_rinterpolate.Rinterpolate(
+                            table=Moecache[
+                                "q_distributions"
+                            ],  # Contains the table of data
+                            nparams=3,  # log10(M1+M2), log10P2, q
+                            ndata=1,  #
                         )
 
                     # Build the table for q2
-                    primary_mass = options['M1+M2']
-                    secondary_mass = options['M3']
+                    primary_mass = options["M1+M2"]
+                    secondary_mass = options["M3"]
                     m_label = "M1+M2"
                     p_label = "P2"
 
                     build_q_table(options, m_label, p_label)
                     prob.append(
-                        Moecache['rinterpolator_q_given_{}_log10{}'.format(m_label, p_label)].interpolate(
-                                [secondary_mass/primary_mass]
-                            )[0]
+                        Moecache[
+                            "rinterpolator_q_given_{}_log10{}".format(m_label, p_label)
+                        ].interpolate([secondary_mass / primary_mass])[0]
                     )
 
                     # todo ecc 3
 
-    elif (multiplicity not in range(1, 5)):
+    elif multiplicity not in range(1, 5):
         print("Unknown multiplicity {}\n".format(multiplicity))
 
     # TODO: Translate this to get some better output
-    #printf "PDFPROBS (M1=%g, logP=%g q=%g) : @prob\n",
+    # printf "PDFPROBS (M1=%g, logP=%g q=%g) : @prob\n",
     #    $opts->{'M1'},
     #    $multiplicity == 2 ? ($opts->{'M2'}/$opts->{'M1'}) : -999,
     #    $multiplicity == 2 ? log10($opts->{'P'}) : -999;
 
     # the final probability density is the product of all the
     # probability density functions
-    # TODO: Where do we take into account the stepsize? probdens is not a probability if we dont take a stepsize 
+    # TODO: Where do we take into account the stepsize? probdens is not a probability if we dont take a stepsize
     prob_dens = 1 * np.prod(prob)
 
     print_info = 1
     if print_info:
         print("Probability density")
-        if multiplicity==1:
-            print("M1={} q=N/A log10P=N/A ({}): {} -> {}\n".format(
-                    options['M1'], len(prob), str(prob), prob_dens
+        if multiplicity == 1:
+            print(
+                "M1={} q=N/A log10P=N/A ({}): {} -> {}\n".format(
+                    options["M1"], len(prob), str(prob), prob_dens
                 )
             )
-        elif multipliticty==2:
-            print("M1={} q={} log10P={} ({}): {} -> {}\n".format(
-                    options['M1'], options['M2']/options['M1'], np.log10(options['P']), len(prob), str(prob), prob_dens
+        elif multiplicity == 2:
+            print(
+                "M1={} q={} log10P={} ({}): {} -> {}\n".format(
+                    options["M1"],
+                    options["M2"] / options["M1"],
+                    np.log10(options["P"]),
+                    len(prob),
+                    str(prob),
+                    prob_dens,
                 )
             )
-        elif multiplicity==3:
-            print("M1={} q={} log10P={} ({}): M3={}} P2={} ecc2={} : {} - {}".format(
-                    options['M1'], options['M2']/options['M1'], np.log10(options['P']), len(prob), options['M3'], np.log10('P2'), np.log10('ecc2'), str(prob), prob_dens
+        elif multiplicity == 3:
+            print(
+                "M1={} q={} log10P={} ({}): M3={} P2={} ecc2={} : {} - {}".format(
+                    options["M1"],
+                    options["M2"] / options["M1"],
+                    np.log10(options["P"]),
+                    len(prob),
+                    options["M3"],
+                    np.log10(options["P2"]),
+                    np.log10(options["ecc2"]),
+                    str(prob),
+                    prob_dens,
                 )
             )
-        elif multipliticty==4:
-            print("M1={} q={} log10P={} ({}) : M3={} P2={}} ecc2={} : M4={} P3={} ecc3={} : {} -> {}".format(
-                    options['M1'], options['M2']/options['M1'], np.log10(options['P']), len(prob), options['M3'], np.log10('P2'), np.log10(options['ecc2']), options['M4'], np.log10('P3'), np.log10(options['ecc3']), str(prob), prob_dens
+        elif multiplicity == 4:
+            print(
+                "M1={} q={} log10P={} ({}) : M3={} P2={} ecc2={} : M4={} P3={} ecc3={} : {} -> {}".format(
+                    options["M1"],
+                    options["M2"] / options["M1"],
+                    np.log10(options["P"]),
+                    len(prob),
+                    options["M3"],
+                    np.log10(options["P2"]),
+                    np.log10(options["ecc2"]),
+                    options["M4"],
+                    np.log10(options["P3"]),
+                    np.log10(options["ecc3"]),
+                    str(prob),
+                    prob_dens,
                 )
             )
     return prob_dens
-
diff --git a/binarycpython/utils/functions.py b/binarycpython/utils/functions.py
index 4fcb1b7fa..315902923 100644
--- a/binarycpython/utils/functions.py
+++ b/binarycpython/utils/functions.py
@@ -19,7 +19,7 @@ import sys
 import h5py
 import numpy as np
 
-from io import StringIO 
+from io import StringIO
 from typing import Union, Any
 from collections import defaultdict
 
@@ -29,7 +29,18 @@ from binarycpython import _binary_c_bindings
 # Unsorted
 ########################################################
 
+
+def imports():
+    for name, val in globals().items():
+        if isinstance(val, types.ModuleType):
+            yield val.__name__
+
+
 class catchtime(object):
+    """
+    Context manager to calculate time spent
+    """
+
     def __enter__(self):
         self.t = time.clock()
         return self
@@ -40,20 +51,30 @@ class catchtime(object):
 
 
 def is_capsule(o):
+    """
+    Function to tell whether object is a capsule
+    """
+
     t = type(o)
-    return t.__module__ == 'builtins' and t.__name__ == 'PyCapsule'
+    return t.__module__ == "builtins" and t.__name__ == "PyCapsule"
 
 
 class Capturing(list):
+    """
+    Context manager to capture output and store it
+    """
+
     def __enter__(self):
         self._stdout = sys.stdout
         sys.stdout = self._stringio = StringIO()
         return self
+
     def __exit__(self, *args):
         self.extend(self._stringio.getvalue().splitlines())
-        del self._stringio    # free up some memory
+        del self._stringio  # free up some memory
         sys.stdout = self._stdout
 
+
 ########################################################
 # utility functions
 ########################################################
@@ -496,6 +517,7 @@ def output_lines(output: str) -> list:
     else:
         return []
 
+
 def example_parse_output(output: str, selected_header: str) -> dict:
     """
     Function that parses output of binary_c. This version serves as an example and is quite detailed. Custom functions can be easier:
@@ -1257,6 +1279,7 @@ def merge_dicts(dict_1: dict, dict_2: dict) -> dict:
     #
     return new_dict
 
+
 def update_dicts(dict_1: dict, dict_2: dict) -> dict:
     """
     Function to update dict_1 with values of dict_2 in a recursive way.
@@ -1348,6 +1371,7 @@ def update_dicts(dict_1: dict, dict_2: dict) -> dict:
     #
     return new_dict
 
+
 def extract_ensemble_json_from_string(binary_c_output: str) -> dict:
     """
     Function to extract the ensemble_json information from a raw binary_c output string
diff --git a/binarycpython/utils/grid.py b/binarycpython/utils/grid.py
index 9f44a0931..56f73e215 100644
--- a/binarycpython/utils/grid.py
+++ b/binarycpython/utils/grid.py
@@ -28,21 +28,26 @@ import os
 import sys
 import copy
 import json
-import datetime
 import time
 import uuid
-import copy
+import types
 import logging
+import datetime
 import argparse
 import subprocess
 import importlib.util
 
+
 from typing import Union, Any
 from pathos.helpers import mp as pathos_multiprocess
 
 from pathos.pools import _ProcessPool as Pool
 
-from binarycpython.utils.grid_options_defaults import grid_options_defaults_dict
+from binarycpython.utils.grid_options_defaults import (
+    grid_options_defaults_dict,
+    moe_distefano_default_options,
+)
+
 from binarycpython.utils.custom_logging_functions import (
     autogen_C_logging_code,
     binary_c_log_code,
@@ -72,19 +78,10 @@ from binarycpython.utils.functions import (
 #     get_python_details,
 # )
 
-from binarycpython.utils.distribution_functions import (
-    Moecache,
-    LOG_LN_CONVERTER
-)
+from binarycpython.utils.distribution_functions import Moecache, LOG_LN_CONVERTER
 
 from binarycpython import _binary_c_bindings
 
-import types
-def imports():
-    for name, val in globals().items():
-        if isinstance(val, types.ModuleType):
-            yield val.__name__
-
 
 class Population:
     """
@@ -126,7 +123,7 @@ class Population:
         #
         self.process_ID = 0
 
-        # Create location to store results. Users should write to this dictionary. 
+        # Create location to store results. Users should write to this dictionary.
         self.grid_results = {}
 
         # Create location where ensemble results are written to
@@ -332,7 +329,12 @@ class Population:
 
         number = len(self.grid_options["_grid_variables"])
         for grid_variable in self.grid_options["_grid_variables"]:
-            if self.grid_options["_grid_variables"][grid_variable]['grid_variable_number'] == number-1:
+            if (
+                self.grid_options["_grid_variables"][grid_variable][
+                    "grid_variable_number"
+                ]
+                == number - 1
+            ):
                 return grid_variable
 
     def add_grid_variable(
@@ -389,7 +391,7 @@ class Population:
                 condition that has to be met in order for the grid generation to continue
                 example: condition = 'self.grid_options['binary']==1'
             gridtype:
-                Method on how the value range is sampled. Can be either edge (steps starting at the lower edge of the value range) or centered (steps starting at lower edge + 0.5 * stepsize). 
+                Method on how the value range is sampled. Can be either edge (steps starting at the lower edge of the value range) or centered (steps starting at lower edge + 0.5 * stepsize).
         """
 
         # Add grid_variable
@@ -614,7 +616,9 @@ class Population:
                 self.grid_options["custom_logging_func_memaddr"],
                 self.grid_options["_custom_logging_shared_library_file"],
             ) = create_and_load_logging_function(
-                custom_logging_code, verbose=self.grid_options["verbosity"], custom_tmp_dir=self.grid_options["tmp_dir"]
+                custom_logging_code,
+                verbose=self.grid_options["verbosity"],
+                custom_tmp_dir=self.grid_options["tmp_dir"],
             )
 
         elif self.grid_options["C_auto_logging"]:
@@ -634,7 +638,9 @@ class Population:
                 self.grid_options["custom_logging_func_memaddr"],
                 self.grid_options["_custom_logging_shared_library_file"],
             ) = create_and_load_logging_function(
-                custom_logging_code, verbose=self.grid_options["verbosity"], custom_tmp_dir=self.grid_options["tmp_dir"]
+                custom_logging_code,
+                verbose=self.grid_options["verbosity"],
+                custom_tmp_dir=self.grid_options["tmp_dir"],
             )
 
     ###################################################
@@ -857,7 +863,9 @@ class Population:
 
         # Put the values back as object properties
         self.grid_results = combined_output_dict["results"]
-        self.grid_ensemble_results = combined_output_dict['ensemble_results']   # Ensemble results are also passed as output from that dictionary
+        self.grid_ensemble_results = combined_output_dict[
+            "ensemble_results"
+        ]  # Ensemble results are also passed as output from that dictionary
         self.grid_options["_failed_count"] = combined_output_dict["_failed_count"]
         self.grid_options["_failed_prob"] = combined_output_dict["_failed_prob"]
         self.grid_options["_failed_systems_error_codes"] = list(
@@ -921,7 +929,13 @@ class Population:
         self.grid_options["_store_memaddr"] = _binary_c_bindings.return_store_memaddr()
 
         verbose_print(
-            "Process {} started at {}.\tUsing store memaddr {}".format(ID, datetime.datetime.now().isoformat(), self.grid_options["_store_memaddr"]), self.grid_options["verbosity"], 0
+            "Process {} started at {}.\tUsing store memaddr {}".format(
+                ID,
+                datetime.datetime.now().isoformat(),
+                self.grid_options["_store_memaddr"],
+            ),
+            self.grid_options["verbosity"],
+            0,
         )
 
         # Set the ensemble memaddr
@@ -936,7 +950,9 @@ class Population:
             }
 
             verbose_print(
-                "\tUsing persistent_data memaddr: {}".format(persistent_data_memaddr), self.grid_options["verbosity"], 0
+                "\tUsing persistent_data memaddr: {}".format(persistent_data_memaddr),
+                self.grid_options["verbosity"],
+                0,
             )
 
         #
@@ -960,7 +976,7 @@ class Population:
             0  # counter for the actual amt of systems this thread ran
         )
 
-        round_number_mod = 0 # rotating modulo
+        round_number_mod = 0  # rotating modulo
 
         total_time_calling_binary_c = 0
 
@@ -973,7 +989,9 @@ class Population:
                 system = next(generator)
 
                 # Check if the ID is the correct one for this process
-                if (localcounter + (ID + round_number_mod)) % self.grid_options["amt_cores"] == 0:
+                if (localcounter + (ID + round_number_mod)) % self.grid_options[
+                    "amt_cores"
+                ] == 0:
 
                     # Combine that with the other settings
                     full_system_dict = self.bse_options.copy()
@@ -985,22 +1003,34 @@ class Population:
 
                     #
                     verbose_print(
-                        "Process {} is handling system {}".format(ID, localcounter), self.grid_options["verbosity"], 2
+                        "Process {} is handling system {}".format(ID, localcounter),
+                        self.grid_options["verbosity"],
+                        2,
                     )
 
                     # In some cases, the whole run crashes. To be able to figure out which system that was on, we log each current system to a file (each thread has one). Each new system overrides the previous
-                    with open(os.path.join(self.grid_options["tmp_dir"], "thread_{}_current_system.txt".format(self.process_ID)), 'w') as f:
+                    with open(
+                        os.path.join(
+                            self.grid_options["tmp_dir"],
+                            "thread_{}_current_system.txt".format(self.process_ID),
+                        ),
+                        "w",
+                    ) as f:
                         binary_cmdline_string = self._return_argline(full_system_dict)
                         f.write(binary_cmdline_string)
 
                     start_runtime_binary_c = time.time()
 
                     # In the first system, explicitly check all the keys that are passed to see if they match the keys known to binary_c.
-                    # Won't do that every system cause that is a bit of a waste of computing time. 
+                    # Won't do that every system cause that is a bit of a waste of computing time.
                     if localcounter == 0:
                         for key in full_system_dict.keys():
                             if not key in self.available_keys:
-                                print("Error: Found a parameter unknown to binary_c: {}. Abort".format(key))
+                                print(
+                                    "Error: Found a parameter unknown to binary_c: {}. Abort".format(
+                                        key
+                                    )
+                                )
                                 raise ValueError
 
                     # TODO: build flag to actually evolve the system
@@ -1009,13 +1039,28 @@ class Population:
 
                     end_runtime_binary_c = time.time()
 
-                    total_time_calling_binary_c += end_runtime_binary_c - start_runtime_binary_c # keep track of total binary_c call time
+                    total_time_calling_binary_c += (
+                        end_runtime_binary_c - start_runtime_binary_c
+                    )  # keep track of total binary_c call time
 
                     # Debug line: logging all the lines
-                    if self.grid_options['log_runtime_systems']==1:
-                        with open(os.path.join(self.grid_options["tmp_dir"], "thread_{}_runtime_systems.txt".format(self.process_ID)), 'a+') as f:
-                            binary_cmdline_string = self._return_argline(full_system_dict)
-                            f.write("{} {}\n".format(end_runtime_binary_c-start_runtime_binary_c, binary_cmdline_string))
+                    if self.grid_options["log_runtime_systems"] == 1:
+                        with open(
+                            os.path.join(
+                                self.grid_options["tmp_dir"],
+                                "thread_{}_runtime_systems.txt".format(self.process_ID),
+                            ),
+                            "a+",
+                        ) as f:
+                            binary_cmdline_string = self._return_argline(
+                                full_system_dict
+                            )
+                            f.write(
+                                "{} {}\n".format(
+                                    end_runtime_binary_c - start_runtime_binary_c,
+                                    binary_cmdline_string,
+                                )
+                            )
 
                     # Keep track of systems:
                     probability_of_systems_run += full_system_dict["probability"]
@@ -1024,7 +1069,7 @@ class Population:
             except StopIteration:
                 running = False
 
-            if (localcounter+1)%self.grid_options["amt_cores"]==0:
+            if (localcounter + 1) % self.grid_options["amt_cores"] == 0:
                 round_number_mod += 1
 
             # print("thread {} round_nr_mod {}. localcounter {}".format(ID, round_number_mod, localcounter))
@@ -1036,7 +1081,11 @@ class Population:
         ensemble_json = {}  # Make sure it exists already
         if self.bse_options.get("ensemble", 0) == 1:
             verbose_print(
-                "Process {}: is freeing ensemble output (using persisten_data memaddr {})".format(ID, self.persistent_data_memory_dict[self.process_ID]), self.grid_options["verbosity"], 2
+                "Process {}: is freeing ensemble output (using persistent_data memaddr {})".format(
+                    ID, self.persistent_data_memory_dict[self.process_ID]
+                ),
+                self.grid_options["verbosity"],
+                2,
             )
 
             ensemble_raw_output = (
@@ -1046,16 +1095,20 @@ class Population:
             )
             if ensemble_raw_output == None:
                 verbose_print(
-                    "Process {}: Warning! Ensemble output is empty. ".format(ID), self.grid_options["verbosity"], 2
+                    "Process {}: Warning! Ensemble output is empty. ".format(ID),
+                    self.grid_options["verbosity"],
+                    2,
                 )
 
             #
             if self.grid_options["combine_ensemble_with_thread_joining"] == True:
                 verbose_print(
-                    "Process {}: Extracting ensemble info from raw string".format(ID), self.grid_options["verbosity"], 2
+                    "Process {}: Extracting ensemble info from raw string".format(ID),
+                    self.grid_options["verbosity"],
+                    2,
                 )
 
-                ensemble_json['ensemble'] = extract_ensemble_json_from_string(
+                ensemble_json["ensemble"] = extract_ensemble_json_from_string(
                     ensemble_raw_output
                 )  # Load this into a dict so that we can combine it later
 
@@ -1081,7 +1134,6 @@ class Population:
         # free store mem:
         _binary_c_bindings.free_store_memaddr(self.grid_options["_store_memaddr"])
 
-
         # Return a set of results and errors
         output_dict = {
             "results": self.grid_results,
@@ -1104,7 +1156,7 @@ class Population:
                 ID,
                 start_process_time.isoformat(),
                 end_process_time.isoformat(),
-                (end_process_time-start_process_time).total_seconds(),
+                (end_process_time - start_process_time).total_seconds(),
                 total_time_calling_binary_c,
                 number_of_systems_run,
                 probability_of_systems_run,
@@ -1261,9 +1313,9 @@ class Population:
             # #
             # self._load_grid_function()
 
-            # 
+            #
             self.grid_options["_system_generator"] = None
-            
+
         # Source file
         elif self.grid_options["evolution_type"] == "source_file":
             #######################
@@ -1432,7 +1484,6 @@ class Population:
         code_string += indent * depth + "eccentricity3 = None\n"
         code_string += indent * depth + "\n"
 
-
         # Prepare the probability
         code_string += indent * depth + "# setting probability lists\n"
         for grid_variable_el in sorted(
@@ -1460,7 +1511,6 @@ class Population:
             print("Constructing/adding: {}".format(grid_variable_el[0]))
             grid_variable = grid_variable_el[1]
 
-
             #########################
             # Setting up the forloop
             # Add comment for forloop
@@ -1481,18 +1531,21 @@ class Population:
             # TODO: Make clear that the phasevol only works good
             # TODO: add option to ignore this phasevol calculation and set it to 1
             #   if you sample linearly in that thing.
-            if not grid_variable['dphasevol']==-1: # A method to turn off this calculation and allow a phasevol = 1
+            if (
+                not grid_variable["dphasevol"] == -1
+            ):  # A method to turn off this calculation and allow a phasevol = 1
                 code_string += (
                     indent * depth
                     + "phasevol_{} = sampled_values_{}[1]-sampled_values_{}[0]".format(
-                        grid_variable["name"], grid_variable["name"], grid_variable["name"]
+                        grid_variable["name"],
+                        grid_variable["name"],
+                        grid_variable["name"],
                     )
                     + "\n"
                 )
             else:
-                code_string += (
-                    indent * depth
-                    + "phasevol_{} = 1\n".format(grid_variable["name"])
+                code_string += indent * depth + "phasevol_{} = 1\n".format(
+                    grid_variable["name"]
                 )
 
             # # Some print statement
@@ -2655,7 +2708,9 @@ class Population:
         """
 
         if binary_c_output:
-            if (binary_c_output.splitlines()[0].startswith("SYSTEM_ERROR")) or (binary_c_output.splitlines()[-1].startswith("SSYSTEM_ERROR")):
+            if (binary_c_output.splitlines()[0].startswith("SYSTEM_ERROR")) or (
+                binary_c_output.splitlines()[-1].startswith("SSYSTEM_ERROR")
+            ):
                 verbose_print(
                     "FAILING SYSTEM FOUND",
                     self.grid_options["verbosity"],
@@ -2676,8 +2731,13 @@ class Population:
                         .strip()
                     )
 
-                    if not error_code in self.grid_options["_failed_systems_error_codes"]:
-                        self.grid_options["_failed_systems_error_codes"].append(error_code)
+                    if (
+                        not error_code
+                        in self.grid_options["_failed_systems_error_codes"]
+                    ):
+                        self.grid_options["_failed_systems_error_codes"].append(
+                            error_code
+                        )
                 except ValueError:
                     verbose_print(
                         "Failed to extract the error-code",
@@ -2717,9 +2777,12 @@ class Population:
                         f.write(argstring + "\n")
         else:
             verbose_print(
-                "binary_c had 0 output. Which is strange", self.grid_options['verbosity'], 2)
+                "binary_c had 0 output. Which is strange",
+                self.grid_options["verbosity"],
+                2,
+            )
 
-################################################################################################
+    ################################################################################################
     def Moe_de_Stefano_2017(self, options=None):
         """
         Class method of the population class
@@ -2734,185 +2797,76 @@ class Population:
             print("Please provide a options dictionary.")
             raise ValueError
 
-        if not options.get('file', None):
-            print("Please set option 'file' to point to the Moe and de Stefano JSON file")
+        if not options.get("file", None):
+            print(
+                "Please set option 'file' to point to the Moe and de Stefano JSON file"
+            )
             raise ValueError
         else:
-            if not os.path.isfile(options['file']):
-                print("The provided 'file' Moe and de Stefano JSON file does not seem to exist at {}".format(options['file']))
+            if not os.path.isfile(options["file"]):
+                print(
+                    "The provided 'file' Moe and de Stefano JSON file does not seem to exist at {}".format(
+                        options["file"]
+                    )
+                )
                 raise ValueError
-            if not options['file'].endswith('.json'):
+            if not options["file"].endswith(".json"):
                 print("Provided filename does not end with .json")
 
         # TODO: put this in defaults
-
-        # Set default values
-        default_options = {
-            'resolutions':
-            {
-                'M': [
-                    100, # M1 (irrelevant if time adaptive)
-                    40,  # M2 (i.e. q)
-                    10,  # M3 currently unused
-                    10   # M4 currently unused
-                ],
-                'logP': [
-                    40, # P2 (binary period)
-                    2,  # P3 (triple period) currently unused
-                    2   # P4 (quadruple period) currently unused
-                ],
-                'ecc': [
-                    10, # e2 (binary eccentricity) currently unused
-                    10, # e3 (triple eccentricity) currently unused
-                    10  # e4 (quadruple eccentricity) currently unused
-                ],
-            },
-
-            'ranges':
-            {
-                # stellar masses (Msun)
-                'M': [
-                    0.07, # maximum brown dwarf mass == minimum stellar mass
-                    80.0   # (rather arbitrary) upper mass cutoff
-                ],
-
-                'q': [
-                    None, # artificial qmin : set to None to use default
-                    None  # artificial qmax : set to None to use default
-                ],
-
-                'logP': [
-                    0.0, # 0 = log10(1 day)
-                    8.0  # 8 = log10(10^8 days)
-                ],
-
-                'ecc': [
-                    0.0,
-                    1.0
-                ]
-            },
-
-            # minimum stellar mass
-            'Mmin':0.07,
-
-            # multiplicity model (as a function of log10M1)
-            #
-            # You can use 'Poisson' which uses the system multiplicty
-            # given by Moe and maps this to single/binary/triple/quad
-            # fractions.
-            #
-            # Alternatively, 'data' takes the fractions directly
-            # from the data, but then triples and quadruples are
-            # combined (and there are NO quadruples).
-            'multiplicity_model': 'Poisson',
-
-            # multiplicity modulator:
-            # [single, binary, triple, quadruple]
-            #
-            # e.g. [1,0,0,0] for single stars only
-            #      [0,1,0,0] for binary stars only
-            #
-            # defaults to [1,1,1,1] i.e. all types
-            #
-            'multiplicity_modulator': [
-                1, # single
-                1, # binary
-                1, # triple
-                1, # quadruple
-            ],
-
-            # given a mix of multiplities, you can either (noting that
-            # here (S,B,T,Q) = appropriate modulator * model(S,B,T,Q) )
-            #
-            # 'norm'  : normalize so the whole population is 1.0
-            #           after implementing the appropriate fractions
-            #           S/(S+B+T+Q), B/(S+B+T+Q), T/(S+B+T+Q), Q/(S+B+T+Q)
-            #
-            # 'raw'   : stick to what is predicted, i.e.
-            #           S/(S+B+T+Q), B/(S+B+T+Q), T/(S+B+T+Q), Q/(S+B+T+Q)
-            #           without normalization
-            #           (in which case the total probability < 1.0 unless
-            #            all you use single, binary, triple and quadruple)
-            #
-            # 'merge' : e.g. if you only have single and binary,
-            #           add the triples and quadruples to the binaries, so
-            #           binaries represent all multiple systems
-            #           ...
-            #           *** this is canonical binary population synthesis ***
-            #
-            #           Note: if multiplicity_modulator == [1,1,1,1] this
-            #                 option does nothing (equivalent to 'raw').
-            #
-            #
-            # note: if you only set one multiplicity_modulator
-            # to 1, and all the others to 0, then normalizing
-            # will mean that you effectively have the same number
-            # of stars as single, binary, triple or quad (whichever
-            # is non-zero) i.e. the multiplicity fraction is ignored.
-            # This is probably not useful except for
-            # testing purposes or comparing to old grids.
-            'normalize_multiplicities': 'merge',
-
-            # q extrapolation (below 0.15) method
-            # none
-            # flat
-            # linear2
-            # plaw2
-            # nolowq
-            'q_low_extrapolation_method': 'linear2',
-            'q_high_extrapolation_method': 'linear2',
-        }
+        default_options = copy.deepcopy(moe_distefano_default_options)
 
         # Take the option dictionary that was given and override.
         options = update_dicts(default_options, options)
 
         # Write to a file
-        with open('/tmp/moeopts.dat', 'w') as f:
+        with open("/tmp/moeopts.dat", "w") as f:
             f.write(json.dumps(options, indent=4))
 
         # Read input data and Clean up the data if there are whitespaces around the keys
-        with open(options['file'], 'r') as data_filehandle:
+        with open(options["file"], "r") as data_filehandle:
             datafile_data = data_filehandle.read()
-        datafile_data = datafile_data.replace("\" ", "\"")
-        datafile_data = datafile_data.replace(" \"", "\"")
-        datafile_data = datafile_data.replace(" \"", "\"")
+        datafile_data = datafile_data.replace('" ', '"')
+        datafile_data = datafile_data.replace(' "', '"')
+        datafile_data = datafile_data.replace(' "', '"')
         json_data = json.loads(datafile_data)
 
         # entry of log10M1 is a list containing 1 dict. We can take the dict out of the list
-        json_data['log10M1'] = json_data['log10M1'][0]
+        json_data["log10M1"] = json_data["log10M1"][0]
 
         # Get all the masses
-        logmasses = sorted(json_data['log10M1'].keys())
+        logmasses = sorted(json_data["log10M1"].keys())
         if not logmasses:
             print("The table does not contain masses")
             raise ValueError
-        with open("/tmp/moe.log", 'w') as logfile:
+        with open("/tmp/moe.log", "w") as logfile:
             logfile.write("log₁₀Masses(M☉) {}\n".format(logmasses))
 
         # Get all the periods and see if they are all consistently present
         logperiods = []
         for logmass in logmasses:
             if not logperiods:
-                logperiods = sorted(json_data['log10M1'][logmass]['logP'])
+                logperiods = sorted(json_data["log10M1"][logmass]["logP"])
                 dlog10P = float(logperiods[1]) - float(logperiods[0])
 
-            current_logperiods = sorted(json_data['log10M1'][logmass]['logP'])
+            current_logperiods = sorted(json_data["log10M1"][logmass]["logP"])
 
             if not (logperiods == current_logperiods):
                 print("Period values are not consistent throughout the dataset")
                 raise ValueError
 
-
             ############################################################
             # log10period binwidth : of course this assumes a fixed
             # binwidth, so we check for this too.
 
-            for i in range(len(current_logperiods)-1):
-                if not dlog10P == (float(current_logperiods[i+1]) - float(current_logperiods[i])):
+            for i in range(len(current_logperiods) - 1):
+                if not dlog10P == (
+                    float(current_logperiods[i + 1]) - float(current_logperiods[i])
+                ):
                     print("Period spacing is not consistent throughout the dataset")
                     raise ValueError
 
-        with open("/tmp/moe.log", 'a') as logfile:
+        with open("/tmp/moe.log", "a") as logfile:
             logfile.write("log₁₀Periods(days) {}\n".format(logperiods))
 
         # Fill the global dict
@@ -2920,17 +2874,17 @@ class Population:
             # print("Logmass: {}".format(logmass))
 
             # Create the multiplicity table
-            if not Moecache.get('multiplicity_table', None):
-                Moecache['multiplicity_table'] = []
+            if not Moecache.get("multiplicity_table", None):
+                Moecache["multiplicity_table"] = []
 
             # multiplicity as a function of primary mass
-            Moecache['multiplicity_table'].append(
+            Moecache["multiplicity_table"].append(
                 [
                     float(logmass),
-                    json_data['log10M1'][logmass]['f_multi'],
-                    json_data['log10M1'][logmass]['single star fraction'],
-                    json_data['log10M1'][logmass]['binary star fraction'],
-                    json_data['log10M1'][logmass]['triple/quad star fraction'],
+                    json_data["log10M1"][logmass]["f_multi"],
+                    json_data["log10M1"][logmass]["single star fraction"],
+                    json_data["log10M1"][logmass]["binary star fraction"],
+                    json_data["log10M1"][logmass]["triple/quad star fraction"],
                 ]
             )
 
@@ -2941,7 +2895,7 @@ class Population:
 
             ############################################################
             # loop over either binary or triple-outer periods
-            first = 1;
+            first = 1
 
             # Go over the periods
             for logperiod in logperiods:
@@ -2957,33 +2911,45 @@ class Population:
                     first = 0
 
                     # Create the multiplicity table
-                    if not Moecache.get('period_distributions', None):
-                        Moecache['period_distributions'] = []
+                    if not Moecache.get("period_distributions", None):
+                        Moecache["period_distributions"] = []
 
                     ############################################################
                     # lower bound the period distributions to zero probability
-                    Moecache['period_distributions'].append(
+                    Moecache["period_distributions"].append(
                         [
-                            float(logmass), 
+                            float(logmass),
                             float(logperiod) - 0.5 * dlog10P - epslog10P,
                             0.0,
-                            0.0
+                            0.0,
                         ]
                     )
-                    Moecache['period_distributions'].append(
+                    Moecache["period_distributions"].append(
                         [
-                            float(logmass), 
+                            float(logmass),
                             float(logperiod) - 0.5 * dlog10P,
-                            json_data['log10M1'][logmass]['logP'][logperiod]['normed_bin_frac_p_dist']/dlog10P,
-                            json_data['log10M1'][logmass]['logP'][logperiod]['normed_tripquad_frac_p_dist']/dlog10P
+                            json_data["log10M1"][logmass]["logP"][logperiod][
+                                "normed_bin_frac_p_dist"
+                            ]
+                            / dlog10P,
+                            json_data["log10M1"][logmass]["logP"][logperiod][
+                                "normed_tripquad_frac_p_dist"
+                            ]
+                            / dlog10P,
                         ]
                     )
-                Moecache['period_distributions'].append(
+                Moecache["period_distributions"].append(
                     [
-                        float(logmass), 
+                        float(logmass),
                         float(logperiod),
-                        json_data['log10M1'][logmass]['logP'][logperiod]['normed_bin_frac_p_dist']/dlog10P,
-                        json_data['log10M1'][logmass]['logP'][logperiod]['normed_tripquad_frac_p_dist']/dlog10P
+                        json_data["log10M1"][logmass]["logP"][logperiod][
+                            "normed_bin_frac_p_dist"
+                        ]
+                        / dlog10P,
+                        json_data["log10M1"][logmass]["logP"][logperiod][
+                            "normed_tripquad_frac_p_dist"
+                        ]
+                        / dlog10P,
                     ]
                 )
 
@@ -2992,85 +2958,81 @@ class Population:
                 #
                 # First, get a list of the qs given by Moe
                 #
-                qs = sorted(json_data['log10M1'][logmass]['logP'][logperiod]['q'])
+                qs = sorted(json_data["log10M1"][logmass]["logP"][logperiod]["q"])
                 qdata = {}
                 I_q = 0.0
 
                 # get the q distribution from M&S, which ranges 0.15 to 0.95
                 for q in qs:
-                    val = json_data['log10M1'][logmass]['logP'][logperiod]['q'][q]
+                    val = json_data["log10M1"][logmass]["logP"][logperiod]["q"][q]
                     qdata[q] = val
                     I_q += val
 
                 # hence normalize to 1.0
                 for q in qs:
-                    qdata[q] = qdata[q]/I_q
+                    qdata[q] = qdata[q] / I_q
 
                 # Create the multiplicity table
-                if not Moecache.get('q_distributions', None):
-                    Moecache['q_distributions'] = []
+                if not Moecache.get("q_distributions", None):
+                    Moecache["q_distributions"] = []
 
                 for q in qs:
-                    Moecache['q_distributions'].append(
-                        [
-                            float(logmass),
-                            float(logperiod),
-                            float(q),
-                            qdata[q]
-                        ]
+                    Moecache["q_distributions"].append(
+                        [float(logmass), float(logperiod), float(q), qdata[q]]
                     )
 
                 ############################################################
                 # eccentricity distributions as a function of mass, period, ecc
-                eccs = sorted(json_data['log10M1'][logmass]['logP'][logperiod]['e'])
+                eccs = sorted(json_data["log10M1"][logmass]["logP"][logperiod]["e"])
                 ecc_data = {}
                 I_e = 0
 
                 # Read out the data
                 for ecc in eccs:
-                    val = json_data['log10M1'][logmass]['logP'][logperiod]['e'][ecc]
+                    val = json_data["log10M1"][logmass]["logP"][logperiod]["e"][ecc]
                     ecc_data[ecc] = val
                     I_e += val
 
                 # Normalize the data
                 for ecc in eccs:
-                    ecc_data[ecc] = ecc_data[ecc]/I_e
+                    ecc_data[ecc] = ecc_data[ecc] / I_e
 
                 # Create the multiplicity table
-                if not Moecache.get('ecc_distributions', None):
-                    Moecache['ecc_distributions'] = []
+                if not Moecache.get("ecc_distributions", None):
+                    Moecache["ecc_distributions"] = []
 
                 for ecc in eccs:
-                    Moecache['ecc_distributions'].append(
-                        [
-                            float(logmass),
-                            float(logperiod),
-                            float(ecc),
-                            ecc_data[ecc]
-                        ]
+                    Moecache["ecc_distributions"].append(
+                        [float(logmass), float(logperiod), float(ecc), ecc_data[ecc]]
                     )
 
             ############################################################
             # upper bound the period distributions to zero probability
-            Moecache['period_distributions'].append(
+            Moecache["period_distributions"].append(
                 [
-                    float(logmass), 
+                    float(logmass),
                     float(logperiods[-1]) + 0.5 * dlog10P,
-                    json_data['log10M1'][logmass]['logP'][logperiods[-1]]['normed_bin_frac_p_dist']/dlog10P,
-                    json_data['log10M1'][logmass]['logP'][logperiods[-1]]['normed_tripquad_frac_p_dist']/dlog10P
+                    json_data["log10M1"][logmass]["logP"][logperiods[-1]][
+                        "normed_bin_frac_p_dist"
+                    ]
+                    / dlog10P,
+                    json_data["log10M1"][logmass]["logP"][logperiods[-1]][
+                        "normed_tripquad_frac_p_dist"
+                    ]
+                    / dlog10P,
                 ]
             )
-            Moecache['period_distributions'].append(
+            Moecache["period_distributions"].append(
                 [
-                    float(logmass), 
+                    float(logmass),
                     float(logperiods[-1]) + 0.5 * dlog10P + epslog10P,
                     0.0,
-                    0.0
+                    0.0,
                 ]
             )
 
         # Write to logfile
-        with open("/tmp/moecache.json", 'w') as cache_filehandle:
+        with open("/tmp/moecache.json", "w") as cache_filehandle:
             cache_filehandle.write(json.dumps(Moecache, indent=4))
 
         # TODO: remove this and make the plotting of the multiplicity fractions more modular
@@ -3110,13 +3072,12 @@ class Population:
         # the probability density function in the gridcode
         # TODO: check if that works, if we just pass the options as a dict
 
-
         ############################################################
         # first, the multiplicity, this is 1,2,3,4, ...
         # for singles, binaries, triples, quadruples, ...
         max_multiplicity = 0
         for i in range(1, 5):
-            mod = options['multiplicity_modulator'][i-1]
+            mod = options["multiplicity_modulator"][i - 1]
             print("Check mod {} = {}".format(i, mod))
             if mod > 0:
                 max_multiplicity = i
@@ -3127,281 +3088,328 @@ class Population:
 
         # Multiplicity
         self.add_grid_variable(
-                name='multiplicity',
-                parameter_name='multiplicity',
-                longname='multiplicity',
-                valuerange=[1, max_multiplicity],
-                resolution=1,
-                spacingfunc='range(1, 5)',
-                precode='self.grid_options["multiplicity"] = multiplicity; options={}'.format(options),
-                condition='({}[multiplicity-1] > 0)'.format(str(options['multiplicity_modulator'])),
-                gridtype='edge',
-                dphasevol=-1,
-                probdist=1,
+            name="multiplicity",
+            parameter_name="multiplicity",
+            longname="multiplicity",
+            valuerange=[1, max_multiplicity],
+            resolution=1,
+            spacingfunc="range(1, 5)",
+            precode='self.grid_options["multiplicity"] = multiplicity; options={}'.format(
+                options
+            ),
+            condition="({}[multiplicity-1] > 0)".format(
+                str(options["multiplicity_modulator"])
+            ),
+            gridtype="edge",
+            dphasevol=-1,
+            probdist=1,
         )
 
         ############################################################
         # always require M1, for all systems
         #
 
-        # TODO: put in option for the time-adaptive grid. 
+        # TODO: put in option for the time-adaptive grid.
 
         # log-spaced m1 with given resolution
         self.add_grid_variable(
-            name='lnm1',
-            parameter_name='M_1',
-            longname='Primary mass',
+            name="lnm1",
+            parameter_name="M_1",
+            longname="Primary mass",
             resolution="options['resolutions']['M'][0]",
-            spacingfunc="const(np.log({}), np.log({}), {})".format(options['ranges']['M'][0], options['ranges']['M'][1], options['resolutions']['M'][0]),
+            spacingfunc="const(np.log({}), np.log({}), {})".format(
+                options["ranges"]["M"][0],
+                options["ranges"]["M"][1],
+                options["resolutions"]["M"][0],
+            ),
             valuerange=[
-                'np.log({})'.format(options['ranges']['M'][0]),
-                'np.log({})'.format(options['ranges']['M'][1])
+                "np.log({})".format(options["ranges"]["M"][0]),
+                "np.log({})".format(options["ranges"]["M"][1]),
             ],
-            gridtype='centred',
-            dphasevol='dlnm1',
+            gridtype="centred",
+            dphasevol="dlnm1",
             precode='M_1 = np.exp(lnm1); options["M1"]=M_1',
-            probdist='Moe_de_Stefano_2017_pdf({{{}, {}, {}}}) if multiplicity == 1 else 1'.format(str(options)[1:-1], "'multiplicity': multiplicity", "'M1': M_1")
+            probdist="Moe_de_Stefano_2017_pdf({{{}, {}, {}}}) if multiplicity == 1 else 1".format(
+                str(options)[1:-1], "'multiplicity': multiplicity", "'M1': M_1"
+            ),
         )
 
         # Go to higher multiplicities
-        if (max_multiplicity >= 2):
+        if max_multiplicity >= 2:
             # binaries: period
             self.add_grid_variable(
-                name='log10per',
-                parameter_name='orbital_period',
-                longname='log10(Orbital_Period)',
-                resolution=options['resolutions']['logP'][0],
+                name="log10per",
+                parameter_name="orbital_period",
+                longname="log10(Orbital_Period)",
+                resolution=options["resolutions"]["logP"][0],
                 probdist=1.0,
                 condition='(self.grid_options["grid_options"]["multiplicity"] >= 2)',
                 branchpoint=1,
-                gridtype='centred',
-                dphasevol='({} * dlog10per)'.format(LOG_LN_CONVERTER),
-                valuerange=[
-                    options['ranges']['logP'][0],
-                    options['ranges']['logP'][1]
-                ],
-                spacingfunc="const({}, {}, {})".format(options['ranges']['logP'][0], options['ranges']['logP'][1], options['resolutions']['logP'][0]),
+                gridtype="centred",
+                dphasevol="({} * dlog10per)".format(LOG_LN_CONVERTER),
+                valuerange=[options["ranges"]["logP"][0], options["ranges"]["logP"][1]],
+                spacingfunc="const({}, {}, {})".format(
+                    options["ranges"]["logP"][0],
+                    options["ranges"]["logP"][1],
+                    options["resolutions"]["logP"][0],
+                ),
                 precode="""orbital_period = 10.0**log10per
     qmin={}/m1
     qmax=self.maximum_mass_ratio_for_RLOF(m1, per)
-    """.format(options.get('Mmin', 0.07))
+    """.format(
+                    options.get("Mmin", 0.07)
+                ),
             )
 
             # binaries: mass ratio
             self.add_grid_variable(
-                name='q',
-                parameter_name='M_2',
-                longname='Mass ratio',
+                name="q",
+                parameter_name="M_2",
+                longname="Mass ratio",
                 valuerange=[
-                    options['ranges']['q'][0] if options.get('ranges', {}).get('q', None) else "options.get('Mmin', 0.07)/m1",
-                    options['ranges']['q'][1] if options.get('ranges', {}).get('q', None) else "qmax"
+                    options["ranges"]["q"][0]
+                    if options.get("ranges", {}).get("q", None)
+                    else "options.get('Mmin', 0.07)/m1",
+                    options["ranges"]["q"][1]
+                    if options.get("ranges", {}).get("q", None)
+                    else "qmax",
                 ],
-                resolution=options['resolutions']['M'][1],
+                resolution=options["resolutions"]["M"][1],
                 probdist=1,
-                gridtype='centred',
-                dphasevol='dq',
+                gridtype="centred",
+                dphasevol="dq",
                 precode="""
     M_2 = q * m1
     sep = calc_sep_from_period(M_1, M_2, orbital_period)
     """,
                 spacingfunc="const({}, {}, {})".format(
-                    options['ranges']['q'][0] if options.get('ranges', {}).get('q', None) else "options.get('Mmin', 0.07)/m1",
-                    options['ranges']['q'][1] if options.get('ranges', {}).get('q', None) else "qmax",
-                    options['resolutions']['M'][1]
-                )
+                    options["ranges"]["q"][0]
+                    if options.get("ranges", {}).get("q", None)
+                    else "options.get('Mmin', 0.07)/m1",
+                    options["ranges"]["q"][1]
+                    if options.get("ranges", {}).get("q", None)
+                    else "qmax",
+                    options["resolutions"]["M"][1],
+                ),
             )
 
             # (optional) binaries: eccentricity
-            if options['resolutions']['ecc'][0] > 0:
+            if options["resolutions"]["ecc"][0] > 0:
                 self.add_grid_variable(
-                    name='ecc',
-                    parameter_name='eccentricity',
-                    longname='Eccentricity',
-                    resolution=options['resolutions']['ecc'][0],
+                    name="ecc",
+                    parameter_name="eccentricity",
+                    longname="Eccentricity",
+                    resolution=options["resolutions"]["ecc"][0],
                     probdist=1,
-                    gridtype='centred',
-                    dphasevol='decc',
-                    precode='eccentricity=ecc',
+                    gridtype="centred",
+                    dphasevol="decc",
+                    precode="eccentricity=ecc",
                     valuerange=[
-                        options['ranges']['ecc'][0], # Just fail if not defined.
-                        options['ranges']['ecc'][1]
+                        options["ranges"]["ecc"][0],  # Just fail if not defined.
+                        options["ranges"]["ecc"][1],
                     ],
                     spacingfunc="const({}, {}, {})".format(
-                        options['ranges']['ecc'][0], # Just fail if not defined.
-                        options['ranges']['ecc'][1],
-                        options['resolutions']['ecc'][0]
-                    )
+                        options["ranges"]["ecc"][0],  # Just fail if not defined.
+                        options["ranges"]["ecc"][1],
+                        options["resolutions"]["ecc"][0],
+                    ),
                 )
 
             # Now for triples and quadruples
-            if (max_multiplicity >= 3):
+            if max_multiplicity >= 3:
                 # Triple: period
                 self.add_grid_variable(
-                    name='log10per2',
-                    parameter_name='orbital_period_triple',
-                    longname='log10(Orbital_Period2)',
-                    resolution=options['resolutions']['logP'][1],
+                    name="log10per2",
+                    parameter_name="orbital_period_triple",
+                    longname="log10(Orbital_Period2)",
+                    resolution=options["resolutions"]["logP"][1],
                     probdist=1.0,
                     condition='(self.grid_options["grid_options"]["multiplicity"] >= 3)',
                     branchpoint=1,
-                    gridtype='centred',
-                    dphasevol='({} * dlog10per2)'.format(LOG_LN_CONVERTER),
+                    gridtype="centred",
+                    dphasevol="({} * dlog10per2)".format(LOG_LN_CONVERTER),
                     valuerange=[
-                        options['ranges']['logP'][0],
-                        options['ranges']['logP'][1]
+                        options["ranges"]["logP"][0],
+                        options["ranges"]["logP"][1],
                     ],
-                    spacingfunc="const({}, {}, {})".format(options['ranges']['logP'][0], options['ranges']['logP'][1], options['resolutions']['logP'][1]),
+                    spacingfunc="const({}, {}, {})".format(
+                        options["ranges"]["logP"][0],
+                        options["ranges"]["logP"][1],
+                        options["resolutions"]["logP"][1],
+                    ),
                     precode="""orbital_period_triple = 10.0**log10per2
     q2min={}/(M_1+M_2)
     q2max=self.maximum_mass_ratio_for_RLOF(M_1+M_2, orbital_period_triple)
-    """.format(options.get('Mmin', 0.07))
+    """.format(
+                        options.get("Mmin", 0.07)
+                    ),
                 )
 
                 # Triples: mass ratio
                 # Note, the mass ratio is M_outer/M_inner
-                print("M2 resolution {}".format(options['resolutions']['M'][2]))
+                print("M2 resolution {}".format(options["resolutions"]["M"][2]))
                 self.add_grid_variable(
-                    name='q2',
-                    parameter_name='M_3',
-                    longname='Mass ratio outer/inner',
+                    name="q2",
+                    parameter_name="M_3",
+                    longname="Mass ratio outer/inner",
                     valuerange=[
-                        options['ranges']['q'][0] if options.get('ranges', {}).get('q', None) else "options.get('Mmin', 0.07)/(m1+m2)",
-                        options['ranges']['q'][1] if options.get('ranges', {}).get('q', None) else "q2max"
+                        options["ranges"]["q"][0]
+                        if options.get("ranges", {}).get("q", None)
+                        else "options.get('Mmin', 0.07)/(m1+m2)",
+                        options["ranges"]["q"][1]
+                        if options.get("ranges", {}).get("q", None)
+                        else "q2max",
                     ],
-                    resolution=options['resolutions']['M'][2],
+                    resolution=options["resolutions"]["M"][2],
                     probdist=1,
-                    gridtype='centred',
-                    dphasevol='dq2',
+                    gridtype="centred",
+                    dphasevol="dq2",
                     precode="""
     M_3 = q2 * (M_1 + M_2)
     sep2 = calc_sep_from_period((M_1+M_2), M_3, orbital_period_triple)
     eccentricity2=0
     """,
                     spacingfunc="const({}, {}, {})".format(
-                        options['ranges']['q'][0] if options.get('ranges', {}).get('q', None) else "options.get('Mmin', 0.07)/(m1+m2)",
-                        options['ranges']['q'][1] if options.get('ranges', {}).get('q', None) else "q2max",
-                        options['resolutions']['M'][2]
-                    )
+                        options["ranges"]["q"][0]
+                        if options.get("ranges", {}).get("q", None)
+                        else "options.get('Mmin', 0.07)/(m1+m2)",
+                        options["ranges"]["q"][1]
+                        if options.get("ranges", {}).get("q", None)
+                        else "q2max",
+                        options["resolutions"]["M"][2],
+                    ),
                 )
 
                 # (optional) triples: eccentricity
-                if options['resolutions']['ecc'][1] > 0:
+                if options["resolutions"]["ecc"][1] > 0:
                     self.add_grid_variable(
-                        name='ecc2',
-                        parameter_name='eccentricity2',
-                        longname='Eccentricity of the triple',
-                        resolution=options['resolutions']['ecc'][1],
+                        name="ecc2",
+                        parameter_name="eccentricity2",
+                        longname="Eccentricity of the triple",
+                        resolution=options["resolutions"]["ecc"][1],
                         probdist=1,
-                        gridtype='centred',
-                        dphasevol='decc2',
-                        precode='eccentricity2=ecc2',
+                        gridtype="centred",
+                        dphasevol="decc2",
+                        precode="eccentricity2=ecc2",
                         valuerange=[
-                            options['ranges']['ecc'][0], # Just fail if not defined.
-                            options['ranges']['ecc'][1]
+                            options["ranges"]["ecc"][0],  # Just fail if not defined.
+                            options["ranges"]["ecc"][1],
                         ],
                         spacingfunc="const({}, {}, {})".format(
-                            options['ranges']['ecc'][0], # Just fail if not defined.
-                            options['ranges']['ecc'][1],
-                            options['resolutions']['ecc'][1]
-                        )
+                            options["ranges"]["ecc"][0],  # Just fail if not defined.
+                            options["ranges"]["ecc"][1],
+                            options["resolutions"]["ecc"][1],
+                        ),
                     )
 
-                if (max_multiplicity==4):
+                if max_multiplicity == 4:
                     # Quadruple: period
                     self.add_grid_variable(
-                        name='log10per3',
-                        parameter_name='orbital_period_quadruple',
-                        longname='log10(Orbital_Period3)',
-                        resolution=options['resolutions']['logP'][2],
+                        name="log10per3",
+                        parameter_name="orbital_period_quadruple",
+                        longname="log10(Orbital_Period3)",
+                        resolution=options["resolutions"]["logP"][2],
                         probdist=1.0,
                         condition='(self.grid_options["grid_options"]["multiplicity"] >= 4)',
                         branchpoint=1,
-                        gridtype='centred',
-                        dphasevol='({} * dlog10per3)'.format(LOG_LN_CONVERTER),
+                        gridtype="centred",
+                        dphasevol="({} * dlog10per3)".format(LOG_LN_CONVERTER),
                         valuerange=[
-                            options['ranges']['logP'][0],
-                            options['ranges']['logP'][1]
+                            options["ranges"]["logP"][0],
+                            options["ranges"]["logP"][1],
                         ],
                         spacingfunc="const({}, {}, {})".format(
-                            options['ranges']['logP'][0], 
-                            options['ranges']['logP'][1], 
-                            options['resolutions']['logP'][2]
+                            options["ranges"]["logP"][0],
+                            options["ranges"]["logP"][1],
+                            options["resolutions"]["logP"][2],
                         ),
                         precode="""orbital_period_quadruple = 10.0**log10per3
     q3min={}/(M_3)
     q3max=self.maximum_mass_ratio_for_RLOF(M_3, orbital_period_quadruple)
-    """.format(options.get('Mmin', 0.07))
+    """.format(
+                            options.get("Mmin", 0.07)
+                        ),
                     )
 
                     # Quadruple: mass ratio : M_outer / M_inner
-                    print("M3 resolution {}".format(options['resolutions']['M'][3]))
+                    print("M3 resolution {}".format(options["resolutions"]["M"][3]))
                     self.add_grid_variable(
-                        name='q3',
-                        parameter_name='M_4',
-                        longname='Mass ratio outer low/outer high',
+                        name="q3",
+                        parameter_name="M_4",
+                        longname="Mass ratio outer low/outer high",
                         valuerange=[
-                            options['ranges']['q'][0] if options.get('ranges', {}).get('q', None) else "options.get('Mmin', 0.07)/(m3)",
-                            options['ranges']['q'][1] if options.get('ranges', {}).get('q', None) else "q3max"
+                            options["ranges"]["q"][0]
+                            if options.get("ranges", {}).get("q", None)
+                            else "options.get('Mmin', 0.07)/(m3)",
+                            options["ranges"]["q"][1]
+                            if options.get("ranges", {}).get("q", None)
+                            else "q3max",
                         ],
-                        resolution=options['resolutions']['M'][3],
+                        resolution=options["resolutions"]["M"][3],
                         probdist=1,
-                        gridtype='centred',
-                        dphasevol='dq3',
+                        gridtype="centred",
+                        dphasevol="dq3",
                         precode="""
     M_4 = q3 * M_3
     sep3 = calc_sep_from_period((M_3), M_4, orbital_period_quadruple)
     eccentricity3=0
         """,
                         spacingfunc="const({}, {}, {})".format(
-                            options['ranges']['q'][0] if options.get('ranges', {}).get('q', None) else "options.get('Mmin', 0.07)/(m3)",
-                            options['ranges']['q'][1] if options.get('ranges', {}).get('q', None) else "q3max",
-                            options['resolutions']['M'][2]
-                        )
+                            options["ranges"]["q"][0]
+                            if options.get("ranges", {}).get("q", None)
+                            else "options.get('Mmin', 0.07)/(m3)",
+                            options["ranges"]["q"][1]
+                            if options.get("ranges", {}).get("q", None)
+                            else "q3max",
+                            options["resolutions"]["M"][2],
+                        ),
                     )
-    # TODO: Ask about which periods should be used for the calc_sep_from_period
+                    # TODO: Ask about which periods should be used for the calc_sep_from_period
 
                     # (optional) triples: eccentricity
-                    if options['resolutions']['ecc'][2] > 0:
+                    if options["resolutions"]["ecc"][2] > 0:
                         self.add_grid_variable(
-                            name='ecc3',
-                            parameter_name='eccentricity3',
-                            longname='Eccentricity of the triple+quadruple/outer binary',
-                            resolution=options['resolutions']['ecc'][2],
+                            name="ecc3",
+                            parameter_name="eccentricity3",
+                            longname="Eccentricity of the triple+quadruple/outer binary",
+                            resolution=options["resolutions"]["ecc"][2],
                             probdist=1,
-                            gridtype='centred',
-                            dphasevol='decc3',
-                            precode='eccentricity3=ecc3',
+                            gridtype="centred",
+                            dphasevol="decc3",
+                            precode="eccentricity3=ecc3",
                             valuerange=[
-                                options['ranges']['ecc'][0], # Just fail if not defined.
-                                options['ranges']['ecc'][1]
+                                options["ranges"]["ecc"][
+                                    0
+                                ],  # Just fail if not defined.
+                                options["ranges"]["ecc"][1],
                             ],
                             spacingfunc="const({}, {}, {})".format(
-                                options['ranges']['ecc'][0], # Just fail if not defined.
-                                options['ranges']['ecc'][1],
-                                options['resolutions']['ecc'][2]
-                            )
+                                options["ranges"]["ecc"][
+                                    0
+                                ],  # Just fail if not defined.
+                                options["ranges"]["ecc"][1],
+                                options["resolutions"]["ecc"][2],
+                            ),
                         )
 
         # Now we are at the last part.
         # Here we should combine all the information that we calculate and update the options dictionary
         # This will then be passed to the Moe_de_Stefano_2017_pdf to calculate the real probability
-        # The trick we use is to strip the options_dict as a string and add some keys to it: 
+        # The trick we use is to strip the options_dict as a string and add some keys to it:
 
-        probdist_addition = "Moe_de_Stefano_2017_pdf({{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}}})".format(str(options)[1:-1], 
+        probdist_addition = "Moe_de_Stefano_2017_pdf({{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}}})".format(
+            str(options)[1:-1],
             '"multiplicity": multiplicity',
-
             '"M1": M_1',
             '"M2": M_2',
             '"M3": M_3',
             '"M4": M_4',
-
             '"P": orbital_period',
             '"P2": orbital_period_triple',
             '"P3": orbital_period_quadruple',
-
             '"ecc": eccentricity',
             '"ecc2": eccentricity2',
-            '"ecc3": eccentricity3'
+            '"ecc3": eccentricity3',
         )
 
         # and finally the probability calculator
diff --git a/binarycpython/utils/grid_options_defaults.py b/binarycpython/utils/grid_options_defaults.py
index ac9d12b00..958a49c38 100644
--- a/binarycpython/utils/grid_options_defaults.py
+++ b/binarycpython/utils/grid_options_defaults.py
@@ -30,7 +30,7 @@ grid_options_defaults_dict = {
     "combine_ensemble_with_thread_joining": True,  # Flag on whether to combine everything and return it to the user or if false: write it to data_dir/ensemble_output_{popuation_id}_{thread_id}.json
     # "output_dir":
     "_commandline_input": "",
-    "log_runtime_systems": 0, # whether to log the runtime of the systems (1 file per thread. stored in the tmp_dir)
+    "log_runtime_systems": 0,  # whether to log the runtime of the systems (1 file per thread, stored in the tmp_dir)
     ##########################
     # Execution log:
     ##########################
@@ -465,6 +465,110 @@ grid_options_descriptions = {
     "log_runtime_systems": "Whether to log the runtime of the systems . Each systems run by the thread is logged to a file and is stored in the tmp_dir. (1 file per thread). Don't use this if you are planning to run alot of systems. This is mostly for debugging and finding systems that take long to run. Integer, default = 0. if value is 1 then the systems are logged",
 }
 
+###
+#
+
+# Default options for the Moe & Distefano grid
+moe_distefano_default_options = {
+    "resolutions": {
+        "M": [
+            100,  # M1 (irrelevant if time adaptive)
+            40,  # M2 (i.e. q)
+            10,  # M3 currently unused
+            10,  # M4 currently unused
+        ],
+        "logP": [
+            40,  # P2 (binary period)
+            2,  # P3 (triple period) currently unused
+            2,  # P4 (quadruple period) currently unused
+        ],
+        "ecc": [
+            10,  # e2 (binary eccentricity) currently unused
+            10,  # e3 (triple eccentricity) currently unused
+            10,  # e4 (quadruple eccentricity) currently unused
+        ],
+    },
+    "ranges": {
+        # stellar masses (Msun)
+        "M": [
+            0.07,  # maximum brown dwarf mass == minimum stellar mass
+            80.0,  # (rather arbitrary) upper mass cutoff
+        ],
+        "q": [
+            None,  # artificial qmin : set to None to use default
+            None,  # artificial qmax : set to None to use default
+        ],
+        "logP": [0.0, 8.0],  # 0 = log10(1 day)  # 8 = log10(10^8 days)
+        "ecc": [0.0, 1.0],
+    },
+    # minimum stellar mass
+    "Mmin": 0.07,
+    # multiplicity model (as a function of log10M1)
+    #
+    # You can use 'Poisson' which uses the system multiplicity
+    # given by Moe and maps this to single/binary/triple/quad
+    # fractions.
+    #
+    # Alternatively, 'data' takes the fractions directly
+    # from the data, but then triples and quadruples are
+    # combined (and there are NO quadruples).
+    "multiplicity_model": "Poisson",
+    # multiplicity modulator:
+    # [single, binary, triple, quadruple]
+    #
+    # e.g. [1,0,0,0] for single stars only
+    #      [0,1,0,0] for binary stars only
+    #
+    # defaults to [1,1,1,1] i.e. all types
+    #
+    "multiplicity_modulator": [
+        1,  # single
+        1,  # binary
+        1,  # triple
+        1,  # quadruple
+    ],
+    # given a mix of multiplicities, you can either (noting that
+    # here (S,B,T,Q) = appropriate modulator * model(S,B,T,Q) )
+    #
+    # 'norm'  : normalize so the whole population is 1.0
+    #           after implementing the appropriate fractions
+    #           S/(S+B+T+Q), B/(S+B+T+Q), T/(S+B+T+Q), Q/(S+B+T+Q)
+    #
+    # 'raw'   : stick to what is predicted, i.e.
+    #           S/(S+B+T+Q), B/(S+B+T+Q), T/(S+B+T+Q), Q/(S+B+T+Q)
+    #           without normalization
+    #           (in which case the total probability < 1.0 unless
+    #            you use all of single, binary, triple and quadruple)
+    #
+    # 'merge' : e.g. if you only have single and binary,
+    #           add the triples and quadruples to the binaries, so
+    #           binaries represent all multiple systems
+    #           ...
+    #           *** this is canonical binary population synthesis ***
+    #
+    #           Note: if multiplicity_modulator == [1,1,1,1] this
+    #                 option does nothing (equivalent to 'raw').
+    #
+    #
+    # note: if you only set one multiplicity_modulator
+    # to 1, and all the others to 0, then normalizing
+    # will mean that you effectively have the same number
+    # of stars as single, binary, triple or quad (whichever
+    # is non-zero) i.e. the multiplicity fraction is ignored.
+    # This is probably not useful except for
+    # testing purposes or comparing to old grids.
+    "normalize_multiplicities": "merge",
+    # q extrapolation (below 0.15) method
+    # none
+    # flat
+    # linear2
+    # plaw2
+    # nolowq
+    "q_low_extrapolation_method": "linear2",
+    "q_high_extrapolation_method": "linear2",
+}
+
+
 #################################
 # Grid options functions
 
diff --git a/binarycpython/utils/useful_funcs.py b/binarycpython/utils/useful_funcs.py
index 419407b66..acf28d6a5 100644
--- a/binarycpython/utils/useful_funcs.py
+++ b/binarycpython/utils/useful_funcs.py
@@ -26,8 +26,6 @@ AURSUN = 2.150445198804013386961742071435e02
 YEARDY = 3.651995478818308811241877265275e02
 
 
-
-
 def minimum_period_for_RLOF(M1, M2, metallicity, store_memaddr=-1):
     """
     Wrapper function for _binary_c_bindings.return_minimum_orbit_for_RLOF
@@ -44,19 +42,27 @@ def minimum_period_for_RLOF(M1, M2, metallicity, store_memaddr=-1):
 
     """
 
-    bse_dict = {"M_1": M1, "M_2": M2, "metallicity": metallicity, "minimum_orbital_period_for_instant_RLOF": 1, "minimum_separation_for_instant_RLOF": 1}
+    bse_dict = {
+        "M_1": M1,
+        "M_2": M2,
+        "metallicity": metallicity,
+        "minimum_orbital_period_for_instant_RLOF": 1,
+        "minimum_separation_for_instant_RLOF": 1,
+    }
 
     argstring = "binary_c " + create_arg_string(bse_dict)
 
     output = _binary_c_bindings.return_minimum_orbit_for_RLOF(argstring, store_memaddr)
 
-    minimum_period = float(re.search('MINIMUM PERIOD (.*)', output).group(1))
+    minimum_period = float(re.search("MINIMUM PERIOD (.*)", output).group(1))
 
     return minimum_period
 
+
 # print(minimum_period_for_RLOF(10, 5, 0.02))
 # print(minimum_period_for_RLOF(10, 2, 0.02))
 
+
 def minimum_separation_for_RLOF(M1, M2, metallicity, store_memaddr=-1):
     """
     Wrapper function for _binary_c_bindings.return_minimum_orbit_for_RLOF
@@ -73,19 +79,27 @@ def minimum_separation_for_RLOF(M1, M2, metallicity, store_memaddr=-1):
 
     """
 
-    bse_dict = {"M_1": M1, "M_2": M2, "metallicity": metallicity, "minimum_orbital_period_for_instant_RLOF": 1, "minimum_separation_for_instant_RLOF": 1}
+    bse_dict = {
+        "M_1": M1,
+        "M_2": M2,
+        "metallicity": metallicity,
+        "minimum_orbital_period_for_instant_RLOF": 1,
+        "minimum_separation_for_instant_RLOF": 1,
+    }
 
     argstring = "binary_c " + create_arg_string(bse_dict)
 
     output = _binary_c_bindings.return_minimum_orbit_for_RLOF(argstring, store_memaddr)
 
-    minimum_separation = float(re.search('MINIMUM SEPARATION (.*)', output).group(1))
+    minimum_separation = float(re.search("MINIMUM SEPARATION (.*)", output).group(1))
 
     return minimum_separation
 
+
 # print(minimum_separation_for_RLOF(0.08, 0.08, 0.00002))
 # print(minimum_separation_for_RLOF(10, 2, 0.02))
 
+
 def calc_period_from_sep(
     M1: Union[int, float], M2: Union[int, float], sep: Union[int, float]
 ) -> Union[int, float]:
diff --git a/examples/example_population.py b/examples/example_population.py
index 7f5f776ee..79458e5ce 100644
--- a/examples/example_population.py
+++ b/examples/example_population.py
@@ -3,7 +3,12 @@ import os
 # import json
 # import time
 from binarycpython.utils.grid import Population
-from binarycpython.utils.functions import get_help_all, get_help, create_hdf5, output_lines
+from binarycpython.utils.functions import (
+    get_help_all,
+    get_help,
+    create_hdf5,
+    output_lines,
+)
 from binarycpython.utils.custom_logging_functions import temp_dir
 
 #########################################################
@@ -11,6 +16,7 @@ from binarycpython.utils.custom_logging_functions import temp_dir
 # The use of help(<function>) is a good way to inspect what parameters are there to use
 #########################################################
 
+
 def parse_function(self, output):
     # EXAMPLE PARSE_FUNCTION
 
@@ -39,7 +45,7 @@ def parse_function(self, output):
             values = el.split()[1:]
             print(values)
 
-            if not len(parameters)==len(values):
+            if not len(parameters) == len(values):
                 print("Amount of column names isnt equal to amount of columns")
                 raise ValueError
 
@@ -50,6 +56,7 @@ def parse_function(self, output):
             with open(outfilename, "a") as f:
                 f.write(seperator.join(values) + "\n")
 
+
 # Create population object
 example_pop = Population()
 
@@ -67,14 +74,11 @@ example_pop.set(
     orbital_period=45000000080,  # bse_options
     max_evolution_time=15000,  # bse_options
     eccentricity=0.02,  # bse_options
-
     # Set companion to low mass
-    M_2=0.08, # Since in the example we run a single system, we should set the companion mass here. If we donm't do this, the code will complain. 
-
+            M_2=0.08,  # Since in the example we run a single system, we should set the companion mass here. If we don't do this, the code will complain.
     # grid_options
     amt_cores=2,  # grid_options
     verbose=1,  # verbosity. Not fully configured correctly yet but having it value of 1 prints alot of stuff
-
     # Custom options # TODO: need to be set in grid_options probably
     data_dir=os.path.join(
         temp_dir(), "example_python_population_result"
@@ -122,11 +126,7 @@ if(stardata->star[0].stellar_type >= 13)
 )
 
 # Add grid variables
-resolution = {
-    "M_1": 20, 
-    'q': 20, 
-    'per': 40
-}
+resolution = {"M_1": 20, "q": 20, "per": 40}
 
 # Mass
 example_pop.add_grid_variable(
@@ -153,27 +153,26 @@ example_pop.add_grid_variable(
 #     dphasevol="dq",
 #     precode="M_2 = q * M_1",
 #     parameter_name="M_2",
-#     condition="",  # Impose a condition on this grid variable. Mostly for a check for yourself    
+#     condition="",  # Impose a condition on this grid variable. Mostly for a check for yourself
 # )
 
-# # 
+# #
 # test_pop.add_grid_variable(
 #    name="log10per", # in days
-#    longname="log10(Orbital_Period)", 
+#    longname="log10(Orbital_Period)",
 #    valuerange=[0.15, 5.5],
 #    resolution="{}".format(resolution["per"]),
 #    spacingfunc="const(0.15, 5.5, {})".format(resolution["per"]),
 #    precode="""orbital_period = 10** log10per
 # sep = calc_sep_from_period(M_1, M_2, orbital_period)
 # sep_min = calc_sep_from_period(M_1, M_2, 10**0.15)
-# sep_max = calc_sep_from_period(M_1, M_2, 10**5.5)""", 
+# sep_max = calc_sep_from_period(M_1, M_2, 10**5.5)""",
 #    probdist="sana12(M_1, M_2, sep, orbital_period, sep_min, sep_max, math.log10(10**0.15), math.log10(10**5.5), -0.55)",
 #    parameter_name="orbital_period",
 #    dphasevol="dlog10per",
 # )
 
 
-
 # Exporting of all the settings can be done with .export_all_info()
 # on default it exports everything, but can be supressed by turning it off:
 #   population settings (bse_options, grid_options, custom_options), turn off with include_population
@@ -195,7 +194,7 @@ example_pop.export_all_info()
 
 ## Executing a population
 ## This uses the values generated by the grid_variables
-example_pop.evolve() # TODO: update this function call
+example_pop.evolve()  # TODO: update this function call
 
 # Wrapping up the results to an hdf5 file can be done by using the create_hdf5
 # (<directory containing data and settings>) This function takes the settings file
diff --git a/setup.py b/setup.py
index dd9f9fa56..502a2067a 100644
--- a/setup.py
+++ b/setup.py
@@ -259,6 +259,7 @@ setup(
         "pandas",
         "astropy",
         "matplotlib",
+        "py_rinterpolate",
     ],
     include_package_data=True,
     ext_modules=[BINARY_C_PYTHON_API_MODULE],  # binary_c must be loaded
-- 
GitLab