From 0a139031e5b42e08807ed242a87d6ba3c3f02384 Mon Sep 17 00:00:00 2001
From: David Hendriks <davidhendriks93@gmail.com>
Date: Tue, 1 Jun 2021 16:36:49 +0100
Subject: [PATCH] Black formatting and some minor changes to the queue code

---
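Patch notes (annotation only, not part of the commit message):

The formatting changes come from running Black over the package; something
like "black binarycpython setup.py" should reproduce them and be a no-op
once this patch is applied.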
 .../tests/test_grid_options_defaults.py       |   2 +-
 binarycpython/tests/test_spacing_functions.py |   2 +
 binarycpython/tests/test_useful_funcs.py      |   2 +-
 binarycpython/utils/distribution_functions.py |  44 ++-
 binarycpython/utils/functions.py              |  20 +-
 binarycpython/utils/grid.py                   | 276 +++++++++++-------
 binarycpython/utils/grid_options_defaults.py  |   8 +-
 binarycpython/utils/useful_funcs.py           |  15 +-
 setup.py                                      |   2 +-
 9 files changed, 241 insertions(+), 130 deletions(-)
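For reviewers: the queue changes in grid.py boil down to a sentinel-based
producer/consumer setup. The sketch below is illustrative only — worker,
fill_queue, AMT_CORES and the placeholder system dicts are stand-ins rather
than code from this patch; only the manager.Queue setup, the
(system_number, system_dict) tuples and the "STOP" sentinel mirror what
_system_queue_filler and _process_run_population_grid do.

    # Illustrative sketch of the job/result queue pattern used in grid.py
    # (_system_queue_filler / _process_run_population_grid). Placeholder
    # system dicts stand in for the real binary_c calls.
    import multiprocessing

    AMT_CORES = 2      # stands in for grid_options["amt_cores"]
    QUEUE_SIZE = 1000  # stands in for the hard-coded queue_size in evolve()


    def worker(job_queue, result_queue, ID):
        systems_run = 0
        # Consume jobs until the "STOP" sentinel arrives
        for system_number, system_dict in iter(job_queue.get, "STOP"):
            systems_run += 1  # a real worker would evolve the system here
        result_queue.put({"process_id": ID, "systems_run": systems_run})


    def fill_queue(job_queue, generator):
        # Enumerate the system generator, then signal every worker to stop
        for system_number, system_dict in enumerate(generator):
            job_queue.put((system_number, system_dict))
        for _ in range(AMT_CORES):
            job_queue.put("STOP")


    if __name__ == "__main__":
        manager = multiprocessing.Manager()
        job_queue = manager.Queue(maxsize=QUEUE_SIZE)
        result_queue = manager.Queue(maxsize=AMT_CORES)

        processes = [
            multiprocessing.Process(target=worker, args=(job_queue, result_queue, ID))
            for ID in range(AMT_CORES)
        ]
        for process in processes:
            process.start()

        fill_queue(job_queue, ({"M_1": 1.0 + 0.1 * i} for i in range(10)))

        for process in processes:
            process.join()
        while not result_queue.empty():
            print(result_queue.get())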

diff --git a/binarycpython/tests/test_grid_options_defaults.py b/binarycpython/tests/test_grid_options_defaults.py
index a381f8036..8986e1ada 100644
--- a/binarycpython/tests/test_grid_options_defaults.py
+++ b/binarycpython/tests/test_grid_options_defaults.py
@@ -11,7 +11,7 @@ from binarycpython.utils.functions import (
 from binarycpython.utils.grid_options_defaults import (
     write_grid_options_to_rst_file,
     grid_options_help,
-    grid_options_description_checker
+    grid_options_description_checker,
 )
 
 binary_c_temp_dir = temp_dir()
diff --git a/binarycpython/tests/test_spacing_functions.py b/binarycpython/tests/test_spacing_functions.py
index b29290d0a..a61f72512 100644
--- a/binarycpython/tests/test_spacing_functions.py
+++ b/binarycpython/tests/test_spacing_functions.py
@@ -8,6 +8,7 @@ import numpy as np
 from binarycpython.utils.spacing_functions import const
 from binarycpython.utils.functions import Capturing
 
+
 class test_spacing_functions(unittest.TestCase):
     """
     Unit test for spacing functions
@@ -28,5 +29,6 @@ class test_spacing_functions(unittest.TestCase):
             msg="Output didn't contain SINGLE_STAR_LIFETIME",
         )
 
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/binarycpython/tests/test_useful_funcs.py b/binarycpython/tests/test_useful_funcs.py
index 9339d5459..bbab35f7a 100644
--- a/binarycpython/tests/test_useful_funcs.py
+++ b/binarycpython/tests/test_useful_funcs.py
@@ -15,7 +15,7 @@ from binarycpython.utils.useful_funcs import (
     roche_lobe,
     ragb,
     calc_sep_from_period,
-    calc_period_from_sep
+    calc_period_from_sep,
 )
 
 # class test_(unittest.TestCase):
diff --git a/binarycpython/utils/distribution_functions.py b/binarycpython/utils/distribution_functions.py
index 0ef0c158a..9c2e39510 100644
--- a/binarycpython/utils/distribution_functions.py
+++ b/binarycpython/utils/distribution_functions.py
@@ -394,10 +394,11 @@ def gaussian(
         # normalize over given range
         # TODO: add loading into global var
         normalisation = gaussian_normalizing_const(mean, sigma, gmin, gmax)
-        prob = gaussian_func(x, mean, sigma)/normalisation
+        prob = gaussian_func(x, mean, sigma) / normalisation
 
     return prob
 
+
 #####
 # Mass distributions
 #####
@@ -1112,6 +1113,7 @@ def Moe_de_Stefano_2017_multiplicity_fractions(options):
     # return array reference
     return result
 
+
 # @profile
 def build_q_table(options, m, p):
     ############################################################
@@ -1140,8 +1142,12 @@ def build_q_table(options, m, p):
     # TODO: fix this cache check.
     incache = False
     if Moecache.get("rinterpolator_q_metadata", None):
-        if (Moecache["rinterpolator_q_metadata"][m]) and (Moecache["rinterpolator_q_metadata"][p]):
-            if (Moecache["rinterpolator_q_metadata"][m] == options[m]) and (Moecache["rinterpolator_q_metadata"][p] == options[p]):
+        if (Moecache["rinterpolator_q_metadata"][m]) and (
+            Moecache["rinterpolator_q_metadata"][p]
+        ):
+            if (Moecache["rinterpolator_q_metadata"][m] == options[m]) and (
+                Moecache["rinterpolator_q_metadata"][p] == options[p]
+            ):
                 incache = True
         # print("INCACHE: {}".format(incache))
 
@@ -1395,20 +1401,32 @@ def build_q_table(options, m, p):
             tmp_table.append([q, qdata[q]])
 
         if Moecache.get("rinterpolator_q_given_{}_log10{}".format(m, p), None):
-            print("Present interpolator: {}".format(Moecache["rinterpolator_q_given_{}_log10{}".format(m, p)]))
+            print(
+                "Present interpolator: {}".format(
+                    Moecache["rinterpolator_q_given_{}_log10{}".format(m, p)]
+                )
+            )
             print("Destroying present interpolator:")
             interpolator = Moecache["rinterpolator_q_given_{}_log10{}".format(m, p)]
             print(interpolator)
             print(type(interpolator))
             print(dir(interpolator))
-            x=Moecache["rinterpolator_q_given_{}_log10{}".format(m, p)].interpolate([0.5])
+            x = Moecache["rinterpolator_q_given_{}_log10{}".format(m, p)].interpolate(
+                [0.5]
+            )
             print("Interpolated a value q=0.5: {}".format(x))
             Moecache["rinterpolator_q_given_{}_log10{}".format(m, p)].destroy()
             print(interpolator)
             print(type(interpolator))
             print(dir(interpolator))
-            print("Present interpolator: {}".format(Moecache["rinterpolator_q_given_{}_log10{}".format(m, p)]))
-            x=Moecache["rinterpolator_q_given_{}_log10{}".format(m, p)].interpolate([0.5])
+            print(
+                "Present interpolator: {}".format(
+                    Moecache["rinterpolator_q_given_{}_log10{}".format(m, p)]
+                )
+            )
+            x = Moecache["rinterpolator_q_given_{}_log10{}".format(m, p)].interpolate(
+                [0.5]
+            )
             print("Interpolated a value q=0.5: {}".format(x))
             # del Moecache["rinterpolator_q_given_{}_log10{}".format(m, p)]
 
@@ -1455,9 +1473,17 @@ def build_q_table(options, m, p):
                     print("Error: > 1e-6 in q probability integral: {}".format(I))
 
         # set this new table in the cache
-        print("STORING Q INTERPOLATOR AS {}".format("rinterpolator_q_given_{}_log10{}".format(m, p)))
+        print(
+            "STORING Q INTERPOLATOR AS {}".format(
+                "rinterpolator_q_given_{}_log10{}".format(m, p)
+            )
+        )
         Moecache["rinterpolator_q_given_{}_log10{}".format(m, p)] = q_interpolator
-        print("STORed Q INTERPOLATOR AS {}".format("rinterpolator_q_given_{}_log10{}".format(m, p)))
+        print(
+            "STORed Q INTERPOLATOR AS {}".format(
+                "rinterpolator_q_given_{}_log10{}".format(m, p)
+            )
+        )
 
         if not Moecache.get("rinterpolator_q_metadata", None):
             Moecache["rinterpolator_q_metadata"] = {}
diff --git a/binarycpython/utils/functions.py b/binarycpython/utils/functions.py
index 3ded7b1d9..3a054a3a0 100644
--- a/binarycpython/utils/functions.py
+++ b/binarycpython/utils/functions.py
@@ -33,7 +33,7 @@ import binarycpython.utils.moe_distefano_data as moe_distefano_data
 
 def subtract_dicts(dict_1: dict, dict_2: dict) -> dict:
     """
-    Function to subtract two dictionaries. 
+    Function to subtract two dictionaries.
 
     Only allows values to be either a dict or a numerical type
 
@@ -91,7 +91,9 @@ def subtract_dicts(dict_1: dict, dict_2: dict) -> dict:
             copy_dict = copy.deepcopy(dict_1[key])
             new_dict[key] = copy_dict
         else:
-            msg = "Error: using unsupported type for key {}: {}".format(key, type(dict_1[key]))
+            msg = "Error: using unsupported type for key {}: {}".format(
+                key, type(dict_1[key])
+            )
             print(msg)
             raise ValueError(msg)
 
@@ -107,7 +109,9 @@ def subtract_dicts(dict_1: dict, dict_2: dict) -> dict:
         elif isinstance(dict_2[key], dict):
             new_dict[key] = subtract_dicts({}, dict_2[key])
         else:
-            msg = "Error: using unsupported type for key {}: {}".format(key, type(dict_2[key]))
+            msg = "Error: using unsupported type for key {}: {}".format(
+                key, type(dict_2[key])
+            )
             print(msg)
             raise ValueError(msg)
 
@@ -157,7 +161,9 @@ def subtract_dicts(dict_1: dict, dict_2: dict) -> dict:
                 if not new_dict[key]:
                     del new_dict[key]
             else:
-                msg = "Error: using unsupported type for key {}: {}".format(key, type(dict_2[key]))
+                msg = "Error: using unsupported type for key {}: {}".format(
+                    key, type(dict_2[key])
+                )
                 print(msg)
                 raise ValueError(msg)
 
@@ -1476,7 +1482,11 @@ def merge_dicts(dict_1: dict, dict_2: dict) -> dict:
             else:
                 print(
                     "Object types {}: {} ({}), {} ({}) not supported.".format(
-                        key, dict_1[key], type(dict_1[key]), dict_2[key], type(dict_2[key])
+                        key,
+                        dict_1[key],
+                        type(dict_1[key]),
+                        dict_2[key],
+                        type(dict_2[key]),
                     )
                 )
                 raise ValueError
diff --git a/binarycpython/utils/grid.py b/binarycpython/utils/grid.py
index 398d6cd13..fa64c62bb 100644
--- a/binarycpython/utils/grid.py
+++ b/binarycpython/utils/grid.py
@@ -221,11 +221,12 @@ class Population:
         Function to handle settings values via the command line.
         Best to be called after all the .set(..) lines, and just before the .evolve() is called
 
-        If you input any known parameter (i.e. contained in grid_options, defaults/bse_options or custom_options),
-        this function will attempt to convert the input from string (because everything is string) to the type of
-        the value that option had before.
+        If you input any known parameter (i.e. contained in grid_options, defaults/bse_options
+        or custom_options), this function will attempt to convert the input from string
+        (command-line input is always a string) to the type that the option had before.
 
-        The values of the bse_options are initially all strings, but after user input they can change to ints.
+        The values of the bse_options are initially all strings, but after user input they
+        can change to ints.
 
         The value of any new parameter (which will go to custom_options) will be a string.
 
@@ -292,7 +293,7 @@ class Population:
                         value = type(old_value)(value)
                         verbose_print("Success!", self.grid_options["verbosity"], 1)
 
-                    except ValueError as e:
+                    except ValueError:
                         verbose_print(
                             "Tried to convert the given parameter {}/value {} to its correct type {} (from old value {}). But that wasn't possible.".format(
                                 parameter, value, type(old_value), old_value
@@ -324,7 +325,8 @@ class Population:
 
     def last_grid_variable(self):
         """
-        Functon that returns the last grid variable (i.e. the one with the highest grid_variable_number)
+        Function that returns the last grid variable
+        (i.e. the one with the highest grid_variable_number)
         """
 
         number = len(self.grid_options["_grid_variables"])
@@ -367,13 +369,15 @@ class Population:
 
         Args:
             name:
-                name of parameter. This is evaluated as a parameter and you can use it throughout the rest of the function
+                name of parameter. This is evaluated as a parameter and you can use it throughout
+                the rest of the function
                 example: name = 'lnm1'
             longname:
                 Long name of parameter
                 example: longname = 'Primary mass'
             range:
-                Range of values to take. Does not get used really, the spacingfunction is used to get the values from
+                Range of values to take. Not really used directly; the spacingfunction is
+                what actually generates the values
                 example: range = [math.log(m_min), math.log(m_max)]
             resolution:
                 Resolution of the sampled range (amount of samples).
@@ -381,8 +385,12 @@ class Population:
                 example: resolution = resolution["M_1"]
             spacingfunction:
                 Function determining how the range is sampled. You can either use a real function,
-                or a string representation of a function call. Will get written to a file and then evaluated.
-                example: spacingfunction = "const(math.log(m_min), math.log(m_max), {})".format(resolution['M_1'])
+                or a string representation of a function call. Will get written to a file and
+                then evaluated.
+                example:
+                    spacingfunction = "const(math.log(m_min), math.log(m_max), {})".format(
+                        resolution['M_1']
+                    )
             precode:
                 Extra room for some code. This code will be evaluated within the loop of the
                 sampling function (i.e. a value for lnm1 is chosen already)
@@ -391,13 +399,16 @@ class Population:
                 FUnction determining the probability that gets asigned to the sampled parameter
                 example: probdist = 'Kroupa2001(M_1)*M_1'
             dphasevol:
-                part of the parameter space that the total probability is calculated with. Put to -1 if you want to ignore any dphasevol calculations and set the value to 1
+                part of the parameter space that the total probability is calculated with. Set it to -1
+                if you want to ignore any dphasevol calculations and use a value of 1 instead
                 example: dphasevol = 'dlnm1'
             condition:
                 condition that has to be met in order for the grid generation to continue
                 example: condition = 'self.grid_options['binary']==1'
             gridtype:
-                Method on how the value range is sampled. Can be either 'edge' (steps starting at the lower edge of the value range) or 'center' (steps starting at lower edge + 0.5 * stepsize).
+                Method by which the value range is sampled. Can be either 'edge' (steps starting at
+                the lower edge of the value range) or 'center'
+                (steps starting at lower edge + 0.5 * stepsize).
         """
 
         # TODO: Add check for the gridtype input value
@@ -477,10 +488,17 @@ class Population:
         Function that returns all the information about the population and binary_c
 
         Args:
-            include_population_settings: whether to include the population_settings (see function return_population_settings)
-            include_binary_c_defaults: whether to include a dict containing the binary_c parameters and their default values
-            include_binary_c_version_info: whether to include a dict containing all the binary_c version info (see return_binary_c_version_info)
-            include_binary_c_help_all: whether to include a dict containing all the information about the binary_c parameters (see get_help_all)
+            include_population_settings:
+                whether to include the population_settings (see function return_population_settings)
+            include_binary_c_defaults:
+                whether to include a dict containing the binary_c parameters and their default
+                values
+            include_binary_c_version_info:
+                whether to include a dict containing all the binary_c version info
+                (see return_binary_c_version_info)
+            include_binary_c_help_all:
+                whether to include a dict containing all the information about
+                the binary_c parameters (see get_help_all)
 
         Return:
             dictionary containing all, or part of, the above dictionaries
@@ -720,7 +738,9 @@ class Population:
             "start_timestamp": self.grid_options["_start_time_evolution"],
             "end_timestamp": self.grid_options["_end_time_evolution"],
             "total_mass_run": self.grid_options["_total_mass_run"],
-            "total_probability_weighted_mass_run": self.grid_options["_total_probability_weighted_mass_run"],
+            "total_probability_weighted_mass_run": self.grid_options[
+                "_total_probability_weighted_mass_run"
+            ],
         }
 
         # print(Moecache)
@@ -824,15 +844,14 @@ class Population:
                 0,
             )
 
-
     def get_stream_logger(self, level=logging.DEBUG):
         """Return logger with configured StreamHandler."""
-        stream_logger = logging.getLogger('stream_logger')
+        stream_logger = logging.getLogger("stream_logger")
         stream_logger.handlers = []
         stream_logger.setLevel(level)
         sh = logging.StreamHandler()
         sh.setLevel(level)
-        fmt = '[%(asctime)s %(levelname)-8s %(processName)s] --- %(message)s'
+        fmt = "[%(asctime)s %(levelname)-8s %(processName)s] --- %(message)s"
         formatter = logging.Formatter(fmt)
         sh.setFormatter(formatter)
         stream_logger.addHandler(sh)
@@ -844,19 +863,18 @@ class Population:
         Function that is responsible for keeping the queue filled.
 
         This will generate the systems until it is full, and then keeps trying to fill it.
-        Will have to play with the size of this. 
+        Will have to play with the size of this.
         """
         stream_logger = self.get_stream_logger()
         stream_logger.debug(f"setting up the system_queue_filler now")
 
-
         # Setup of the generator
         self._generate_grid_code(dry_run=False)
 
         self._load_grid_function()
 
         generator = self.grid_options["_system_generator"](self, print_results=False)
-        
+
         # TODO: build in method to handle with the HPC.
         # Continously fill the queue
         for system_number, system_dict in enumerate(generator):
@@ -866,7 +884,6 @@ class Population:
             # Print current size
             # print("Current size: {}".format(save_que.qsize()))
 
-
         # Send closing signal to workers. When they receive this they will terminate
         stream_logger.debug(f"Signaling stop to processes")  # DEBUG
         for _ in range(amt_cores):
@@ -912,15 +929,19 @@ class Population:
         # pathos_multiprocess
         queue_size = 1000
 
-
         manager = multiprocessing.Manager()
         job_queue = manager.Queue(maxsize=queue_size)
-        result_queue = manager.Queue(maxsize=self.grid_options['amt_cores'])
+        result_queue = manager.Queue(maxsize=self.grid_options["amt_cores"])
 
         # Create process instances
         processes = []
         for ID in range(self.grid_options["amt_cores"]):
-            processes.append(multiprocessing.Process(target=self._process_run_population_grid, args=(job_queue, result_queue, ID)))
+            processes.append(
+                multiprocessing.Process(
+                    target=self._process_run_population_grid,
+                    args=(job_queue, result_queue, ID),
+                )
+            )
 
         # Activate the processes
         for p in processes:
@@ -951,7 +972,9 @@ class Population:
         ]  # Ensemble results are also passed as output from that dictionary
 
         # Add some metadata
-        self.grid_ensemble_results['population_id'] = self.grid_options["_population_id"]
+        self.grid_ensemble_results["population_id"] = self.grid_options[
+            "_population_id"
+        ]
 
         self.grid_options["_failed_count"] = combined_output_dict["_failed_count"]
         self.grid_options["_failed_prob"] = combined_output_dict["_failed_prob"]
@@ -963,7 +986,9 @@ class Population:
         self.grid_options["_probtot"] = combined_output_dict["_probtot"]
         self.grid_options["_count"] = combined_output_dict["_count"]
         self.grid_options["_total_mass_run"] = combined_output_dict["_total_mass_run"]
-        self.grid_options["_total_probability_weighted_mass_run"] = combined_output_dict["_total_probability_weighted_mass_run"]
+        self.grid_options[
+            "_total_probability_weighted_mass_run"
+        ] = combined_output_dict["_total_probability_weighted_mass_run"]
 
     def _evolve_system_mp(self, full_system_dict):
         """
@@ -1021,7 +1046,8 @@ class Population:
         # Set to starting up
         with open(
             os.path.join(
-                self.grid_options["tmp_dir"], "process_status",
+                self.grid_options["tmp_dir"],
+                "process_status",
                 "process_{}.txt".format(self.process_ID),
             ),
             "w",
@@ -1075,13 +1101,14 @@ class Population:
         total_probability_weighted_mass_run = 0
 
         # Go over the queue
-        for system_number, system_dict in iter(job_queue.get, 'STOP'):
+        for system_number, system_dict in iter(job_queue.get, "STOP"):
             if localcounter == 0:
 
                 # Set status to running
                 with open(
                     os.path.join(
-                        self.grid_options["tmp_dir"], "process_status",
+                        self.grid_options["tmp_dir"],
+                        "process_status",
                         "process_{}.txt".format(self.process_ID),
                     ),
                     "w",
@@ -1095,7 +1122,7 @@ class Population:
             # In the first system, explicitly check all the keys that are passed to see if
             # they match the keys known to binary_c.
             # Won't do that every system cause that is a bit of a waste of computing time.
-            if number_of_systems_run==0:
+            if number_of_systems_run == 0:
                 # TODO: Put this someplace else and wrap in a functioncall
                 for key in full_system_dict.keys():
                     if not key in self.available_keys:
@@ -1126,14 +1153,14 @@ class Population:
                 self.grid_options["verbosity"],
                 2,
             )
-            stream_logger.debug("Process {} is handling system {}".format(ID, system_number))
 
             # In some cases, the whole run crashes. To be able to figure out which system
             # that was on, we log each current system to a file (each thread has one).
             # Each new system overrides the previous
             with open(
                 os.path.join(
-                    self.grid_options["tmp_dir"], "current_system",
+                    self.grid_options["tmp_dir"],
+                    "current_system",
                     "process_{}.txt".format(self.process_ID),
                 ),
                 "w",
@@ -1144,7 +1171,7 @@ class Population:
             start_runtime_binary_c = time.time()
 
             # Evolve the system
-            if self.grid_options['_actually_evolve_system']:
+            if self.grid_options["_actually_evolve_system"]:
                 self._evolve_system_mp(full_system_dict)
 
             end_runtime_binary_c = time.time()
@@ -1157,16 +1184,16 @@ class Population:
             if self.grid_options["log_runtime_systems"] == 1:
                 with open(
                     os.path.join(
-                        self.grid_options["tmp_dir"], "runtime_systems",
+                        self.grid_options["tmp_dir"],
+                        "runtime_systems",
                         "process_{}.txt".format(self.process_ID),
                     ),
                     "a+",
                 ) as f:
-                    binary_cmdline_string = self._return_argline(
-                        full_system_dict
-                    )
+                    binary_cmdline_string = self._return_argline(full_system_dict)
                     f.write(
-                        "{} {} '{}'\n".format(start_runtime_binary_c,
+                        "{} {} '{}'\n".format(
+                            start_runtime_binary_c,
                             end_runtime_binary_c - start_runtime_binary_c,
                             binary_cmdline_string,
                         )
@@ -1178,14 +1205,22 @@ class Population:
             localcounter += 1
 
             # Tally up some numbers
-            total_mass_system = full_system_dict.get("M_1", 0) + full_system_dict.get("M_1", 0) + full_system_dict.get("M_1", 0) + full_system_dict.get("M_1", 0)
+            total_mass_system = (
+                full_system_dict.get("M_1", 0)
+                + full_system_dict.get("M_2", 0)
+                + full_system_dict.get("M_3", 0)
+                + full_system_dict.get("M_4", 0)
+            )
             total_mass_run += total_mass_system
-            total_probability_weighted_mass_run += total_mass_system * full_system_dict["probability"]
+            total_probability_weighted_mass_run += (
+                total_mass_system * full_system_dict["probability"]
+            )
 
         # Set status to finishing
         with open(
             os.path.join(
-                self.grid_options["tmp_dir"], "process_status",
+                self.grid_options["tmp_dir"],
+                "process_status",
                 "process_{}.txt".format(self.process_ID),
             ),
             "w",
@@ -1209,7 +1244,7 @@ class Population:
                     self.persistent_data_memory_dict[self.process_ID]
                 )
             )
-            if ensemble_raw_output == None:
+            if ensemble_raw_output is None:
                 verbose_print(
                     "Process {}: Warning! Ensemble output is empty. ".format(ID),
                     self.grid_options["verbosity"],
@@ -1217,7 +1252,7 @@ class Population:
                 )
 
             #
-            if self.grid_options["combine_ensemble_with_thread_joining"] == True:
+            if self.grid_options["combine_ensemble_with_thread_joining"] is True:
                 verbose_print(
                     "Process {}: Extracting ensemble info from raw string".format(ID),
                     self.grid_options["verbosity"],
@@ -1229,7 +1264,8 @@ class Population:
                 )  # Load this into a dict so that we can combine it later
 
             else:
-                # If we do not allow this, automatically we will export this to the data_dir, in some formatted way
+                # If thread joining is not allowed, we automatically export the ensemble output
+                # to the data_dir in some formatted way
                 output_file = os.path.join(
                     self.custom_options["data_dir"],
                     "ensemble_output_{}_{}.json".format(
@@ -1287,28 +1323,34 @@ class Population:
 
         # Write summary
         summary_dict = {
-            'population_id': self.grid_options["_population_id"],
-            'process_id': self.process_ID,
-            'start_process_time': start_process_time.timestamp(),
-            'end_process_time': end_process_time.timestamp(),
-            'total_time_calling_binary_c': total_time_calling_binary_c,
-            'number_of_systems_run': number_of_systems_run,
-            'probability_of_systems_run': probability_of_systems_run,
-            'failed_systems': self.grid_options["_failed_count"],
-            'failed_probability': self.grid_options["_failed_prob"],
-            'failed_system_error_codes': self.grid_options[
+            "population_id": self.grid_options["_population_id"],
+            "process_id": self.process_ID,
+            "start_process_time": start_process_time.timestamp(),
+            "end_process_time": end_process_time.timestamp(),
+            "total_time_calling_binary_c": total_time_calling_binary_c,
+            "number_of_systems_run": number_of_systems_run,
+            "probability_of_systems_run": probability_of_systems_run,
+            "failed_systems": self.grid_options["_failed_count"],
+            "failed_probability": self.grid_options["_failed_prob"],
+            "failed_system_error_codes": self.grid_options[
                 "_failed_systems_error_codes"
             ],
         }
         with open(
-            os.path.join(self.grid_options["tmp_dir"], "process_summary", "process_{}.json".format(self.process_ID)), 'w'
+            os.path.join(
+                self.grid_options["tmp_dir"],
+                "process_summary",
+                "process_{}.json".format(self.process_ID),
+            ),
+            "w",
         ) as f:
             f.write(json.dumps(summary_dict, indent=4))
 
         # Set status to running
         with open(
             os.path.join(
-                self.grid_options["tmp_dir"], "process_status",
+                self.grid_options["tmp_dir"],
+                "process_status",
                 "process_{}.txt".format(self.process_ID),
             ),
             "w",
@@ -1379,11 +1421,21 @@ class Population:
         """
 
         # Make sure the subdirs of the tmp dir exist:
-        os.makedirs(os.path.join(self.grid_options["tmp_dir"], "failed_systems"), exist_ok=True)
-        os.makedirs(os.path.join(self.grid_options["tmp_dir"], "current_system"), exist_ok=True)
-        os.makedirs(os.path.join(self.grid_options["tmp_dir"], "process_status"), exist_ok=True)
-        os.makedirs(os.path.join(self.grid_options["tmp_dir"], "process_summary"), exist_ok=True)
-        os.makedirs(os.path.join(self.grid_options["tmp_dir"], "runtime_systems"), exist_ok=True)
+        os.makedirs(
+            os.path.join(self.grid_options["tmp_dir"], "failed_systems"), exist_ok=True
+        )
+        os.makedirs(
+            os.path.join(self.grid_options["tmp_dir"], "current_system"), exist_ok=True
+        )
+        os.makedirs(
+            os.path.join(self.grid_options["tmp_dir"], "process_status"), exist_ok=True
+        )
+        os.makedirs(
+            os.path.join(self.grid_options["tmp_dir"], "process_summary"), exist_ok=True
+        )
+        os.makedirs(
+            os.path.join(self.grid_options["tmp_dir"], "runtime_systems"), exist_ok=True
+        )
 
         # Check for parse function
         if not self.grid_options["parse_function"]:
@@ -1571,7 +1623,7 @@ class Population:
         # TODO: Check whether all the probability and phasevol values are correct.
         # TODO: import only the necessary packages/functions
         # TODO: Put all the masses, eccentricities and periods in there already
-        # TODO: Put the certain blocks that are repeated in some subfunctions 
+        # TODO: Put the certain blocks that are repeated in some subfunctions
 
         Results in a generated file that contains a system_generator function.
         """
@@ -1693,7 +1745,7 @@ class Population:
             # TODO: Make clear that the phasevol only works good
             # TODO: add option to ignore this phasevol calculation and set it to 1
             #   if you sample linearly in that thing.
-            # if phasevol is <= 0 then we SKIP that whole loop. Its not working then. 
+            # if phasevol is <= 0 then we SKIP that whole loop. It's not working then.
             if (
                 not grid_variable["dphasevol"] == -1
             ):  # A method to turn off this calculation and allow a phasevol = 1
@@ -1771,9 +1823,7 @@ class Population:
             # Add phasevol check action:
             code_string += (
                 indent * (depth + 2)
-                + 'print("phasevol_{} <= 0!")'.format(
-                    grid_variable["name"]
-                )
+                + 'print("phasevol_{} <= 0!")'.format(grid_variable["name"])
                 + "\n"
             )
             code_string += indent * (depth + 2) + "continue" + "\n"
@@ -1874,7 +1924,14 @@ class Population:
             # whether this is the last loop.
             if loopnr == len(self.grid_options["_grid_variables"]) - 1:
 
-                code_string = self._write_gridcode_system_call(code_string, indent, depth, grid_variable, dry_run, grid_variable['branchpoint'])
+                code_string = self._write_gridcode_system_call(
+                    code_string,
+                    indent,
+                    depth,
+                    grid_variable,
+                    dry_run,
+                    grid_variable["branchpoint"],
+                )
 
             # increment depth
             depth += 1
@@ -1885,15 +1942,13 @@ class Population:
         # Write parts to write below the part that yield the results.
         # this has to go in a reverse order:
         # Here comes the stuff that is put after the deepest nested part that calls returns stuff.
-        # Here we will have a 
+        # Here we will have a
         reverse_sorted_grid_variables = sorted(
-                self.grid_options["_grid_variables"].items(),
-                key=lambda x: x[1]["grid_variable_number"],
-                reverse=True,
+            self.grid_options["_grid_variables"].items(),
+            key=lambda x: x[1]["grid_variable_number"],
+            reverse=True,
         )
-        for loopnr, grid_variable_el in enumerate(
-            reverse_sorted_grid_variables
-        ):
+        for loopnr, grid_variable_el in enumerate(reverse_sorted_grid_variables):
             grid_variable = grid_variable_el[1]
 
             code_string += indent * (depth + 1) + "#" * 40 + "\n"
@@ -1911,15 +1966,17 @@ class Population:
 
             depth -= 1
 
-            # Check the branchpoint part here. The branchpoint makes sure that we can construct 
-            # a grid with several multiplicities and still can make the system calls for each 
+            # Check the branchpoint part here. The branchpoint makes sure that we can construct
+            # a grid with several multiplicities and still can make the system calls for each
             # multiplicity without reconstructing the grid each time
-            if grid_variable['branchpoint'] == 1:
+            if grid_variable["branchpoint"] == 1:
 
                 # Add comment
                 code_string += (
                     indent * (depth + 1)
-                    + "# Condition for branchpoint at {}".format(reverse_sorted_grid_variables[loopnr+1][1]["parameter_name"])
+                    + "# Condition for branchpoint at {}".format(
+                        reverse_sorted_grid_variables[loopnr + 1][1]["parameter_name"]
+                    )
                     + "\n"
                 )
 
@@ -1930,7 +1987,14 @@ class Population:
                     + "\n"
                 )
 
-                code_string = self._write_gridcode_system_call(code_string, indent, depth+1, reverse_sorted_grid_variables[loopnr+1][1], dry_run, grid_variable['branchpoint'])
+                code_string = self._write_gridcode_system_call(
+                    code_string,
+                    indent,
+                    depth + 1,
+                    reverse_sorted_grid_variables[loopnr + 1][1],
+                    dry_run,
+                    grid_variable["branchpoint"],
+                )
                 code_string += "\n"
 
         ################
@@ -1980,8 +2044,9 @@ class Population:
         with open(gridcode_filename, "w") as file:
             file.write(code_string)
 
-
-    def _write_gridcode_system_call(self, code_string, indent, depth, grid_variable, dry_run, branchpoint):
+    def _write_gridcode_system_call(
+        self, code_string, indent, depth, grid_variable, dry_run, branchpoint
+    ):
         #################################################################################
         # Here are the calls to the queuing or other solution. this part is for every system
         # Add comment
@@ -2016,9 +2081,7 @@ class Population:
 
         # For each repeat of the system this has to be done yes.
         code_string += (
-            indent * (depth + 1)
-            + 'for _ in range(self.grid_options["repeat"]):'
-            + "\n"
+            indent * (depth + 1) + 'for _ in range(self.grid_options["repeat"]):' + "\n"
         )
 
         code_string += indent * (depth + 2) + "_total_starcount += 1\n"
@@ -2044,9 +2107,7 @@ class Population:
         # code_string += indent * (depth + 1) + "print(probability)\n"
 
         # Increment total probability
-        code_string += (
-            indent * (depth + 2) + "self._increment_probtot(probability)\n"
-        )
+        code_string += indent * (depth + 2) + "self._increment_probtot(probability)\n"
 
         if not dry_run:
             # Handling of what is returned, or what is not.
@@ -2994,10 +3055,9 @@ class Population:
                     argstring = self._return_argline(system_dict)
                     with open(
                         os.path.join(
-                            self.grid_options["tmp_dir"], "failed_systems", 
-                            "process_{}.txt".format(
-                                self.process_ID
-                            ),
+                            self.grid_options["tmp_dir"],
+                            "failed_systems",
+                            "process_{}.txt".format(self.process_ID),
                         ),
                         "a+",
                     ) as f:
@@ -3158,7 +3218,10 @@ class Population:
                         / dlog10P,
                     ]
                 )
-                print("Size period_distributions table: {}", len(Moecache["period_distributions"]))
+                print(
+                    "Size period_distributions table: {}",
+                    len(Moecache["period_distributions"]),
+                )
 
                 ############################################################
                 # distributions as a function of mass, period, q
@@ -3237,7 +3300,11 @@ class Population:
                     0.0,
                 ]
             )
-            print("Size period_distributions table: {}".format(len(Moecache["period_distributions"])))
+            print(
+                "Size period_distributions table: {}".format(
+                    len(Moecache["period_distributions"])
+                )
+            )
 
         # Write to logfile
         with open("/tmp/moecache.json", "w") as cache_filehandle:
@@ -3384,11 +3451,9 @@ qmax=maximum_mass_ratio_for_RLOF(M_1, orbital_period)
                     options["ranges"]["q"][0]
                     if options.get("ranges", {}).get("q", None)
                     else "options.get('Mmin', 0.07)/M_1",
-
                     options["ranges"]["q"][1]
                     if options.get("ranges", {}).get("q", None)
                     else "qmax",
-
                 ],
                 resolution=options["resolutions"]["M"][1],
                 probdist=1,
@@ -3401,12 +3466,10 @@ sep = calc_sep_from_period(M_1, M_2, orbital_period)
                 spacingfunc="const({}, {}, {})".format(
                     options["ranges"]["q"][0]
                     if options.get("ranges", {}).get("q", [None, None])[0]
-                    else "{}/M_1".format(options.get('Mmin', 0.07)),
-
+                    else "{}/M_1".format(options.get("Mmin", 0.07)),
                     options["ranges"]["q"][1]
                     if options.get("ranges", {}).get("q", [None, None])[1]
                     else "qmax",
-
                     options["resolutions"]["M"][1],
                 ),
             )
@@ -3611,9 +3674,10 @@ eccentricity3=0
                         )
 
         # Now we are at the last part.
-        # Here we should combine all the information that we calculate and update the options dictionary
-        # This will then be passed to the Moe_de_Stefano_2017_pdf to calculate the real probability
-        # The trick we use is to strip the options_dict as a string and add some keys to it:
+        # Here we should combine all the information that we calculate and update the options
+        # dictionary. This will then be passed to the Moe_de_Stefano_2017_pdf to calculate
+        # the real probability. The trick we use is to strip the options_dict as a string
+        # and add some keys to it:
 
         probdist_addition = "Moe_de_Stefano_2017_pdf({{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}}})".format(
             str(options)[1:-1],
@@ -3631,4 +3695,6 @@ eccentricity3=0
         )
 
         # and finally the probability calculator
-        self.grid_options['_grid_variables'][self.last_grid_variable()]['probdist'] = probdist_addition
+        self.grid_options["_grid_variables"][self.last_grid_variable()][
+            "probdist"
+        ] = probdist_addition
diff --git a/binarycpython/utils/grid_options_defaults.py b/binarycpython/utils/grid_options_defaults.py
index b40b674dd..46a097358 100644
--- a/binarycpython/utils/grid_options_defaults.py
+++ b/binarycpython/utils/grid_options_defaults.py
@@ -33,7 +33,7 @@ grid_options_defaults_dict = {
     # "output_dir":
     "_commandline_input": "",
     "log_runtime_systems": 0,  # whether to log the runtime of the systems (1 file per thread. stored in the tmp_dir)
-    "_actually_evolve_system": True, # Whether to actually evolve the systems of just act as if. for testing. used in _process_run_population_grid
+    "_actually_evolve_system": True,  # Whether to actually evolve the systems of just act as if. for testing. used in _process_run_population_grid
     ##########################
     # Execution log:
     ##########################
@@ -98,8 +98,8 @@ grid_options_defaults_dict = {
     "failed_systems_threshold": 20,  # Maximum failed systems per process allowed to fail before the process stops logging the failing systems.
     "_failed_systems_error_codes": [],  # List to store the unique error codes
     "_population_id": 0,  # Random id of this grid/population run, Unique code for the population. Should be set only once by the controller process.
-    "_total_mass_run": 0, # To count the total mass that thread/process has ran
-    "_total_probability_weighted_mass_run": 0, # To count the total mass * probability for each system that thread/process has ran 
+    "_total_mass_run": 0,  # To count the total mass that thread/process has ran
+    "_total_probability_weighted_mass_run": 0,  # To count the total mass * probability for each system that thread/process has ran
     "modulo": 1,  # run modulo n of the grid. #TODO: fix this
     ## Grid type evolution
     "_grid_variables": {},  # grid variables
@@ -470,7 +470,7 @@ grid_options_descriptions = {
     "log_runtime_systems": "Whether to log the runtime of the systems . Each systems run by the thread is logged to a file and is stored in the tmp_dir. (1 file per thread). Don't use this if you are planning to run alot of systems. This is mostly for debugging and finding systems that take long to run. Integer, default = 0. if value is 1 then the systems are logged",
     "_total_mass_run": "To count the total mass that thread/process has ran",
     "_total_probability_weighted_mass_run": "To count the total mass * probability for each system that thread/process has ran",
-    "_actually_evolve_system": "Whether to actually evolve the systems of just act as if. for testing. used in _process_run_population_grid"
+    "_actually_evolve_system": "Whether to actually evolve the systems of just act as if. for testing. used in _process_run_population_grid",
 }
 
 ###
diff --git a/binarycpython/utils/useful_funcs.py b/binarycpython/utils/useful_funcs.py
index 53d1abede..8e9901053 100644
--- a/binarycpython/utils/useful_funcs.py
+++ b/binarycpython/utils/useful_funcs.py
@@ -100,7 +100,10 @@ def minimum_separation_for_RLOF(M1, M2, metallicity, store_memaddr=-1):
 # print(minimum_separation_for_RLOF(0.08, 0.08, 0.00002))
 # print(minimum_separation_for_RLOF(10, 2, 0.02))
 
-def maximum_mass_ratio_for_RLOF(M1, orbital_period, metallicity=0.02, store_memaddr=None):
+
+def maximum_mass_ratio_for_RLOF(
+    M1, orbital_period, metallicity=0.02, store_memaddr=None
+):
     """
     Wrapper function for _binary_c_bindings.return_maximum_mass_ratio_for_RLOF
 
@@ -115,8 +118,8 @@ def maximum_mass_ratio_for_RLOF(M1, orbital_period, metallicity=0.02, store_mema
         maximum mass ratio that just does not cause a RLOF at ZAMS
     """
 
-    # Convert to orbital period in years 
-    orbital_period = orbital_period/3.651995478818308811241877265275e+02
+    # Convert to orbital period in years
+    orbital_period = orbital_period / 3.651995478818308811241877265275e02
 
     bse_dict = {
         "M_1": M1,
@@ -128,7 +131,9 @@ def maximum_mass_ratio_for_RLOF(M1, orbital_period, metallicity=0.02, store_mema
     }
 
     argstring = "binary_c " + create_arg_string(bse_dict)
-    output = _binary_c_bindings.return_maximum_mass_ratio_for_RLOF(argstring, store_memaddr)
+    output = _binary_c_bindings.return_maximum_mass_ratio_for_RLOF(
+        argstring, store_memaddr
+    )
     stripped = output.strip()
 
     if stripped == "NO MAXIMUM MASS RATIO < 1":
@@ -137,9 +142,11 @@ def maximum_mass_ratio_for_RLOF(M1, orbital_period, metallicity=0.02, store_mema
         maximum_mass_ratio = float(stripped.split()[-1])
     return maximum_mass_ratio
 
+
 # print(maximum_mass_ratio_for_RLOF(4, 0.1, 0.002))
 # print(maximum_mass_ratio_for_RLOF(4, 1, 0.002))
 
+
 def calc_period_from_sep(
     M1: Union[int, float], M2: Union[int, float], sep: Union[int, float]
 ) -> Union[int, float]:
diff --git a/setup.py b/setup.py
index 966c01581..0903d8c4d 100644
--- a/setup.py
+++ b/setup.py
@@ -210,7 +210,7 @@ BINARY_C_PYTHON_API_MODULE = Extension(
     extra_compile_args=[],
     language="C",
 )
-headers=['src/includes/header.h']
+headers = ["src/includes/header.h"]
 ############################################################
 # Making the extension function
 ############################################################
-- 
GitLab