diff --git a/.gitignore b/.gitignore
index 518fba56add59d4bdd3cd763cf243297a5b3d5c3..ac9cdeb6bdb5f1f01be936815274649b4f3c7e71 100644
--- a/.gitignore
+++ b/.gitignore
@@ -42,7 +42,6 @@ __pycache__/
 *.sqlite3
 *.swp
 *.swo
-*.pdf
 
 # C extensions
 *.so
diff --git a/.gitmodules b/.gitmodules
index 38b8bc1019b78abd679697d267b8643eaf908070..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,3 +0,0 @@
-[submodule "papers/joss_paper"]
-	path = papers/joss_paper
-	url = git@gitlab.com:dhendriks/paper_joss_binarycpython.git
diff --git a/VERSION b/VERSION
index a602fc9e283389306d606c164ce73ac338999373..b0bb878545dc6dc410a02b0df8b7ea9fd5705960 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-0.9.4
+0.9.5
diff --git a/binarycpython/tests/test_grid.py b/binarycpython/tests/test_grid.py
index 09b429dd1ef55c978d265243f7e1c9803d53b499..c2e3eeecdc6c87b1d78f155d3b2054dd46475c00 100644
--- a/binarycpython/tests/test_grid.py
+++ b/binarycpython/tests/test_grid.py
@@ -198,8 +198,10 @@ class test_cmdline(unittest.TestCase):
         self.assertTrue(isinstance(test_pop.bse_options["metallicity"], str))
         self.assertTrue(test_pop.bse_options["metallicity"] == "0.0002")
 
-        # Amt cores
-        self.assertTrue(isinstance(test_pop.grid_options["num_cores"], int))
+        # Number of cores
+        self.assertTrue(isinstance(test_pop.grid_options["num_cores"], int) or \
+                        isinstance(test_pop.grid_options["num_cores"], dict) or \
+                        isinstance(test_pop.grid_options["num_cores"], str))
         self.assertTrue(test_pop.grid_options["num_cores"] == 2)
 
         # datadir
diff --git a/binarycpython/utils/ensemble.py b/binarycpython/utils/ensemble.py
index 4c27268e0d52612a593c33515948525a9ae5549f..a1d3e7b5c47190c6400b21d42ace899f773cc863 100644
--- a/binarycpython/utils/ensemble.py
+++ b/binarycpython/utils/ensemble.py
@@ -115,7 +115,12 @@ def ensemble_file_type(filename):
 
 
 def load_ensemble(
-    filename, convert_float_keys=True, select_keys=None, timing=False, flush=False
+        filename,
+        convert_float_keys=True,
+        select_keys=None,
+        timing=False,
+        flush=False,
+        quiet=False
 ):
     """
     Function to load an ensemeble file, even if it is compressed,
@@ -129,7 +134,8 @@ def load_ensemble(
     # open the file
 
     # load with some info to the terminal
-    print("Loading JSON...", flush=flush)
+    if quiet == False:
+        print("Loading JSON...", flush=flush)
 
     # open the ensemble and get the file type
     file_object = open_ensemble(filename)
@@ -142,47 +148,64 @@ def load_ensemble(
         )
         sys.exit()
 
-    with Halo(text="Loading", interval=250, spinner="moon", color="yellow"):
+    if quiet == True:
         tstart = time.time()
-        _loaded = False
-
-        def _hook(obj):
-            """
-            Hook to load ensemble
-            """
-
-            nonlocal _loaded
-            if _loaded is False:
-                _loaded = True
-                print(
-                    "\nLoaded {} data, now putting in a dictionary".format(filetype),
-                    flush=True,
-                )
-            return obj
-
         if filetype == "JSON":
-            # orjson promises to be fast, but it doesn't seem to be
-            # and fails on "Infinity"... oops
-            # data = orjson.loads(file_object.read())
-
-            # simplejson is faster than standard json and "just works"
-            # on the big Moe set in 37s
-            data = simplejson.load(file_object, object_hook=_hook)
+            data = simplejson.load(file_object)
             file_object.close()
-
-            # standard json module
-            # on the big Moe set takes 42s
-            # data = json.load(file_object,
-            #                 object_hook=_hook)
         elif filetype == "msgpack":
+            data = msgpack.load(file_object)
             file_object.close()
-
         if timing:
             print(
                 "\n\nTook {} s to load the data\n\n".format(time.time() - tstart),
                 flush=True,
             )
+    else:
+        with Halo(text="Loading", interval=250, spinner="moon", color="yellow"):
+            tstart = time.time()
+            _loaded = False
+
+            def _hook(obj):
+                """
+                Hook to load ensemble
+                """
+
+                nonlocal _loaded
+                if _loaded is False:
+                    _loaded = True
+                    print(
+                        "\nLoaded {} data, now putting in a dictionary".format(filetype),
+                        flush=True,
+                    )
+                return obj
+
+            if filetype == "JSON":
+                # orjson promises to be fast, but it doesn't seem to be
+                # and fails on "Infinity"... oops
+                # data = orjson.loads(file_object.read())
+
+                # simplejson is faster than standard json and "just works"
+                # on the big Moe set in 37s
+                # quiet is False in this branch (it is handled above),
+                # so always install the progress-reporting hook here
+                data = simplejson.load(file_object,
+                                       object_hook=_hook)
+                file_object.close()
+
+                # standard json module
+                # on the big Moe set takes 42s
+                # data = json.load(file_object,
+                #                 object_hook=_hook)
+            elif filetype == "msgpack":
+                data = msgpack.load(file_object, object_hook=_hook)
+                file_object.close()
+
+            if timing:
+                print(
+                    "\n\nTook {} s to load the data\n\n".format(time.time() - tstart),
+                    flush=True,
+                )
 
     # strip non-selected keys, if a list is given in select_keys
     if select_keys:
diff --git a/binarycpython/utils/functions.py b/binarycpython/utils/functions.py
index 2b3013ad9795af8e446fa33b642cf7cdb43e7c8d..79cb049e4f2f32be843df9d792652fe831fec56f 100644
--- a/binarycpython/utils/functions.py
+++ b/binarycpython/utils/functions.py
@@ -8,34 +8,32 @@ Tasks:
     - TODO: change all prints to verbose_prints
 """
 
-import os
-import sys
-import time
-import json
-import datetime
 import collections
-import resource
-import tempfile
-import subprocess
-import types
-
-from typing import Union
-from io import StringIO
-
+import datetime
 import h5py
 import humanize
+import json
 import numpy as np
+import os
+import platform
 import psutil
-from colorama import Fore, Back, Style
-
+import resource
+import tempfile
+import time
+import socket
+import subprocess
+import sys
+import types
 from binarycpython import _binary_c_bindings
 from binarycpython.utils.dicts import filter_dict_through_values
+from colorama import Fore, Back, Style
+from io import StringIO
+from typing import Union
 
 ########################################################
 # Unsorted
 ########################################################
 
-
 def now(now_object=None, style=None, custom_format=None):
     """
     convenience function to return a string of the current time, using the format ``%m/%d/%Y %H:%M:%S``
@@ -1221,3 +1219,15 @@ def command_string_from_list(list):
     Given a list, turn it into a quoted command string
     """
     return ' '.join(quotewrap(list))
+
+def hostnames():
+    """
+    Return a list of possible hostnames for this machine
+    """
+    return [
+        os.getenv('HOSTNAME'),
+        platform.uname()[1],
+        socket.gethostname(),
+        socket.getfqdn(socket.gethostname()),
+        platform.node()
+    ]
diff --git a/binarycpython/utils/grid.py b/binarycpython/utils/grid.py
index a55f231ca31c2b8aed15e13b40ca444d538de176..fcaa4bf66e9cb2d5af3f9f7a9ca64cc42c40c6b0 100644
--- a/binarycpython/utils/grid.py
+++ b/binarycpython/utils/grid.py
@@ -7,46 +7,32 @@ TODO: the save_snapshots and save_snapshot, are they actually distinct?
 
 Tasks:
     - TODO: add functionality to 'on-init' set arguments
-    - TODO: add functionality to return the initial_abundance_hash
-    - TODO: add functionality to return the isotope_hash
-    - TODO: add functionality to return the isotope_list
-    - TODO: add functionality to return the nuclear_mass_hash
-    - TODO: add functionality to return the nuclear_mass_list
-    - TODO: add functionality to return the source_list
-    - TODO: add functionality to return the ensemble_list
+
     - TODO: consider spreading the functions over more files.
     - TODO: type the private functions
     - TODO: fix the correct object types for the default values of the bse_options
     - TODO: think of a clean and nice way to unload and remove the custom_logging_info library from memory (and from disk)
     - TODO: think of a nice way to remove the loaded grid_code/ generator from memory.
 
-TODO: Some of the methods that we have defined in the (mixin) class are designed to be used as a portal to information (return_binary_c_version_info for example.) THe current design is that they are all instance methods, but that is not always necessary. We can decorate them with @staticmethod, or @classmethod to make it easier to use them (https://realpython.com/instance-class-and-static-methods-demystified/)
+    - TODO: Some of the methods that we have defined in the (mixin) class are designed to be used as a portal to information (return_binary_c_version_info for example.) The current design is that they are all instance methods, but that is not always necessary. We can decorate them with @staticmethod, or @classmethod to make it easier to use them (https://realpython.com/instance-class-and-static-methods-demystified/)
 """
 
-import os
-import gc
-import sys
-import time
 import copy
-import json
-import uuid
-import queue
-import signal
 import datetime
 import functools
-import traceback
+import gc
+import json
 import multiprocessing
-
-from collections import (
-    OrderedDict,
-)
-from collections.abc import Iterable  # drop `.abc` with Python 2.7 or lower
-from typing import Union, Any
-
+import os
 import psutil
+import queue
 import setproctitle
+import signal
 import str2bool
-from colorama import init as colorama_init
+import sys
+import time
+import traceback
+import uuid
 
 from binarycpython.utils.functions import (
     check_if_in_shell,
@@ -57,6 +43,7 @@ from binarycpython.utils.functions import (
     mem_use,
     timedelta,
     now,
+    hostnames
 )
 from binarycpython.utils.ensemble import (
     binaryc_json_serializer,
@@ -68,7 +55,6 @@ from binarycpython.utils.dicts import (
     merge_dicts,
     keys_to_floats,
 )
-
 from binarycpython.utils.population_extensions.analytics import analytics
 from binarycpython.utils.population_extensions.cache import cache
 from binarycpython.utils.population_extensions.dataIO import dataIO
@@ -89,13 +75,17 @@ from binarycpython.utils.population_extensions.spacing_functions import (
     spacing_functions,
 )
 from binarycpython.utils.population_extensions.version_info import version_info
-
 from binarycpython import _binary_c_bindings
+from collections import (
+    OrderedDict,
+)
+from collections.abc import Iterable  # drop `.abc` with Python 2.7 or lower
+from colorama import init as colorama_init
+from typing import Union, Any
 
-# Initialise the colorama stuff
+# Initialise colorama
 colorama_init()
 
-
 class Population(
     analytics,
     cache,
@@ -672,9 +662,7 @@ class Population(
                         default=binaryc_json_serializer,
                         ensure_ascii=ensure_ascii,
                     )
-                print("ok pre")
                 self.locked_close(f, lock)
-                print("ok ret")
                 return settings_fullname
 
             # TODO: turn it around and have the exception be within the if statement
@@ -713,24 +701,111 @@ class Population(
         """
         Function to set the number of processes used in multiprocessing.
 
-        If grid_options['num_cores'] <= 0, set automatically
+        Test the value of grid_options['num_cores']:
+
+        * If 0 or 'logical' or 'all logical', use all logical cores.
+
+        * If -1 or 'physical' or 'all physical', use all physical cores.
+
+        * If integer >=1 use the number of cores given.
+
+        * If a dict, (key,value) pairs are (hostname,num_cores)
+          for particular machines. We then try to match this machine to
+          the (hostname,num_cores) pairs to set num_cores. If not found,
+          default to either the dict "default" setting, or the number
+          of logical cores available.
+
+        * If grid_options['num_cores'] is missing, all logical cores are used.
+
+        Exit if none of the above are satisfied.
 
-        If grid_options['num_cores'] is 0, we use as many as we have available
         """
+        _vb = self.grid_options['verbosity']>=2
+
+        def __nprocs(ncores):
+            if type(ncores) is str:
+                if ncores == 'all' or ncores == 'logical' or ncores == 'all logical':
+                    # use all logical cores available to us
+                    return max(1, psutil.cpu_count(logical=True))
+                elif ncores == 'all physical' or ncores == 'physical':
+                    # use all physical cores available to us
+                    return max(1, psutil.cpu_count(logical=False))
+                else:
+                    # unknown string : fall back to one CPU
+                    print(f"grid_option['num_cores'] = \"{ncores}\" is not recognised.\nPlease consider either setting this option to a number, 'physical' or 'logical', or a dict of (host,num_cores) pairs.")
+                    self.exit(code=1)
+            if type(ncores) is int:
+                if ncores == -1:
+                    return max(1, psutil.cpu_count(logical=False))
+                elif ncores == 0:
+                    return max(1, psutil.cpu_count(logical=True))
+                else:
+                    return ncores
+            else:
+                # ncores should be int or string, oops
+                print(f"grid_option['num_cores'] = \"{ncores}\" is not recognised.\nPlease consider either setting this option to a number, 'physical' or 'logical', or a dict of (host,num_cores) pairs.")
+                self.exit(code=1)
+
+        if _vb:
+            print(f"NPROC: pre-eval \"{self.grid_options['num_cores']}\" type \"{type(self.grid_options['num_cores'])}\"")
+
+        # eval the incoming string if possible, it may be a JSON
+        # representation of a dict, and if this works we
+        # set the num_cores option to the eval'd version.
+        # It may also be a normal string or int, so we can't just
+        # use the JSON parser.
+        try:
+            import ast  # literal_eval: safe parsing, unlike eval of user input
+            self.grid_options['num_cores'] = ast.literal_eval(str(self.grid_options['num_cores']))
+        except (ValueError, SyntaxError):
+            pass
+
         # backwards compatibility
-        if "amt_cores" in self.grid_options:
-            self.grid_options["num_processes"] = self.grid_options["amt_cores"]
-            self.grid_options["num_cores"] = self.grid_options["amt_cores"]
-
-        if self.grid_options["num_cores"] == 0:
-            # use all logical cores available to us
-            self.grid_options["num_processes"] = max(1, psutil.cpu_count(logical=True))
-        elif self.grid_options["num_cores"] == -1:
-            # use all physical cores available to us
-            self.grid_options["num_processes"] = max(1, psutil.cpu_count(logical=False))
-        else:
-            # manually specify number of cores made available
-            self.grid_options["num_processes"] = self.grid_options["num_cores"]
+        if 'amt_cores' in self.grid_options:
+            self.grid_options['num_cores'] = self.grid_options['amt_cores']
+
+        if _vb:
+            print(f"NPROC: \"{self.grid_options['num_cores']}\" type \"{type(self.grid_options['num_cores'])}\"")
+
+        if 'num_cores' in self.grid_options:
+            if _vb:
+                print("NPROC found in grid options")
+            if type(self.grid_options['num_cores']) is dict:
+                # try to match hostname to the dict keys
+                hostnameslist = hostnames()
+                for host,ncores in self.grid_options['num_cores'].items():
+                    # check if we are this host
+                    if host in hostnameslist:
+                        self.grid_options['num_processes'] = __nprocs(ncores)
+                        if _vb:
+                            print(f"SET NPROC = {self.grid_options['num_processes']} FROM HOST {host}")
+                        return
+
+                # no host match : use default number of cores
+                if 'default' in self.grid_options['num_cores']:
+                    self.grid_options['num_processes'] = __nprocs(self.grid_options['num_cores']['default'])
+                    if _vb:
+                        print(f"SET NPROC = {self.grid_options['num_processes']} FROM default")
+                    return
+            elif type(self.grid_options['num_cores']) is str:
+                # parse the string passed in
+                self.grid_options['num_processes'] = __nprocs(self.grid_options['num_cores'])
+                if _vb:
+                    print(f"SET NPROC = {self.grid_options['num_processes']} from string passed in")
+                return
+            elif type(self.grid_options['num_cores']) is int:
+                # just use int passed in
+                self.grid_options['num_processes'] = __nprocs(self.grid_options['num_cores'])
+                if _vb:
+                    print(f"SET NPROC = {self.grid_options['num_processes']} from int passed in")
+                return
+
+        # no host matched or no 'num_cores' option provided: use all available logical cores
+        self.grid_options['num_processes'] = max(1, psutil.cpu_count(logical=True))
+        if _vb:
+            print(f"SET NPROC = {self.grid_options['num_processes']} because no option given")
+
+        return
 
     def _pre_run_setup(self) -> None:
         """
@@ -1267,12 +1342,10 @@ class Population(
         for output_dict in iter(result_queue.get, sentinel):
             if output_dict:
                 # don't let Xinit be added
-                if (
-                    "ensemble_results" in combined_output_dict
-                    and "ensemble" in combined_output_dict["ensemble_results"]
-                    and "Xinit" in combined_output_dict["ensemble_results"]["ensemble"]
-                ):
+                try:
                     del combined_output_dict["ensemble_results"]["ensemble"]["Xinit"]
+                except (KeyError, TypeError):
+                    pass
 
                 # merge dicts
                 combined_output_dict = merge_dicts(
diff --git a/binarycpython/utils/grid_options_defaults.py b/binarycpython/utils/grid_options_defaults.py
deleted file mode 100644
index 02ecbf34f0a054c4f2f681121d93dc19047b7bf9..0000000000000000000000000000000000000000
--- a/binarycpython/utils/grid_options_defaults.py
+++ /dev/null
@@ -1,851 +0,0 @@
-"""
-Module that contains the default options for the population grid code along with the description for these options, in the form of dictionaries:
-    - grid_options_defaults_dict: dictionary containing the default values for all the options
-    - grid_options_descriptions: dictionary containing the description for these options.
-
-There are several other functions in this module, mostly to generate help texts or documents:
-    - grid_options_help: interactive function for the user to get descriptions for options
-    - grid_options_description_checker: function that checks that checks which options have a description.
-    - write_grid_options_to_rst_file: function to generate the .rst document for the docs
-
-With this its also possible to automatically generate a document containing all the setting names + descriptions.
-
-All the options starting with _ should not be changed by the user except when you really know what you're doing (which is probably hacking the code :P)
-"""
-
-import os
-
-from binarycpython.utils.custom_logging_functions import temp_dir
-from binarycpython.utils.functions import return_binary_c_version_info
-
-_LOGGER_VERBOSITY_LEVEL = 1
-_CUSTOM_LOGGING_VERBOSITY_LEVEL = 2
-_MOE2017_VERBOSITY_LEVEL = 5
-_MOE2017_VERBOSITY_INTERPOLATOR_LEVEL = 6
-_MOE2017_VERBOSITY_INTERPOLATOR_EXTRA_LEVEL = 7
-
-# Options dict
-grid_options_defaults_dict = {
-    ##########################
-    # general (or unordered..)
-    ##########################
-    "num_cores": 1,  # total number of cores used to evolve the population
-    "parse_function": None,  # Function to parse the output with.
-    "multiplicity_fraction_function": 0,  # Which multiplicity fraction function to use. 0: None, 1: Arenou 2010, 2: Rhagavan 2010, 3: Moe and di Stefano 2017
-    "tmp_dir": temp_dir(),  # Setting the temp dir of the program
-    "_main_pid": -1,  # Placeholder for the main process id of the run.
-    "save_ensemble_chunks": True,  # Force the ensemble chunk to be saved even if we are joining a thread (just in case the joining fails)
-    "combine_ensemble_with_thread_joining": True,  # Flag on whether to combine everything and return it to the user or if false: write it to data_dir/ensemble_output_{population_id}_{thread_id}.json
-    "_commandline_input": "",
-    "log_runtime_systems": 0,  # whether to log the runtime of the systems (1 file per thread. stored in the tmp_dir)
-    "_actually_evolve_system": True,  # Whether to actually evolve the systems of just act as if. for testing. used in _process_run_population_grid
-    "max_queue_size": 1000,  # Maximum size of the system call queue.
-    "run_zero_probability_system": True,  # Whether to run the zero probability systems
-    "_zero_prob_stars_skipped": 0,
-    "ensemble_factor_in_probability_weighted_mass": False,  # Whether to multiply the ensemble results by 1/probability_weighted_mass
-    "do_dry_run": True,  # Whether to do a dry run to calculate the total probability for this run
-    "custom_generator": None,  # Place for the custom system generator
-    "exit_after_dry_run": False,  # Exit after dry run?
-    ##########################
-    # Execution log:
-    ##########################
-    "verbosity": 0,  # Level of verbosity of the simulation
-    "log_file": os.path.join(
-        temp_dir(), "binary_c_python.log"
-    ),  # Set to None to not log to file. The directory will be created
-    "log_dt": 5,  # time between vb=1 logging outputs
-    "n_logging_stats": 50,  # number of logging stats used to calculate time remaining (etc.) default = 50
-    ##########################
-    # binary_c files
-    ##########################
-    "_binary_c_executable": os.path.join(
-        os.environ["BINARY_C"], "binary_c"
-    ),  # TODO: make this more robust
-    "_binary_c_shared_library": os.path.join(
-        os.environ["BINARY_C"], "src", "libbinary_c.so"
-    ),  # TODO: make this more robust
-    "_binary_c_config_executable": os.path.join(
-        os.environ["BINARY_C"], "binary_c-config"
-    ),  # TODO: make this more robust
-    "_binary_c_dir": os.environ["BINARY_C"],
-    ##########################
-    # Moe and di Stefano (2017) internal settings
-    ##########################
-    "_loaded_Moe2017_data": False,  # Holds flag whether the Moe and di Stefano (2017) data is loaded into memory
-    "_set_Moe2017_grid": False,  # Whether the Moe and di Stefano (2017) grid has been loaded
-    "Moe2017_options": None,  # Holds the Moe and di Stefano (2017) options.
-    "_Moe2017_JSON_data": None,  # Stores the data
-    ##########################
-    # Custom logging
-    ##########################
-    "C_auto_logging": None,  # Should contain a dictionary where the keys are they headers
-    # and the values are lists of parameters that should be logged.
-    # This will get parsed by autogen_C_logging_code in custom_logging_functions.py
-    "C_logging_code": None,  # Should contain a string which holds the logging code.
-    "custom_logging_func_memaddr": -1,  # Contains the custom_logging functions memory address
-    "_custom_logging_shared_library_file": None,  # file containing the .so file
-    ##########################
-    # Store pre-loading:
-    ##########################
-    "_store_memaddr": -1,  # Contains the store object memory address, useful for pre loading.
-    # defaults to -1 and isn't used if that's the default then.
-    ##########################
-    # Log args: logging of arguments
-    ##########################
-    "log_args": 0,  # unused
-    "log_args_dir": "/tmp/",  # unused
-    ##########################
-    # Population evolution
-    ##########################
-    ## General
-    "evolution_type": "grid",  # Flag for type of population evolution
-    "_evolution_type_options": [
-        "grid",
-        "custom_generator",
-        "source_file",
-    ],  # available choices for type of population evolution. # TODO: fill later with Monte Carlo, source file
-    "_system_generator": None,  # value that holds the function that generates the system
-    # (result of building the grid script)
-    "source_file_filename": None,  # filename for the source
-    "_count": 0,  # count of systems
-    "_total_starcount": 0,  # Total count of systems in this generator
-    "_probtot": 0,  # total probability
-    "weight": 1.0,  # weighting for the probability
-    "repeat": 1,  # number of times to repeat each system (probability is adjusted to be 1/repeat)
-    "_start_time_evolution": 0,  # Start time of the grid
-    "_end_time_evolution": 0,  # end time of the grid
-    "_errors_found": False,  # Flag whether there are any errors from binary_c
-    "_errors_exceeded": False,  # Flag whether the number of errors have exceeded the limit
-    "_failed_count": 0,  # number of failed systems
-    "_failed_prob": 0,  # Summed probability of failed systems
-    "failed_systems_threshold": 20,  # Maximum failed systems per process allowed to fail before the process stops logging the failing systems.
-    "_failed_systems_error_codes": [],  # List to store the unique error codes
-    "_population_id": 0,  # Random id of this grid/population run, Unique code for the population. Should be set only once by the controller process.
-    "_total_mass_run": 0,  # To count the total mass that thread/process has ran
-    "_total_probability_weighted_mass_run": 0,  # To count the total mass * probability for each system that thread/process has ran
-    "modulo": 1,  # run modulo n of the grid.
-    "start_at": 0,  # start at the first model
-    ## Grid type evolution
-    "_grid_variables": {},  # grid variables
-    "gridcode_filename": None,  # filename of gridcode
-    "symlink latest gridcode": True,  # symlink to latest gridcode
-    ## Monte carlo type evolution
-    # TODO: make MC options
-    ## Evolution from source file
-    # TODO: make run from sourcefile options.
-    ## Other no yet implemented parts for the population evolution part
-    #     # start at this model number: handy during debugging
-    #     # to skip large parts of the grid
-    #     start_at => 0
-    #     global_error_string => undef,
-    #     monitor_files => [],
-    #     nextlogtime   => 0,
-    #     nthreads      => 1, # number of threads
-    #     # start at model offset (0-based, so first model is zero)
-    #     offset        => 0,
-    #     resolution=>{
-    #         shift   =>0,
-    #         previous=>0,
-    #         n       =>{} # per-variable resolution
-    #     },
-    #     thread_q      => undef,
-    #     threads       => undef, # array of threads objects
-    #     tstart        => [gettimeofday], # flexigrid start time
-    #     __nvar        => 0, # number of grid variables
-    #     _varstub      => undef,
-    #     _lock         => undef,
-    #     _evcode_pids  => [],
-    # };
-    ########################################
-    # Slurm stuff
-    ########################################
-    "slurm": 0,  # dont use the slurm by default. 1 = use slurm
-    # "slurm_ntasks": 1,  # CPUs required per array job: usually only need this
-    # "slurm_command": "",  # Command that slurm runs (e.g. evolve or join_datafiles)
-    # "slurm_dir": "",  # working directory containing scripts output logs etc.
-    # "slurm_njobs": 0,  # number of scripts; set to 0 as default
-    # "slurm_jobid": "",  # slurm job id (%A)
-    # "slurm_memory": 512,  # in MB, the memory use of the job
-    # "slurm_warn_max_memory": 1024,  # in MB : warn if mem req. > this
-    # "slurm_use_all_node_CPUs": 0,  # 1 = use all of a node's CPUs. 0 = use a given number of CPUs
-    # "slurm_postpone_join": 0,  # if 1 do not join on slurm, join elsewhere. want to do it off the slurm grid (e.g. with more RAM)
-    # "slurm_jobarrayindex": "",  # slurm job array index (%a)
-    # "slurm_jobname": "binary_grid",  # default
-    # "slurm_partition": None,
-    # "slurm_time": 0,  # total time. 0 = infinite time
-    # "slurm_postpone_sbatch": 0,  # if 1: don't submit, just make the script
-    # "slurm_array": None,  # override for --array, useful for rerunning jobs
-    # "slurm_use_all_node_CPUs": 0,  # if given nodes, set to 1
-    # # if given CPUs, set to 0
-    # # you will want to use this if your Slurm SelectType is e.g. linear
-    # # which means it allocates all the CPUs in a node to the job
-    # "slurm_control_CPUs": 0,  # if so, leave this many for Pythons control (0)
-    # "slurm_array": None,  # override for --array, useful for rerunning jobs
-    # "slurm_partition": None,  # MUST be defined
-    # "slurm_extra_settings": {},  # Place to put extra configuration for the SLURM batch file. The key and value of the dict will become the key and value of the line in te slurm batch file. Will be put in after all the other settings (and before the command). Take care not to overwrite something without really meaning to do so.
-    ########################################
-    # Condor stuff
-    ########################################
-    "condor": 0,  # 1 to use condor, 0 otherwise
-    # "condor_command": "",  # condor command e.g. "evolve", "join"
-    # "condor_dir": "",  # working directory containing e.g. scripts, output, logs (e.g. should be NFS available to all)
-    # "condor_njobs": "",  # number of scripts/jobs that CONDOR will run in total
-    # "condor_jobid": "",  # condor job id
-    # "condor_postpone_join": 0,  # if 1, data is not joined, e.g. if you want to do it off the condor grid (e.g. with more RAM)
-    # # "condor_join_machine": None, # if defined then this is the machine on which the join command should be launched (must be sshable and not postponed)
-    # "condor_join_pwd": "",  # directory the join should be in (defaults to $ENV{PWD} if undef)
-    # "condor_memory": 1024,  # in MB, the memory use (ImageSize) of the job
-    # "condor_universe": "vanilla",  # usually vanilla universe
-    # "condor_extra_settings": {},  # Place to put extra configuration for the CONDOR submit file. The key and value of the dict will become the key and value of the line in te slurm batch file. Will be put in after all the other settings (and before the command). Take care not to overwrite something without really meaning to do so.
-    # snapshots and checkpoints
-    # condor_snapshot_on_kill=>0, # if 1 snapshot on SIGKILL before exit
-    # condor_load_from_snapshot=>0, # if 1 check for snapshot .sv file and load it if found
-    # condor_checkpoint_interval=>0, # checkpoint interval (seconds)
-    # condor_checkpoint_stamp_times=>0, # if 1 then files are given timestamped names
-    # (warning: lots of files!), otherwise just store the lates
-    # condor_streams=>0, # stream stderr/stdout by default (warning: might cause heavy network load)
-    # condor_save_joined_file=>0, # if 1 then results/joined contains the results
-    # (useful for debugging, otherwise a lot of work)
-    # condor_requirements=>'', # used?
-    #     # resubmit options : if the status of a condor script is
-    #     # either 'finished','submitted','running' or 'crashed',
-    #     # decide whether to resubmit it.
-    #     # NB Normally the status is empty, e.g. on the first run.
-    #     # These are for restarting runs.
-    #     condor_resubmit_finished=>0,
-    # condor_resubmit_submitted=>0,
-    # condor_resubmit_running=>0,
-    # condor_resubmit_crashed=>0,
-    ##########################
-    # Unordered. Need to go through this. Copied from the perl implementation.
-    ##########################
-    ##
-    # return_array_refs=>1, # quicker data parsing mode
-    # sort_args=>1,
-    # save_args=>1,
-    # nice=>'nice -n +20',  # nice command e.g. 'nice -n +10' or ''
-    # timeout=>15, # seconds until timeout
-    # log_filename=>"/scratch/davidh/results_simulations/tmp/log.txt",
-    # # current_log_filename=>"/scratch/davidh/results_simulations/tmp/grid_errors.log",
-    ############################################################
-    # Set default grid properties (in %self->{_grid_options}}
-    # and %{$self->{_bse_options}})
-    # This is the first thing that should be called by the user!
-    ############################################################
-    # # set signal handlers for timeout
-    # $self->set_class_signal_handlers();
-    # # set operating system
-    # my $os = rob_misc::operating_system();
-    # %{$self->{_grid_options}}=(
-    #     # save operating system
-    # operating_system=>$os,
-    #     # process name
-    #     process_name => 'binary_grid'.$VERSION,
-    # grid_defaults_set=>1, # so we know the grid_defaults function has been called
-    # # grid suspend files: assume binary_c by default
-    # suspend_files=>[$tmp.'/force_binary_c_suspend',
-    #         './force_binary_c_suspend'],
-    # snapshot_file=>$tmp.'/binary_c-snapshot',
-    # ########################################
-    # # infomration about the running grid script
-    # ########################################
-    # working_directory=>cwd(), # the starting directory
-    # perlscript=>$0, # the name of the perlscript
-    # perlscript_arguments=>join(' ',@ARGV), # arguments as a string
-    # perl_executable=>$^X, # the perl executable
-    # command_line=>join(' ',$0,@ARGV), # full command line
-    # process_ID=>$$, # process ID of the main perl script
-    # ########################################
-    # # GRID
-    # ########################################
-    #     # if undef, generate gridcode, otherwise load the gridcode
-    #     # from this file. useful for debugging
-    #     gridcode_from_file => undef,
-    #     # assume binary_grid perl backend by default
-    #     backend =>
-    #     $self->{_grid_options}->{backend} //
-    #     $binary_grid2::backend //
-    #     'binary_grid::Perl',
-    #     # custom C function for output : this automatically
-    #     # binds if a function is available.
-    #     C_logging_code => undef,
-    #     C_auto_logging => undef,
-    #     custom_output_C_function_pointer => binary_c_function_bind(),
-    # # control flow
-    # rungrid=>1, # usually run the grid, but can be 0
-    # # to skip it (e.g. for condor/slurm runs)
-    # merge_datafiles=>'',
-    # merge_datafiles_filelist=>'',
-    # # parameter space options
-    # binary=>0, # set to 0 for single stars, 1 for binaries
-    #     # if use_full_resolution is 1, then run a dummy grid to
-    #     # calculate the resolution. this could be slow...
-    #     use_full_resolution => 1,
-    # # the probability in any distribution must be within
-    # # this tolerance of 1.0, ignored if undef (if you want
-    # # to run *part* of the parameter space then this *must* be undef)
-    # probability_tolerance=>undef,
-    # # how to deal with a failure of the probability tolerance:
-    # # 0 = nothing
-    # # 1 = warning
-    # # 2 = stop
-    # probability_tolerance_failmode=>1,
-    # # add up and log system error count and probability
-    # add_up_system_errors=>1,
-    # log_system_errors=>1,
-    # # codes, paths, executables etc.
-    # # assume binary_c by default, and set its defaults
-    # code=>'binary_c',
-    # arg_prefix=>'--',
-    # prog=>'binary_c', # executable
-    # nice=>'nice -n +0', # nice command
-    # ionice=>'',
-    # # compress output?
-    # binary_c_compression=>0,
-    #     # get output as array of pre-split array refs
-    #     return_array_refs=>1,
-    # # environment
-    # shell_environment=>undef,
-    # libpath=>undef, # for backwards compatibility
-    # # where is binary_c? need this to get the values of some counters
-    # rootpath=>$self->okdir($ENV{BINARY_C_ROOTPATH}) //
-    # $self->okdir($ENV{HOME}.'/progs/stars/binary_c') //
-    # '.' , # last option is a fallback ... will fail if it doesn't exist
-    # srcpath=>$self->okdir($ENV{BINARY_C_SRCPATH}) //
-    # $self->okdir($ENV{BINARY_C_ROOTPATH}.'/src') //
-    # $self->okdir($ENV{HOME}.'/progs/stars/binary_c/src') //
-    # './src' , # last option is fallback... will fail if it doesn't exist
-    # # stack size per thread in megabytes
-    # threads_stack_size=>50,
-    # # thread sleep time between starting the evolution code and starting
-    # # the grid
-    # thread_presleep=>0,
-    # # threads
-    # # Max time a thread can sit looping (with calls to tbse_line)
-    # # before a warning is issued : NB this does not catch real freezes,
-    # # just infinite loops (which still output)
-    # thread_max_freeze_time_before_warning=>10,
-    # # run all models by default: modulo=1, offset=0
-    # modulo=>1,
-    # offset=>0,
-    #     # max number of stars on the queue
-    #     maxq_per_thread => 100,
-    # # data dump file : undef by default (do nothing)
-    # results_hash_dumpfile => '',
-    # # compress files with bzip2 by default
-    # compress_results_hash => 1,
-    # ########################################
-    # # CPU
-    # ########################################
-    # cpu_cap=>0, # if 1, limits to one CPU
-    # cpu_affinity => 0, # do not bind to a CPU by default
-    # ########################################
-    # # Code, Timeouts, Signals
-    # ########################################
-    # binary_grid_code_filtering=>1, #  you want this, it's (MUCH!) faster
-    # pre_filter_file=>undef, # dump pre filtered code to this file
-    # post_filter_file=>undef,  # dump post filtered code to this file
-    # timeout=>30, # timeout in seconds
-    # timeout_vb=>0, # no timeout logging
-    # tvb=>0, # no thread logging
-    # nfs_sleep=>1, # time to wait for NFS to catch up with file accesses
-    # # flexigrid checks the timeouts every
-    # # flexigrid_timeout_check_interval seconds
-    # flexigrid_timeout_check_interval=>0.01,
-    # # this is set to 1 when the grid is finished
-    # flexigrid_finished=>0,
-    # # allow signals by default
-    # 'no signals'=>0,
-    # # but perhaps disable specific signals?
-    # 'disable signal'=>{INT=>0,ALRM=>0,CONT=>0,USR1=>0,STOP=>0},
-    # # dummy variables
-    # single_star_period=>1e50,  # orbital period of a single star
-    # #### timers : set timers to 0 (or empty list) to ignore,
-    # #### NB these must be given context (e.g. main::xyz)
-    # #### for functions not in binary_grid
-    # timers=>0,
-    # timer_subroutines=>[
-    #     # this is a suggested default list
-    #     'flexigrid',
-    #         'set_next_alarm',
-    #     'vbout',
-    #         'vbout_fast',
-    #     'run_flexigrid_thread',
-    #         'thread_vb'
-    # ],
-    # ########################################
-    # # INPUT/OUTPUT
-    # ########################################
-    # blocking=>undef, # not yet set
-    # # prepend command with stdbuf to stop buffering (if available)
-    # stdbuf_command=>`stdbuf --version`=~/stdbuf \(GNU/ ? ' stdbuf -i0 -o0 -e0 ' : undef,
-    # vb=>("@ARGV"=~/\Wvb=(\d+)\W/)[0] // 0, # set to 1 (or more) for verbose output to the screen
-    # log_dt_secs=>1, # log output to stdout~every log_dt_secs seconds
-    # nmod=>10, # every nmod models there is output to the screen,
-    # # if log_dt_secs has been exceeded also (ignored if 0)
-    # colour=>1, # set to 1 to use the ANSIColor module for colour output
-    # log_args=>0, # do not log args in files
-    # log_fins=>0, # log end of runs too
-    #     sort_args=>0, # do not sort args
-    # save_args=>0, # do not save args in a string
-    # log_args_dir=>$tmp, # where to output the args files
-    # always_reopen_arg_files=>0, # if 1 then arg files are always closed and reopened
-    #   (may cause a lot of disk I/O)
-    # lazy_arg_sending=>1, # if 1, the previous args are remembered and
-    # # only args that changed are sent (except M1, M2 etc. which always
-    # # need sending)
-    # # force output files to open on a local disk (not an NFS partion)
-    # # not sure how to do this on another OS
-    # force_local_hdd_use=>($os eq 'unix'),
-    # # for verbose output, define the newline
-    # # For terminals use "\x0d", for files use "\n", in the
-    # # case of multiple threads this will be set to \n
-    # newline=> "\x0d",
-    #     # use reset_stars_defaults
-    #     reset_stars_defaults=>1,
-    # # set signal captures: argument determines behaviour when the code locks up
-    # # 0: exit
-    # # 1: reset and try the next star (does this work?!)
-    # alarm_procedure=>1,
-    # # exit on eval failure?
-    # exit_on_eval_failure=>1,
-    # ## functions: these should be set by perl lexical name
-    # ## (they are automatically converted to function pointers
-    # ## at runtime)
-    # # function to be called just before a thread is created
-    # thread_precreate_function=>undef,
-    #     thread_precreate_function_pointer=>undef,
-    # # function to be called just after a thread is created
-    # # (from inside the thread just before *grid () call)
-    # threads_entry_function=>undef,
-    #     threads_entry_function_pointer=>undef,
-    # # function to be called just after a thread is finished
-    # # (from inside the thread just after *grid () call)
-    # threads_flush_function=>undef,
-    # threads_flush_function_pointer=>undef,
-    # # function to be called just after a thread is created
-    # # (but external to the thread)
-    # thread_postrun_function=>undef,
-    # thread_postrun_function_pointer=>undef,
-    # # function to be called just before a thread join
-    # # (external to the thread)
-    # thread_prejoin_function=>undef,
-    # thread_prejoin_function_pointer=>undef,
-    # # default to using the internal join_flexigrid_thread function
-    # threads_join_function=>'binary_grid2::join_flexigrid_thread',
-    # threads_join_function_pointer=>sub{return $self->join_flexigrid_thread(@_)},
-    # # function to be called just after a thread join
-    # # (external to the thread)
-    # thread_postjoin_function=>undef,
-    # thread_postjoin_function_pointer=>undef,
-    # # usually, parse_bse in the main script is called
-    # parse_bse_function=>'main::parse_bse',
-    #     parse_bse_function_pointer=>undef,
-    # # if starting_snapshot_file is defined, load initial
-    # # values for the grid from the snapshot file rather
-    # # than a normal initiation: this enables you to
-    # # stop and start a grid
-    # starting_snapshot_file=>undef,
-}
-
-# Grid containing the descriptions of the options # TODO: add input types for all of them
-grid_options_descriptions = {
-    "tmp_dir": "Directory where certain types of output are stored. The grid code is stored in that directory, as well as the custom logging libraries. Log files and other diagnostics will usually be written to this location, unless specified otherwise",  # TODO: improve this
-    "_binary_c_dir": "Director where binary_c is stored. This options are not really used",
-    "_binary_c_config_executable": "Full path of the binary_c-config executable. This options is not used in the population object.",
-    "_binary_c_executable": "Full path to the binary_c executable. This options is not used in the population object.",
-    "_binary_c_shared_library": "Full path to the libbinary_c file. This options is not used in the population object",
-    "verbosity": "Verbosity of the population code. Default is 0, by which only errors will be printed. Higher values will show more output, which is good for debugging.",
-    "binary": "Set this to 1 if the population contains binaries. Input: int",  # TODO: write what effect this has.
-    "num_cores": "The number of cores that the population grid will use. The multiprocessing is useful but make sure to figure out how many logical cores the machine has (use e.g. psutil.cpu_count(logical=False) to find the true number of cores, psutil.cpu_count(logical=True) to find the number of logical cores). The core is multi processed, not multi threaded, and will gain no extra speed when num_cores exceeds the number of logical cores. Input: int",
-    "_start_time_evolution": "Variable storing the start timestamp of the population evolution. Set by the object itself.",  # TODO: make sure this is logged to a file
-    "_end_time_evolution": "Variable storing the end timestamp of the population evolution. Set by the object itself",  # TODO: make sure this is logged to a file
-    "_total_starcount": "Variable storing the total number of systems in the generator. Used and set by the population object.",
-    "_custom_logging_shared_library_file": "filename for the custom_logging shared library. Used and set by the population object",
-    "_errors_found": "Variable storing a Boolean flag whether errors by binary_c are encountered.",
-    "_errors_exceeded": "Variable storing a Boolean flag whether the number of errors was higher than the set threshold (failed_systems_threshold). If True, then the command line arguments of the failing systems will not be stored in the failed_system_log files.",
-    "source_file_filename": "Variable containing the source file containing lines of binary_c command line calls. These all have to start with binary_c.",  # TODO: Expand
-    "C_auto_logging": "Dictionary containing parameters to be logged by binary_c. The structure of this dictionary is as follows: the key is used as the headline which the user can then catch. The value at that key is a list of binary_c system parameters (like star[0].mass)",
-    "C_logging_code": "Variable to store the exact code that is used for the custom_logging. In this way the user can do more complex logging, as well as putting these logging strings in files.",
-    "_failed_count": "Variable storing the number of failed systems.",
-    "_evolution_type_options": "List containing the evolution type options.",
-    "_failed_prob": "Variable storing the total probability of all the failed systems",
-    "_failed_systems_error_codes": "List storing the unique error codes raised by binary_c of the failed systems",
-    "_grid_variables": "Dictionary storing the grid_variables. These contain properties which are accessed by the _generate_grid_code function",
-    "_population_id": "Variable storing a unique 32-char hex string.",
-    "_commandline_input": "String containing the arguments passed to the population object via the command line. Set and used by the population object.",
-    "_system_generator": "Function object that contains the system generator function. This can be from a grid, or a source file, or a Monte Carlo grid.",
-    "gridcode_filename": "Filename for the grid code. Set and used by the population object. TODO: allow the user to provide their own function, rather than only a generated function.",
-    "log_args": "Boolean to log the arguments of the current running system. ", 
-    "log_args_dir": "Directory to log the arguments of the current running system to.",
-    "log_file": "Log file for the population object. Unused",  # TODO: fix the functionality for this and describe it properly
-    "custom_logging_func_memaddr": "Memory address where the custom_logging_function is stored. Input: int",
-    "_count": "Counter tracking which system the generator is on.",
-    "_probtot": "Total probability of the population.",  # TODO: check whether this is used properly throughout
-    "_main_pid": "Main process ID of the master process. Used and set by the population object.",
-    "_store_memaddr": "Memory address of the store object for binary_c.",
-    "failed_systems_threshold": "Variable storing the maximum number of systems that are allowed to fail before logging their command line arguments to failed_systems log files",
-    "parse_function": "Function that the user can provide to handle the output the binary_c. This function has to take the arguments (self, output). Its best not to return anything in this function, and just store stuff in the self.grid_results dictionary, or just output results to a file",
-    "condor": "Int flag whether to use a condor type population evolution. Not implemented yet.",  # TODO: describe this in more detail
-    "slurm": "Int flag whether to use a Slurm type population evolution.",  # TODO: describe this in more detail
-    "weight": "Weight factor for each system. The calculated probability is multiplied by this. If the user wants each system to be repeated several times, then this variable should not be changed, rather change the _repeat variable instead, as that handles the reduction in probability per system. This is useful for systems that have a process with some random element in it.",  # TODO: add more info here, regarding the evolution splitting.
-    "repeat": "Factor of how many times a system should be repeated. Consider the evolution splitting binary_c argument for supernovae kick repeating.",  # TODO: make sure this is used.
-    "evolution_type": "Variable containing the type of evolution used of the grid. Multiprocessing or linear processing",
-    "combine_ensemble_with_thread_joining": "Boolean flag on whether to combine everything and return it to the user or if false: write it to data_dir/ensemble_output_{population_id}_{thread_id}.json",
-    "log_runtime_systems": "Whether to log the runtime of the systems . Each systems run by the thread is logged to a file and is stored in the tmp_dir. (1 file per thread). Don't use this if you are planning to run a lot of systems. This is mostly for debugging and finding systems that take long to run. Integer, default = 0. if value is 1 then the systems are logged",
-    "_total_mass_run": "To count the total mass that thread/process has ran",
-    "_total_probability_weighted_mass_run": "To count the total mass * probability for each system that thread/process has ran",
-    "_actually_evolve_system": "Whether to actually evolve the systems of just act as if. for testing. used in _process_run_population_grid",
-    "max_queue_size": "Maximum size of the queue that is used to feed the processes. Don't make this too big! Default: 1000. Input: int",
-    "_set_Moe2017_grid": "Internal flag whether the Moe and di Stefano (2017) grid has been loaded",
-    "run_zero_probability_system": "Whether to run the zero probability systems. Default: True. Input: Boolean",
-    "_zero_prob_stars_skipped": "Internal counter to track how many systems are skipped because they have 0 probability",
-    "ensemble_factor_in_probability_weighted_mass": "Flag to multiply all the ensemble results with 1/probability_weighted_mass",
-    "multiplicity_fraction_function": "Which multiplicity fraction function to use. 0: None, 1: Arenou 2010, 2: Rhagavan 2010, 3: Moe and di Stefano (2017) 2017",
-    "m&s_options": "Internal variable that holds the Moe and di Stefano (2017) options. Don't write to this your self",
-    "_loaded_Moe2017_data": "Internal variable storing whether the Moe and di Stefano (2017) data has been loaded into memory",
-    "do_dry_run": "Whether to do a dry run to calculate the total probability for this run",
-    "_Moe2017_JSON_data": "Location to store the loaded Moe&diStefano2017 dataset",  # Stores the data
-}
-
-###
-#
-
-MIN_MASS_BINARY_C = float(
-    return_binary_c_version_info(parsed=True)["macros"]["BINARY_C_MINIMUM_STELLAR_MASS"]
-)
-
-
-# Default options for the Moe & di Stefano grid
-moe_di_stefano_default_options = {
-    # place holder for the JSON data to be used if a file
-    # isn't specified
-    "JSON": None,
-    # resolution data
-    "resolutions": {
-        "M": [
-            20,  # M1
-            20,  # M2 (i.e. q)
-            0,  # M3 currently unused
-            0,  # M4 currently unused
-        ],
-        "logP": [
-            20,  # P2 (binary period)
-            0,  # P3 (triple period) currently unused
-            0,  # P4 (quadruple period) currently unused
-        ],
-        "ecc": [
-            10,  # e (binary eccentricity)
-            0,  # e2 (triple eccentricity) currently unused
-            0,  # e3 (quadruple eccentricity) currently unused
-        ],
-    },
-    "samplerfuncs": {
-        "M": [None, None, None, None],
-        "logP": [None, None, None],
-        "ecc": [None, None, None],
-    },
-    "ranges": {
-        # stellar masses (Msun)
-        "M": [
-            MIN_MASS_BINARY_C
-            * 1.05,  # 0.08 is a tad bit above the minimum mass. Don't sample at 0.07, otherwise the first row of q values will have a phasevol of 0. Anything higher is fine.
-            80.0,  # (rather arbitrary) upper mass cutoff
-        ],
-        "q": [
-            None,  # artificial qmin : set to None to use default
-            None,  # artificial qmax : set to None to use default
-        ],
-        "logP": [0.0, 8.0],  # 0 = log10(1 day)  # 8 = log10(10^8 days)
-        "ecc": [0.0, 0.99],
-    },
-    # minimum stellar mass
-    "Mmin": MIN_MASS_BINARY_C,  # We take the value that binary_c has set as the default
-    # multiplicity model (as a function of log10M1)
-    #
-    # You can use 'Poisson' which uses the system multiplicity
-    # given by Moe and maps this to single/binary/triple/quad
-    # fractions.
-    #
-    # Alternatively, 'data' takes the fractions directly
-    # from the data, but then triples and quadruples are
-    # combined (and there are NO quadruples).
-    "multiplicity_model": "Poisson",
-    # multiplicity modulator:
-    # [single, binary, triple, quadruple]
-    #
-    # e.g. [1,0,0,0] for single stars only
-    #      [0,1,0,0] for binary stars only
-    #
-    # defaults to [1,1,0,0] i.e. all types
-    #
-    "multiplicity_modulator": [
-        1,  # single
-        1,  # binary
-        0,  # triple
-        0,  # quadruple
-    ],
-    # given a mix of multiplicities, you can either (noting that
-    # here (S,B,T,Q) = appropriate modulator * model(S,B,T,Q) )
-    #
-    # 'norm'  : normalise so the whole population is 1.0
-    #           after implementing the appropriate fractions
-    #           S/(S+B+T+Q), B/(S+B+T+Q), T/(S+B+T+Q), Q/(S+B+T+Q)
-    #
-    # 'raw'   : stick to what is predicted, i.e.
-    #           S/(S+B+T+Q), B/(S+B+T+Q), T/(S+B+T+Q), Q/(S+B+T+Q)
-    #           without normalisation
-    #           (in which case the total probability < 1.0 unless
-    #            all you use single, binary, triple and quadruple)
-    #
-    # 'merge' : e.g. if you only have single and binary,
-    #           add the triples and quadruples to the binaries, so
-    #           binaries represent all multiple systems
-    #           ...
-    #           *** this is canonical binary population synthesis ***
-    #
-    #           Note: if multiplicity_modulator == [1,1,1,1] this
-    #                 option does nothing (equivalent to 'raw').
-    #
-    #
-    # note: if you only set one multiplicity_modulator
-    # to 1, and all the others to 0, then normalising
-    # will mean that you effectively have the same number
-    # of stars as single, binary, triple or quad (whichever
-    # is non-zero) i.e. the multiplicity fraction is ignored.
-    # This is probably not useful except for
-    # testing purposes or comparing to old grids.
-    "normalize_multiplicities": "merge",
-    # q extrapolation (below 0.15 and above 0.9) method. We can choose from ['flat', 'linear', 'plaw2', 'nolowq']
-    "q_low_extrapolation_method": "linear",
-    "q_high_extrapolation_method": "linear",
-}
-
-moe_di_stefano_default_options_description = {
-    "resolutions": "",
-    "ranges": "",
-    "Mmin": "Minimum stellar mass",
-    "multiplicity_model": """
-multiplicity model (as a function of log10M1)
-
-You can use 'Poisson' which uses the system multiplicity
-given by Moe and maps this to single/binary/triple/quad
-fractions.
-
-Alternatively, 'data' takes the fractions directly
-from the data, but then triples and quadruples are
-combined (and there are NO quadruples).
-""",
-    "multiplicity_modulator": """
-[single, binary, triple, quadruple]
-
-e.g. [1,0,0,0] for single stars only
-     [0,1,0,0] for binary stars only
-
-defaults to [1,1,0,0] i.e. singles and binaries
-""",
-    "normalize_multiplicities": """
-'norm': normalise so the whole population is 1.0
-        after implementing the appropriate fractions
-        S/(S+B+T+Q), B/(S+B+T+Q), T/(S+B+T+Q), Q/(S+B+T+Q)
-        given a mix of multiplicities, you can either (noting that
-        here (S,B,T,Q) = appropriate modulator * model(S,B,T,Q) )
-        note: if you only set one multiplicity_modulator
-        to 1, and all the others to 0, then normalising
-        will mean that you effectively have the same number
-        of stars as single, binary, triple or quad (whichever
-        is non-zero) i.e. the multiplicity fraction is ignored.
-        This is probably not useful except for
-        testing purposes or comparing to old grids.
-
-'raw'   : stick to what is predicted, i.e.
-          S/(S+B+T+Q), B/(S+B+T+Q), T/(S+B+T+Q), Q/(S+B+T+Q)
-          without normalisation
-          (in which case the total probability < 1.0 unless
-          all you use single, binary, triple and quadruple)
-
-'merge' : e.g. if you only have single and binary,
-          add the triples and quadruples to the binaries, so
-          binaries represent all multiple systems
-          ...
-          *** this is canonical binary population synthesis ***
-
-          It only takes the maximum multiplicity into account,
-          i.e. it doesn't multiply the resulting array by the multiplicity modulator again.
-          This prevents the resulting array to always be 1 if only 1 multiplicity modulator element is nonzero
-
-          Note: if multiplicity_modulator == [1,1,1,1]. this option does nothing (equivalent to 'raw').
-""",
-    "q_low_extrapolation_method": """
-q extrapolation (below 0.15) method
-    none
-    flat
-    linear2
-    plaw2
-    nolowq
-""",
-    "q_high_extrapolation_method": "Same as q_low_extrapolation_method",
-}
-
-
-#################################
-# Grid options functions
-
-# Utility functions
-def grid_options_help(option: str) -> dict:
-    """
-    Function that prints out the description of a grid option. Useful function for the user.
-
-    Args:
-        option: which option you want to have the description of
-
-    returns:
-        dict containing the option, the description if its there, otherwise empty string. And if the key doesnt exist, the dict is empty
-    """
-
-    option_keys = grid_options_defaults_dict.keys()
-    description_keys = grid_options_descriptions.keys()
-
-    if not option in option_keys:
-        print(
-            "Error: This is an invalid entry. Option does not exist, please choose from the following options:\n\t{}".format(
-                ", ".join(option_keys)
-            )
-        )
-        return {}
-
-    else:
-        if not option in description_keys:
-            print(
-                "This option has not been described properly yet. Please contact on of the authors"
-            )
-            return {option: ""}
-        else:
-            print(grid_options_descriptions[option])
-            return {option: grid_options_descriptions[option]}
-
-
-def grid_options_description_checker(print_info: bool = True) -> int:
-    """
-    Function that checks which descriptions are missing
-
-    Args:
-        print_info: whether to print out information about which options contain proper descriptions and which do not
-
-    Returns:
-        the number of undescribed keys
-    """
-
-    # Get the keys
-    option_keys = grid_options_defaults_dict.keys()
-    description_keys = grid_options_descriptions.keys()
-
-    #
-    undescribed_keys = list(set(option_keys) - set(description_keys))
-
-    if undescribed_keys:
-        if print_info:
-            print(
-                "Warning: the following keys have no description yet:\n\t{}".format(
-                    ", ".join(sorted(undescribed_keys))
-                )
-            )
-            print(
-                "Total description progress: {:.2f}%%".format(
-                    100 * len(description_keys) / len(option_keys)
-                )
-            )
-    return len(undescribed_keys)
-
-
-def write_grid_options_to_rst_file(output_file: str) -> None:
-    """
-    Function that writes the descriptions of the grid options to a rst file
-
-    Tasks:
-        TODO: separate things into private and public options
-
-    Args:
-        output_file: target file where the grid options descriptions are written to
-    """
-
-    # Get the options and the description
-    options = grid_options_defaults_dict
-    descriptions = grid_options_descriptions
-
-    # Get those that do not have a description
-    not_described_yet = list(set(options) - set(descriptions))
-
-    # separate public and private options
-    public_options = [key for key in options if not key.startswith("_")]
-    private_options = [key for key in options if key.startswith("_")]
-
-    # Check input
-    if not output_file.endswith(".rst"):
-        print("Filename doesn't end with .rst, please provide a proper filename")
-        return None
-
-    with open(output_file, "w") as f:
-        print("Population grid code options", file=f)
-        print("{}".format("=" * len("Population grid code options")), file=f)
-        print(
-            "The following chapter contains all grid code options, along with their descriptions",
-            file=f,
-        )
-        print(
-            "There are {} options that are not described yet.".format(
-                len(not_described_yet)
-            ),
-            file=f,
-        )
-        print("\n", file=f)
-
-        # Start public options part
-        print_option_descriptions(
-            f,
-            public_options,
-            descriptions,
-            "Public options",
-            "The following options are meant to be changed by the user.",
-        )
-
-        # Moe & di Stefano options:
-        print_option_descriptions(
-            f,
-            moe_di_stefano_default_options,
-            moe_di_stefano_default_options_description,
-            "Moe & di Stefano sampler options",
-            "The following options are meant to be changed by the user.",
-        )
-
-        # Start private options part
-        print_option_descriptions(
-            f,
-            private_options,
-            descriptions,
-            "Private options",
-            "The following options are not meant to be changed by the user, as these options are used and set internally by the object itself. The description still is provided, but just for documentation purposes.",
-        )
-
-
-def print_option_descriptions(filehandle, options, descriptions, title, extra_text):
-    # Start public options part
-    print("{}".format(title), file=filehandle)
-    print("{}".format("-" * len("{}".format(title))), file=filehandle)
-    print("{}".format(extra_text), file=filehandle)
-    print("\n", file=filehandle)
-
-    for option in sorted(options):
-        if option in descriptions:
-            print(
-                "| **{}**: {}".format(
-                    option, descriptions[option].replace("\n", "\n\t")
-                ),
-                file=filehandle,
-            )
-        else:
-            print(
-                "| **{}**: No description available yet".format(option),
-                file=filehandle,
-            )
-        print("", file=filehandle)
diff --git a/binarycpython/utils/population_extensions/grid_options_defaults.py b/binarycpython/utils/population_extensions/grid_options_defaults.py
index 776a4a868aa30a5e19bb9cdb12d9d5430846d61c..ac9da34ee1b11598f78f2804838f22136473e79d 100644
--- a/binarycpython/utils/population_extensions/grid_options_defaults.py
+++ b/binarycpython/utils/population_extensions/grid_options_defaults.py
@@ -56,7 +56,7 @@ class grid_options_defaults:
             ##########################
             # general (or unordered..)
             ##########################
-            "num_cores": 1,  # total number of cores used to evolve the population
+            "num_cores": None,  # total number of cores used to evolve the population (None -> use all logical cores)
             "num_cores_available": None,  # set automatically, not by the user
             "parse_function": None,  # Function to parse the output with.
             "multiplicity_fraction_function": 0,  # Which multiplicity fraction function to use. 0: None, 1: Arenou 2010, 2: Rhagavan 2010, 3: Moe and di Stefano 2017
diff --git a/install.sh b/install.sh
index 42f91d61275ba2e570618dc5aaf7f08a45d9f5c3..f3204fd55025a2f06ec3ee9f9110cd750ee060af 100755
--- a/install.sh
+++ b/install.sh
@@ -13,7 +13,7 @@ echo "installing binarcpython version $VERSION_NUMBER"
 
 # do stuff...
 $PYTHON setup.py clean
-$PIP uninstall binarycpython
+$PIP uninstall -y binarycpython
 $PYTHON setup.py build --force
 $PYTHON setup.py sdist
 $PIP install -v dist/binarycpython-$VERSION_NUMBER.tar.gz
diff --git a/papers/joss/paper.bib b/papers/joss/paper.bib
new file mode 100644
index 0000000000000000000000000000000000000000..a4ab8faaa9b869b7bbeaf633f9d24b6f6bd690c7
--- /dev/null
+++ b/papers/joss/paper.bib
@@ -0,0 +1,144 @@
+@article{izzardNewSyntheticModel2004,
+  title = {A New Synthetic Model for Asymptotic Giant Branch Stars},
+  author = {Izzard, Robert G. and Tout, Christopher A. and Karakas, Amanda I. and Pols, Onno R.},
+  year = {2004},
+  month = may,
+  journal = {Monthly Notices of the Royal Astronomical Society},
+  volume = {350},
+  number = {2},
+  pages = {407--426},
+  issn = {0035-8711},
+  doi = {10.1111/j.1365-2966.2004.07446.x},
+  abstract = {Abstract.  We present a synthetic model for thermally pulsing asymptotic giant branch (TPAGB) evolution constructed by fitting expressions to full evolutionary},
+  langid = {english},
+  keywords = {masterthesis},
+  file = {/home/david/pdfs/Zotero/Izzard et al_2004_A new synthetic model for asymptotic giant branch stars.pdf;/home/david/Zotero/storage/DNNTAJQF/1113239.html}
+}
+
+@article{izzardPopulationNucleosynthesisSingle2006,
+  title = {Population Nucleosynthesis in Single and Binary Stars - {{I}}. {{Model}}},
+  author = {Izzard, R. G. and Dray, L. M. and Karakas, A. I. and Lugaro, M. and Tout, C. A.},
+  year = {2006},
+  month = dec,
+  journal = {Astronomy \& Astrophysics},
+  volume = {460},
+  number = {2},
+  pages = {565--572},
+  issn = {0004-6361, 1432-0746},
+  doi = {10.1051/0004-6361:20066129},
+  abstract = {We present a synthetic algorithm to rapidly calculate nucleosynthetic yields from populations of single and binary stars for use in population synthesis, globular cluster and Galactic chemical evolution simulations. Single star nucleosynthesis is fitted directly to full evolution models and our model includes first, second and third dredge-ups with \emph{s{$<$}i/{$>$}-process enhancements, an analytic calculation for hot-bottom burning of CNO, NeNa and MgAl isotopes, surface enhancements due to wind loss in massive stars and core-collapse supernova yields. Even though this algorithm operates about 10\textsuperscript{7{$<$}sup/{$>$} times faster than full evolution and nucleosynthesis calculations, agreement with such models is good. We extend the single star model to include prescriptions of binary star interactions, notably mass loss and gain by stellar winds and Roche-lobe overflow, novae and type Ia supernovae. As examples of the application of our algorithm we present models of some interesting systems containing chemically peculiar stars that may occur in binaries.}}},
+  copyright = {\textcopyright{} ESO, 2006},
+  langid = {english},
+  keywords = {masterthesis},
+  file = {/home/david/pdfs/Zotero/Izzard et al_2006_Population nucleosynthesis in single and binary stars - I.pdf;/home/david/Zotero/storage/8EB73LN9/aa6129-06.html}
+}
+
+@article{izzardPopulationSynthesisBinary2009,
+  ids = {izzardPopulationSynthesisBinary2009a},
+  title = {Population Synthesis of Binary Carbon-Enhanced Metal-Poor Stars},
+  author = {Izzard, R. G. and Glebbeek, E. and Stancliffe, R. J. and Pols, O. R.},
+  year = {2009},
+  month = dec,
+  journal = {Astronomy and Astrophysics},
+  volume = {508},
+  number = {3},
+  pages = {1359--1374},
+  publisher = {{EDP Sciences}},
+  issn = {0004-6361},
+  doi = {10.1051/0004-6361/200912827},
+  abstract = {The carbon-enhanced metal-poor (CEMP) stars constitute approximately one fifth of the metal-poor ([Fe/H] {$\lnapprox$} -2) population but their origin is not well understood. The most widely accepted formation scenario, at least for the majority of CEMP stars which are also enriched in s-process elements, invokes mass-transfer of carbon-rich material from a thermally-pulsing asymptotic giant branch (TPAGB) primary star to a less massive main-sequence companion which is seen today. Recent studies explore the possibility that an initial mass function biased toward intermediate-mass stars is required to reproduce the observed CEMP fraction in stars with metallicity [Fe/H]{$<$}-2.5. These models also implicitly predict a large number of nitrogen-enhanced metal-poor (NEMP) stars which is not seen. In this paper we investigate whether the observed CEMP and NEMP to extremely metal-poor (EMP) ratios can be explained without invoking a change in the initial mass function. We construct binary-star populations in an attempt to reproduce the observed number and chemical abundance patterns of CEMP stars at a metallicity [Fe/H]\texttildelow -2.3. Our binary-population models include synthetic nucleosynthesis in TPAGB stars and account for mass transfer and other forms of binary interaction. This approach allows us to explore uncertainties in the CEMP-star formation scenario by parameterization of uncertain input physics. In particular, we consider the uncertainty in the physics of third dredge up in the TPAGB primary, binary mass transfer and mixing in the secondary star. We confirm earlier findings that with current detailed TPAGB models, in which third dredge up is limited to stars more massive than about 1.25\textasciitilde M{$\odot$}, the large observed CEMP fraction cannot be accounted for. 
We find that efficient third dredge up in low-mass (less than 1.25\textasciitilde M{$\odot$}), low-metallicity stars may offer at least a partial explanation for the large observed CEMP fraction while remaining consistent with the small observed NEMP fraction. Appendices A-E are only available in electronic form at http://www.aanda.org},
+  keywords = {abundances,binaries: close,Galaxy: halo,Galaxy: stellar content,masterthesis,nuclear reactions,nucleosynthesis,project_core,stars: carbon,stars: chemically peculiar,to_read},
+  file = {/home/david/pdfs/Zotero/Izzard et al_2009_Population synthesis of binary carbon-enhanced metal-poor stars.pdf;/home/david/Zotero/storage/BAXNURKG/aa12827-09.html}
+}
+
+@article{izzardBinaryStarsGalactic2018,
+  title = {Binary Stars in the {{Galactic}} Thick Disc},
+  author = {Izzard, Robert G. and Preece, Holly and Jofre, Paula and Halabi, Ghina M. and Masseron, Thomas and Tout, Christopher A.},
+  year = {2018},
+  month = jan,
+  journal = {Monthly Notices of the Royal Astronomical Society},
+  volume = {473},
+  pages = {2984--2999},
+  issn = {0035-8711},
+  doi = {10.1093/mnras/stx2355},
+  abstract = {The combination of asteroseismologically measured masses with abundances from detailed analyses of stellar atmospheres challenges our fundamental knowledge of stars and our ability to model them. Ancient red-giant stars in the Galactic thick disc are proving to be most troublesome in this regard. They are older than 5 Gyr, a lifetime corresponding to an initial stellar mass of about 1.2 M{$\odot$}. So why do the masses of a sizeable fraction of thick-disc stars exceed 1.3 M{$\odot$}, with some as massive as 2.3 M{$\odot$}? We answer this question by considering duplicity in the thick-disc stellar population using a binary population-nucleosynthesis model. We examine how mass transfer and merging affect the stellar mass distribution and surface abundances of carbon and nitrogen. We show that a few per cent of thick-disc stars can interact in binary star systems and become more massive than 1.3 M{$\odot$}. Of these stars, most are single because they are merged binaries. Some stars more massive than 1.3 M{$\odot$} form in binaries by wind mass transfer. We compare our results to a sample of the APOKASC data set and find reasonable agreement except in the number of these thick-disc stars more massive than 1.3 M{$\odot$}. This problem is resolved by the use of a logarithmically flat orbital-period distribution and a large binary fraction.},
+  keywords = {binaries: general,binary,Galaxy: disc,Galaxy: stellar content,masterthesis,surrey,to_Read},
+  file = {/home/david/pdfs/Zotero/Izzard et al_2018_Binary stars in the Galactic thick disc2.pdf}
+}
+
+@article{foreman-mackeyEmceeMCMCHammer2013,
+  title = {Emcee: {{The MCMC Hammer}}},
+  shorttitle = {Emcee},
+  author = {{Foreman-Mackey}, Daniel and Hogg, David W. and Lang, Dustin and Goodman, Jonathan},
+  year = {2013},
+  month = mar,
+  journal = {Publications of the Astronomical Society of the Pacific},
+  volume = {125},
+  number = {925},
+  eprint = {1202.3665},
+  eprinttype = {arxiv},
+  pages = {306--312},
+  issn = {00046280, 15383873},
+  doi = {10.1086/670067},
+  abstract = {We introduce a stable, well tested Python implementation of the affine-invariant ensemble sampler for Markov chain Monte Carlo (MCMC) proposed by Goodman \& Weare (2010). The code is open source and has already been used in several published projects in the astrophysics literature. The algorithm behind emcee has several advantages over traditional MCMC sampling methods and it has excellent performance as measured by the autocorrelation time (or function calls per independent sample). One major advantage of the algorithm is that it requires hand-tuning of only 1 or 2 parameters compared to \$\textbackslash sim N\^2\$ for a traditional algorithm in an N-dimensional parameter space. In this document, we describe the algorithm and the details of our implementation and API. Exploiting the parallelism of the ensemble method, emcee permits any user to take advantage of multiple CPU cores without extra effort. The code is available online at http://dan.iel.fm/emcee under the MIT License.},
+  archiveprefix = {arXiv},
+  keywords = {Astrophysics - Instrumentation and Methods for Astrophysics,Physics - Computational Physics,Statistics - Computation},
+  file = {/home/david/pdfs/Zotero/Foreman-Mackey et al_2013_emcee.pdf;/home/david/Zotero/storage/SSM96UY2/1202.html}
+}
+
+@article{speagleDynestyDynamicNested2020,
+  title = {Dynesty: A Dynamic Nested Sampling Package for Estimating {{Bayesian}} Posteriors and Evidences},
+  shorttitle = {Dynesty},
+  author = {Speagle, Joshua S},
+  year = {2020},
+  month = apr,
+  journal = {Monthly Notices of the Royal Astronomical Society},
+  volume = {493},
+  number = {3},
+  pages = {3132--3158},
+  issn = {0035-8711},
+  doi = {10.1093/mnras/staa278},
+  abstract = {We present dynesty, a public, open-source, python package to estimate Bayesian posteriors and evidences (marginal likelihoods) using the dynamic nested sampling methods developed by Higson et~al. By adaptively allocating samples based on posterior structure, dynamic nested sampling has the benefits of Markov chain Monte Carlo (MCMC) algorithms that focus exclusively on posterior estimation while retaining nested sampling's ability to estimate evidences and sample from complex, multimodal distributions. We provide an overview of nested sampling, its extension to dynamic nested sampling, the algorithmic challenges involved, and the various approaches taken to solve them in this and previous work. We then examine dynesty's performance on a variety of toy problems along with several astronomical applications. We find in particular problems dynesty can provide substantial improvements in sampling efficiency compared to popular MCMC approaches in the astronomical literature. More detailed statistical results related to nested sampling are also included in the appendix.},
+  file = {/home/david/pdfs/Zotero/Speagle_2020_dynesty.pdf;/home/david/Zotero/storage/PZBMLD2H/5721521.html}
+}
+
+@article{astropycollaborationAstropyCommunityPython2013,
+  title = {Astropy: {{A}} Community {{Python}} Package for Astronomy},
+  shorttitle = {Astropy},
+  author = {{Astropy Collaboration} and Robitaille, Thomas P. and Tollerud, Erik J. and Greenfield, Perry and Droettboom, Michael and Bray, Erik and Aldcroft, Tom and Davis, Matt and Ginsburg, Adam and {Price-Whelan}, Adrian M. and Kerzendorf, Wolfgang E. and Conley, Alexander and Crighton, Neil and Barbary, Kyle and Muna, Demitri and Ferguson, Henry and Grollier, Fr{\'e}d{\'e}ric and Parikh, Madhura M. and Nair, Prasanth H. and Unther, Hans M. and Deil, Christoph and Woillez, Julien and Conseil, Simon and Kramer, Roban and Turner, James E. H. and Singer, Leo and Fox, Ryan and Weaver, Benjamin A. and Zabalza, Victor and Edwards, Zachary I. and Azalee Bostroem, K. and Burke, D. J. and Casey, Andrew R. and Crawford, Steven M. and Dencheva, Nadia and Ely, Justin and Jenness, Tim and Labrie, Kathleen and Lim, Pey Lian and Pierfederici, Francesco and Pontzen, Andrew and Ptak, Andy and Refsdal, Brian and Servillat, Mathieu and Streicher, Ole},
+  year = {2013},
+  month = oct,
+  journal = {Astronomy and Astrophysics},
+  volume = {558},
+  pages = {A33},
+  issn = {0004-6361},
+  doi = {10.1051/0004-6361/201322068},
+  abstract = {We present the first public version (v0.2) of the open-source and community-developed Python package, Astropy. This package provides core astronomy-related functionality to the community, including support for domain-specific file formats such as flexible image transport system (FITS) files, Virtual Observatory (VO) tables, and common ASCII table formats, unit and physical quantity conversions, physical constants specific to astronomy, celestial coordinate and time transformations, world coordinate system (WCS) support, generalized containers for representing gridded as well as tabular data, and a framework for cosmological transformations and conversions. Significant functionality is under activedevelopment, such as a model fitting framework, VO client and server tools, and aperture and point spread function (PSF) photometry tools. The core development team is actively making additions and enhancements to the current code base, and we encourage anyone interested to participate in the development of future Astropy versions.},
+  langid = {english},
+  file = {/home/david/pdfs/Zotero/Collaboration et al_2013_Astropy.pdf;/home/david/Zotero/storage/VXZP754H/abstract.html}
+}
+
+@article{astropycollaborationAstropyProjectBuilding2018,
+  title = {The {{Astropy Project}}: {{Building}} an {{Open-science Project}} and {{Status}} of the v2.0 {{Core Package}}},
+  shorttitle = {The {{Astropy Project}}},
+  author = {{Astropy Collaboration} and {Price-Whelan}, A. M. and Sip{\H o}cz, B. M. and G{\"u}nther, H. M. and Lim, P. L. and Crawford, S. M. and Conseil, S. and Shupe, D. L. and Craig, M. W. and Dencheva, N. and Ginsburg, A. and VanderPlas, J. T. and Bradley, L. D. and {P{\'e}rez-Su{\'a}rez}, D. and {de Val-Borro}, M. and Aldcroft, T. L. and Cruz, K. L. and Robitaille, T. P. and Tollerud, E. J. and Ardelean, C. and Babej, T. and Bach, Y. P. and Bachetti, M. and Bakanov, A. V. and Bamford, S. P. and Barentsen, G. and Barmby, P. and Baumbach, A. and Berry, K. L. and Biscani, F. and Boquien, M. and Bostroem, K. A. and Bouma, L. G. and Brammer, G. B. and Bray, E. M. and Breytenbach, H. and Buddelmeijer, H. and Burke, D. J. and Calderone, G. and Cano Rodr{\'i}guez, J. L. and Cara, M. and Cardoso, J. V. M. and Cheedella, S. and Copin, Y. and Corrales, L. and Crichton, D. and D'Avella, D. and Deil, C. and Depagne, {\'E}. and Dietrich, J. P. and Donath, A. and Droettboom, M. and Earl, N. and Erben, T. and Fabbro, S. and Ferreira, L. A. and Finethy, T. and Fox, R. T. and Garrison, L. H. and Gibbons, S. L. J. and Goldstein, D. A. and Gommers, R. and Greco, J. P. and Greenfield, P. and Groener, A. M. and Grollier, F. and Hagen, A. and Hirst, P. and Homeier, D. and Horton, A. J. and Hosseinzadeh, G. and Hu, L. and Hunkeler, J. S. and Ivezi{\'c}, {\v Z}. and Jain, A. and Jenness, T. and Kanarek, G. and Kendrew, S. and Kern, N. S. and Kerzendorf, W. E. and Khvalko, A. and King, J. and Kirkby, D. and Kulkarni, A. M. and Kumar, A. and Lee, A. and Lenz, D. and Littlefair, S. P. and Ma, Z. and Macleod, D. M. and Mastropietro, M. and McCully, C. and Montagnac, S. and Morris, B. M. and Mueller, M. and Mumford, S. J. and Muna, D. and Murphy, N. A. and Nelson, S. and Nguyen, G. H. and Ninan, J. P. and N{\"o}the, M. and Ogaz, S. and Oh, S. and Parejko, J. K. and Parley, N. and Pascual, S. and Patil, R. and Patil, A. A. and Plunkett, A. L. and Prochaska, J. X. and Rastogi, T. 
and Reddy Janga, V. and Sabater, J. and Sakurikar, P. and Seifert, M. and Sherbert, L. E. and {Sherwood-Taylor}, H. and Shih, A. Y. and Sick, J. and Silbiger, M. T. and Singanamalla, S. and Singer, L. P. and Sladen, P. H. and Sooley, K. A. and Sornarajah, S. and Streicher, O. and Teuben, P. and Thomas, S. W. and Tremblay, G. R. and Turner, J. E. H. and Terr{\'o}n, V. and {van Kerkwijk}, M. H. and {de la Vega}, A. and Watkins, L. L. and Weaver, B. A. and Whitmore, J. B. and Woillez, J. and Zabalza, V. and {Astropy Contributors}},
+  year = {2018},
+  month = sep,
+  journal = {The Astronomical Journal},
+  volume = {156},
+  pages = {123},
+  issn = {0004-6256},
+  doi = {10.3847/1538-3881/aabc4f},
+  abstract = {The Astropy Project supports and fosters the development of open-source and openly developed Python packages that provide commonly needed functionality to the astronomical community. A key element of the Astropy Project is the core package astropy, which serves as the foundation for more specialized projects and packages. In this article, we provide an overview of the organization of the Astropy project and summarize key features in the core package, as of the recent major release, version 2.0. We then describe the project infrastructure designed to facilitate and support development for a broader ecosystem of interoperable packages. We conclude with a future outlook of planned new features and directions for the broader Astropy Project. .},
+  keywords = {Astrophysics - Instrumentation and Methods for Astrophysics,methods: data analysis,methods: miscellaneous,methods: statistical,reference systems},
+  annotation = {ADS Bibcode: 2018AJ....156..123A},
+  file = {/home/david/pdfs/Zotero/Astropy Collaboration et al_2018_The Astropy Project2.pdf}
+}
+
+
+@article{mirouh_etal22,
+        author = {{Mirouh}, Giovanni M. and {Hendriks}, David D. and {Dykes}, Sophie and {Moe}, Maxwell and {Izzard}, Robert G.},
+         title = "{Detailed equilibrium and dynamical tides: impact on circularization and synchronization in open clusters}",
+       journal = {\mnras},
+          year = "submitted",
+}
\ No newline at end of file
diff --git a/papers/joss/paper.crossref b/papers/joss/paper.crossref
new file mode 100644
index 0000000000000000000000000000000000000000..e54d991d5f8bb6ff6dc9b1b813340c081eae1346
--- /dev/null
+++ b/papers/joss/paper.crossref
@@ -0,0 +1,237 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<doi_batch xmlns="http://www.crossref.org/schema/5.3.1"
+           xmlns:ai="http://www.crossref.org/AccessIndicators.xsd"
+           xmlns:rel="http://www.crossref.org/relations.xsd"
+           xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+           version="5.3.1"
+           xsi:schemaLocation="http://www.crossref.org/schema/5.3.1 http://www.crossref.org/schemas/crossref5.3.1.xsd">
+  <head>
+    <doi_batch_id>20220728T213835-31c226c27c5bafd915ddd929fd270d6cd13f2bf3</doi_batch_id>
+    <timestamp>20220728213835</timestamp>
+    <depositor>
+      <depositor_name>JOSS Admin</depositor_name>
+      <email_address>admin@theoj.org</email_address>
+    </depositor>
+    <registrant>The Open Journal</registrant>
+  </head>
+  <body>
+    <journal>
+      <journal_metadata>
+        <full_title>Journal of Open Source Software</full_title>
+        <abbrev_title>JOSS</abbrev_title>
+        <issn media_type="electronic">2475-9066</issn>
+        <doi_data>
+          <doi>10.21105/joss</doi>
+          <resource>https://joss.theoj.org/</resource>
+        </doi_data>
+      </journal_metadata>
+      <journal_issue>
+        <publication_date media_type="online">
+          <month>01</month>
+          <year>1970</year>
+        </publication_date>
+        <journal_volume>
+          <volume>¿VOL?</volume>
+        </journal_volume>
+        <issue>¿ISSUE?</issue>
+      </journal_issue>
+      <journal_article publication_type="full_text">
+        <titles>
+          <title>binary_c-python: A python-based stellar population
+synthesis tool and interface to binary_c</title>
+        </titles>
+        <contributors>
+          <person_name sequence="first" contributor_role="author">
+            <given_name>D. D.</given_name>
+            <surname>Hendriks</surname>
+            <ORCID>https://orcid.org/0000-0002-8717-6046</ORCID>
+          </person_name>
+          <person_name sequence="additional"
+                       contributor_role="author">
+            <given_name>R. G.</given_name>
+            <surname>Izzard</surname>
+            <ORCID>https://orcid.org/0000-0003-0378-4843</ORCID>
+          </person_name>
+        </contributors>
+        <publication_date>
+          <month>01</month>
+          <day>01</day>
+          <year>1970</year>
+        </publication_date>
+        <pages>
+          <first_page>¿PAGE?</first_page>
+        </pages>
+        <publisher_item>
+          <identifier id_type="doi">N/A</identifier>
+        </publisher_item>
+        <ai:program name="AccessIndicators">
+          <ai:license_ref applies_to="vor">http://creativecommons.org/licenses/by/4.0/</ai:license_ref>
+          <ai:license_ref applies_to="am">http://creativecommons.org/licenses/by/4.0/</ai:license_ref>
+          <ai:license_ref applies_to="tdm">http://creativecommons.org/licenses/by/4.0/</ai:license_ref>
+        </ai:program>
+        <rel:program>
+          <rel:related_item>
+            <rel:description>Software archive</rel:description>
+            <rel:inter_work_relation relationship-type="references" identifier-type="doi">10.5281</rel:inter_work_relation>
+          </rel:related_item>
+          <rel:related_item>
+            <rel:description>GitHub review issue</rel:description>
+            <rel:inter_work_relation relationship-type="hasReview" identifier-type="uri">https://github.com/openjournals</rel:inter_work_relation>
+          </rel:related_item>
+        </rel:program>
+        <doi_data>
+          <doi>N/A</doi>
+          <resource>https://joss.theoj.org/papers/N/A</resource>
+          <collection property="text-mining">
+            <item>
+              <resource mime_type="application/pdf">https://joss.theoj.org/papers/N/A.pdf</resource>
+            </item>
+          </collection>
+        </doi_data>
+        <citation_list>
+          <citation key="izzardNewSyntheticModel2004">
+            <article_title>A new synthetic model for asymptotic giant
+branch stars</article_title>
+            <author>Izzard</author>
+            <journal_title>Monthly Notices of the Royal Astronomical
+Society</journal_title>
+            <issue>2</issue>
+            <volume>350</volume>
+            <doi>10.1111/j.1365-2966.2004.07446.x</doi>
+            <issn>0035-8711</issn>
+            <cYear>2004</cYear>
+            <unstructured_citation>Izzard, R. G., Tout, C. A., Karakas,
+A. I., &amp; Pols, O. R. (2004). A new synthetic model for asymptotic
+giant branch stars. Monthly Notices of the Royal Astronomical Society,
+350(2), 407–426.
+https://doi.org/10.1111/j.1365-2966.2004.07446.x</unstructured_citation>
+          </citation>
+          <citation key="izzardPopulationNucleosynthesisSingle2006">
+            <article_title>Population nucleosynthesis in single and
+binary stars - I. Model</article_title>
+            <author>Izzard</author>
+            <journal_title>Astronomy &amp; Astrophysics</journal_title>
+            <issue>2</issue>
+            <volume>460</volume>
+            <doi>10.1051/0004-6361:20066129</doi>
+            <issn>0004-6361</issn>
+            <cYear>2006</cYear>
+            <unstructured_citation>Izzard, R. G., Dray, L. M., Karakas,
+A. I., Lugaro, M., &amp; Tout, C. A. (2006). Population nucleosynthesis
+in single and binary stars - I. Model. Astronomy &amp; Astrophysics,
+460(2), 565–572.
+https://doi.org/10.1051/0004-6361:20066129</unstructured_citation>
+          </citation>
+          <citation key="izzardPopulationSynthesisBinary2009">
+            <article_title>Population synthesis of binary
+carbon-enhanced metal-poor stars</article_title>
+            <author>Izzard</author>
+            <journal_title>Astronomy and Astrophysics</journal_title>
+            <issue>3</issue>
+            <volume>508</volume>
+            <doi>10.1051/0004-6361/200912827</doi>
+            <issn>0004-6361</issn>
+            <cYear>2009</cYear>
+            <unstructured_citation>Izzard, R. G., Glebbeek, E.,
+Stancliffe, R. J., &amp; Pols, O. R. (2009). Population synthesis of
+binary carbon-enhanced metal-poor stars. Astronomy and Astrophysics,
+508(3), 1359–1374.
+https://doi.org/10.1051/0004-6361/200912827</unstructured_citation>
+          </citation>
+          <citation key="izzardBinaryStarsGalactic2018">
+            <article_title>Binary stars in the Galactic thick
+disc</article_title>
+            <author>Izzard</author>
+            <journal_title>Monthly Notices of the Royal Astronomical
+Society</journal_title>
+            <volume>473</volume>
+            <doi>10.1093/mnras/stx2355</doi>
+            <issn>0035-8711</issn>
+            <cYear>2018</cYear>
+            <unstructured_citation>Izzard, R. G., Preece, H., Jofre, P.,
+Halabi, G. M., Masseron, T., &amp; Tout, C. A. (2018). Binary stars in
+the Galactic thick disc. Monthly Notices of the Royal Astronomical
+Society, 473, 2984–2999.
+https://doi.org/10.1093/mnras/stx2355</unstructured_citation>
+          </citation>
+          <citation key="foreman-mackeyEmceeMCMCHammer2013">
+            <article_title>Emcee: The MCMC Hammer</article_title>
+            <author>Foreman-Mackey</author>
+            <journal_title>Publications of the Astronomical Society of
+the Pacific</journal_title>
+            <issue>925</issue>
+            <volume>125</volume>
+            <doi>10.1086/670067</doi>
+            <cYear>2013</cYear>
+            <unstructured_citation>Foreman-Mackey, D., Hogg, D. W.,
+Lang, D., &amp; Goodman, J. (2013). Emcee: The MCMC Hammer. Publications
+of the Astronomical Society of the Pacific, 125(925), 306–312.
+https://doi.org/10.1086/670067</unstructured_citation>
+          </citation>
+          <citation key="speagleDynestyDynamicNested2020">
+            <article_title>Dynesty: A dynamic nested sampling package
+for estimating Bayesian posteriors and evidences</article_title>
+            <author>Speagle</author>
+            <journal_title>Monthly Notices of the Royal Astronomical
+Society</journal_title>
+            <issue>3</issue>
+            <volume>493</volume>
+            <doi>10.1093/mnras/staa278</doi>
+            <issn>0035-8711</issn>
+            <cYear>2020</cYear>
+            <unstructured_citation>Speagle, J. S. (2020). Dynesty: A
+dynamic nested sampling package for estimating Bayesian posteriors and
+evidences. Monthly Notices of the Royal Astronomical Society, 493(3),
+3132–3158. https://doi.org/10.1093/mnras/staa278</unstructured_citation>
+          </citation>
+          <citation key="astropycollaborationAstropyCommunityPython2013">
+            <article_title>Astropy: A community Python package for
+astronomy</article_title>
+            <author>Astropy Collaboration</author>
+            <journal_title>Astronomy and Astrophysics</journal_title>
+            <volume>558</volume>
+            <doi>10.1051/0004-6361/201322068</doi>
+            <issn>0004-6361</issn>
+            <cYear>2013</cYear>
+            <unstructured_citation>Astropy Collaboration, Robitaille, T.
+P., Tollerud, E. J., Greenfield, P., Droettboom, M., Bray, E., Aldcroft,
+T., Davis, M., Ginsburg, A., Price-Whelan, A. M., Kerzendorf, W. E.,
+Conley, A., Crighton, N., Barbary, K., Muna, D., Ferguson, H., Grollier,
+F., Parikh, M. M., Nair, P. H., … Streicher, O. (2013). Astropy: A
+community Python package for astronomy. Astronomy and Astrophysics, 558,
+A33. https://doi.org/10.1051/0004-6361/201322068</unstructured_citation>
+          </citation>
+          <citation key="astropycollaborationAstropyProjectBuilding2018">
+            <article_title>The Astropy Project: Building an Open-science
+Project and Status of the v2.0 Core Package</article_title>
+            <author>Astropy Collaboration</author>
+            <journal_title>The Astronomical Journal</journal_title>
+            <volume>156</volume>
+            <doi>10.3847/1538-3881/aabc4f</doi>
+            <issn>0004-6256</issn>
+            <cYear>2018</cYear>
+            <unstructured_citation>Astropy Collaboration, Price-Whelan,
+A. M., Sipőcz, B. M., Günther, H. M., Lim, P. L., Crawford, S. M.,
+Conseil, S., Shupe, D. L., Craig, M. W., Dencheva, N., Ginsburg, A.,
+VanderPlas, J. T., Bradley, L. D., Pérez-Suárez, D., de Val-Borro, M.,
+Aldcroft, T. L., Cruz, K. L., Robitaille, T. P., Tollerud, E. J., …
+Astropy Contributors. (2018). The Astropy Project: Building an
+Open-science Project and Status of the v2.0 Core Package. The
+Astronomical Journal, 156, 123.
+https://doi.org/10.3847/1538-3881/aabc4f</unstructured_citation>
+          </citation>
+          <citation key="mirouh_etal22">
+            <article_title>Detailed equilibrium and dynamical tides:
+impact on circularization and synchronization in open
+clusters</article_title>
+            <author>Mirouh</author>
+            <unstructured_citation>Mirouh, G. M., Hendriks, D. D.,
+Dykes, S., Moe, M., &amp; Izzard, R. G. (submitted). Detailed
+equilibrium and dynamical tides: impact on circularization and
+synchronization in open clusters.</unstructured_citation>
+          </citation>
+        </citation_list>
+      </journal_article>
+    </journal>
+  </body>
+</doi_batch>
diff --git a/papers/joss/paper.md b/papers/joss/paper.md
new file mode 100644
index 0000000000000000000000000000000000000000..a028ff55996452c170eb444d78ff54879eb98c5a
--- /dev/null
+++ b/papers/joss/paper.md
@@ -0,0 +1,50 @@
+---
+title: '`binary_c-python`: A python-based stellar population synthesis tool and interface to `binary_c`'
+tags:
+  - Python
+  - astronomy
+authors:
+  - name: D. D. Hendriks[^1]
+    orcid: 0000-0002-8717-6046
+    affiliation: 1 
+  - name: R. G. Izzard
+    orcid: 0000-0003-0378-4843
+    affiliation: 1
+affiliations:
+ - name: Department of Physics, University of Surrey, Guildford, GU2 7XH, Surrey, UK
+   index: 1
+date: 28 July 2022
+bibliography: paper.bib
+---
+
+# Summary
+
+We present our package [`binary_c-python`](https://ri0005.pages.surrey.ac.uk/binary_c-python/), which aims to provide a convenient and easy-to-use interface to the [`binary_c`](http://personal.ph.surrey.ac.uk/~ri0005/doc/binary_c/binary_c.html) [@izzardNewSyntheticModel2004; @izzardPopulationNucleosynthesisSingle2006; @izzardPopulationSynthesisBinary2009; @izzardBinaryStarsGalactic2018] framework, allowing the user to rapidly evolve individual systems and populations of stars. `binary_c-python` is available on [`Pip`](https://pypi.org/project/binarycpython/) and on [`Gitlab`](https://gitlab.com/binary_c/binary_c-python).
+
+The user can control output from `binary_c` by providing `binary_c-python` with logging statements that are dynamically compiled and loaded into `binary_c`. `binary_c-python` uses multiprocessing to utilise all the cores on a particular machine, and can run populations with HPC cluster workload managers like `HTCondor` and `Slurm`, allowing the user to run simulations on very large computing clusters.
+
+`binary_c-python` is easily interfaced or integrated with other Python-based codes and libraries, e.g. sampling codes like `Emcee` or `Dynesty`, or the astrophysics oriented package `Astropy` [@astropycollaborationAstropyCommunityPython2013; @foreman-mackeyEmceeMCMCHammer2013; @astropycollaborationAstropyProjectBuilding2018; @speagleDynestyDynamicNested2020]. Moreover, it is possible to provide custom system-generating functions through our function hooks, allowing third-party packages to manage the properties of the stars in the populations and evolve them through `binary_c-python`.
+
+Recent developments in `binary_c` include standardised output datasets called *ensembles*. `binary_c-python` easily processes these datasets and provides a suite of utility functions to handle them. Furthermore, `binary_c-python` now includes the *ensemble-manager* class, which makes use of the core functions and classes of `binary_c-python` to evolve a grid of stellar populations with varying input physics, allowing for large, automated parameter studies through a single interface.
+
+We provide [documentation](https://ri0005.pages.surrey.ac.uk/binary_c-python/index.html) that is automatically generated based on docstrings and a suite of `Jupyter` [notebooks](https://ri0005.pages.surrey.ac.uk/binary_c-python/example_notebooks.html). These notebooks consist of technical tutorials on how to use `binary_c-python`, and use-case scenarios aimed at doing science. Much of `binary_c-python` is covered by unit tests to ensure reliability and correctness, and the test coverage is continually increased as the package is being improved.
+
+# Statement of need
+
+In the current scientific climate, `Python` is ubiquitous, and while lower-level codes written in, e.g., `Fortran` or `C` are still widely used, much of the newer software is written in `Python`, either entirely or as a wrapper around other codes and libraries. Education in programming also often includes `Python` courses because of its ease of use and its flexibility. Moreover, `Python` has a large community with many resources and tutorials. We have created `binary_c-python` to allow students and scientists alike to explore current scientific issues while enjoying the familiar syntax, and at the same time make use of the plentiful scientific and astrophysical packages like `Numpy`, `Scipy`, `Pandas`, `Astropy` and platforms like `Jupyter`.
+
+Earlier versions of `binary_c-python` were written in Perl, where much of the logic and structure were developed and debugged. This made porting to `Python` relatively easy.
+
+# Projects that use `binary_c-python`
+
+`binary_c-python` has already been used in a variety of situations, ranging from pure research to educational purposes, as well as in outreach events. In the summer of 2021 we used `binary_c-python` as the basis for the interactive classes on stellar ecosystems during the [International Max-Planck Research School summer school 2021 in Heidelberg](https://www2.mpia-hd.mpg.de/imprs-hd/SummerSchools/2021/), where students were introduced to the topic of population synthesis and were able to use our notebooks to perform their own calculations. `binary_c-python` has been used in @mirouh_etal22, where improvements to tidal interactions between stars were implemented, and initial birth parameter distributions were varied to match to observed binary systems in star clusters. A Master's thesis project aimed at finding the birth parameters of the V106 stellar system compared observations to results of `binary_c` and calculated the maximum likelihood with Bayesian inference through Markov chain Monte Carlo sampling. The project made use of `binary_c-python` and the `Emcee` package.
+
+Currently `binary_c-python` is used in several ongoing projects that study the effect of birth distributions on the occurrence of carbon-enhanced metal-poor (CEMP) stars, the occurrence and properties of accretion disks in main-sequence stars and the predicted observable black hole distribution by combining star formation and metallicity distributions with the output of `binary_c`. Moreover, we use the *ensemble* output structure to generate datasets for galactic chemical evolution on cosmological timescales, where we rely heavily on the utilities of `binary_c-python`.
+
+# Acknowledgements
+
+We acknowledge the helpful discussions and early testing efforts from M. Delorme, G. Mirouh, and D. Tracey, and the early work of J. Andrews which inspired our Python-C interface code. DDH thanks the UKRI/UoS for the funding grant H120341A. RGI thanks STFC for funding grants [ST/R000603/1](https://gtr.ukri.org/projects?ref=ST%2FR000603%2F1) and [ST/L003910/2](https://gtr.ukri.org/projects?ref=ST/L003910/2).
+
+# References
+
+[^1]: E-mail: <dh00601@surrey.ac.uk> (DDH)
diff --git a/papers/joss/paper.pdf b/papers/joss/paper.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..a52c35aa16fdf4f6007b51cafb17ca0c2af34053
Binary files /dev/null and b/papers/joss/paper.pdf differ