diff --git a/binarycpython/utils/grid.py b/binarycpython/utils/grid.py
index 39cadf4504a01b1abef58d1f55dbce11619ce4c2..0706050710e07aa99c93215d88a9f59d5cf1b551 100644
--- a/binarycpython/utils/grid.py
+++ b/binarycpython/utils/grid.py
@@ -5,6 +5,7 @@ import sys
 import datetime
 import time
 import random
+import inspect
 
 import numpy as np
 import multiprocessing as mp
@@ -39,6 +40,7 @@ from binarycpython.utils.functions import (
 # TODO: add functionality to return the source_list
 # TODO: add functionality to return the ensemble_list
 # TODO: change the grid_options dict structure so that there is room for descriptions
+# TODO: consider spreading the functions over more files.
 
 # Make this function also an API call. The output doesn't seem to get written to a buffer that is stored in a Python object; rather, it is just written to stdout.
 
@@ -61,6 +63,9 @@ class Population(object):
         # Argline dict
         self.argline_dict = {}
 
+        # Set main process id
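+        # (presumably so that child processes can later be told apart from
+        # the main process by comparing os.getpid() against this value)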
+        self.grid_options['main_pid'] = os.getpid()
+
     ###################################################
     # Argument functions
     ###################################################
@@ -511,6 +516,7 @@ class Population(object):
             store_memaddr=self.grid_options["store_memaddr"],
             population=1,
         )
+
         if self.grid_options["parse_function"]:
             self.grid_options["parse_function"](self, out)
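+            # (a parse_function is called as parse_function(population, output);
+            # tests/population/test_population.py in this change defines one)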
 
@@ -532,7 +538,7 @@ class Population(object):
 
         print("generator done")
 
-    def evolve_single(self, parse_function=None, clean_up_custom_logging_files=True):
+    def evolve_single(self, clean_up_custom_logging_files=True):
         """
         Function to run a single system
         
@@ -559,8 +565,8 @@ class Population(object):
             self.clean_up_custom_logging(evol_type="single")
 
         # Parse
-        if parse_function:
-            return parse_function(self, out)
+        if self.grid_options["parse_function"]:
+            return self.grid_options["parse_function"](self, out)
         else:
             return out
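+
+        # Usage sketch under the new convention (`my_parse_function` stands in
+        # for a user-supplied function):
+        #
+        #   pop.set(parse_function=my_parse_function)
+        #   result = pop.evolve_single()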
 
@@ -570,9 +576,16 @@ class Population(object):
         """
 
         import multiprocessing as mp
-
-        # from pathos.multiprocessing import ProcessingPool as Pool
         from pathos.pools import _ProcessPool as Pool
+        from pathos.helpers import mp as pathos_multiprocess
+
+        # TODO: make further use of a queue to handle jobs or at least get information on the process ids etc
+        # https://stackoverflow.com/questions/10190981/get-a-unique-id-for-worker-in-python-multiprocessing-pool
+        # https://stackoverflow.com/questions/8640367/python-manager-dict-in-multiprocessing/9536888 for mutating values through dicts
+        # https://python-forum.io/Thread-Dynamic-updating-of-a-nested-dictionary-in-multiprocessing-pool
+        # https://stackoverflow.com/questions/28740955/working-with-pathos-multiprocessing-tool-in-python-and
+        manager = pathos_multiprocess.Manager()
+        self.grid_options['result_dict'] = manager.dict()
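+
+        # Note: a manager dict only syncs top-level assignment between
+        # processes; in-place changes to a nested value are not picked up.
+        # Workers therefore have to read, modify and re-assign nested entries,
+        # e.g. (a sketch; 'mass' is just an example key):
+        #
+        #   nested = self.grid_options['result_dict'].get('mass', {})
+        #   nested[some_key] = nested.get(some_key, 0) + value
+        #   self.grid_options['result_dict']['mass'] = nested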
 
         # Create pool
         p = Pool(processes=self.grid_options["amt_cores"])
@@ -1306,11 +1319,18 @@ class Population(object):
 
         self.grid_options["probtot"] += prob
 
-
     def increment_count(self):
         """
         Function to add to the total number of stars
         """
         self.grid_options["count"] += 1
 
+    # def join_result_dicts(self):
+    #     """
+    #     Function to join the result dictionaries
+    #     """
+
 ################################################################################################
\ No newline at end of file
diff --git a/binarycpython/utils/grid_options_defaults.py b/binarycpython/utils/grid_options_defaults.py
index 78f160eae1a398a508fe55912940db5a88bb5621..429c86657a04f61ad01715b2ebe62d9837fe1dbc 100644
--- a/binarycpython/utils/grid_options_defaults.py
+++ b/binarycpython/utils/grid_options_defaults.py
@@ -12,6 +12,8 @@ grid_options_defaults_dict = {
     "binary": 0,  # FLag on whether the systems are binary systems or single systems.
     "parse_function": None,  # FUnction to parse the output with.
     "tmp_dir": temp_dir(),  # Setting the temp dir of the program
+    "main_pid": -1, # Placeholder for the main process id of the run.
+
     # "output_dir":
     ##########################
     # binary_c files
@@ -54,13 +56,23 @@ grid_options_defaults_dict = {
     "system_generator": None,  # value that holds the function that generates the system (result of building the grid script)
     "population_type": "grid",  #
     "population_type_options": [
-        "grid"
+        "grid",
     ],  # TODO: fill later with monte carlo, source file etc
     "count": 0,  # total count of systems
     "probtot": 0,  # total probability
     "weight": 1.0,  # weighting for the probability
     "repeat": 1.0,  # number of times to repeat each system (probability is adjusted to be 1/repeat)
+    "results_per_worker": {}, # dict which can store info per worker. meh. doesnt work properly
+
+
     "start_time_evolution": 0,  # Start time of the grid
+    "end_time_evolution": 0, # end time of the grid
+    "error": 0, # error?
+    "failed_count": 0, # amt of failed systems
+    "failed_prob": 0, # Summed probability of failed systems
+    "id": 0, # Random id of this grid/population run,
+    "modulo": 1, # run modulo n of the grid.
+
     ## Grid type evolution
     "grid_variables": {},  # grid variables
     "grid_code": None,  # literal grid code
@@ -69,9 +81,121 @@ grid_options_defaults_dict = {
     # TODO: make MC options
     ## Evolution from source file
     # TODO: make run from sourcefile options.
-    # Grid variables: instructions to generate the values of the parameters
-    # Locations:
-    # Probability:
+
+    ## Other not yet implemented parts of the population evolution
+
+    #     # start at this model number: handy during debugging
+    #     # to skip large parts of the grid
+    #     start_at => 0
+    #     global_error_string => undef,
+    #     monitor_files => [],
+    #     nextlogtime   => 0,
+    #     nthreads      => 1, # number of threads
+    #     # start at model offset (0-based, so first model is zero)
+    #     offset        => 0,
+    #     resolution=>{
+    #         shift   =>0,
+    #         previous=>0,
+    #         n       =>{} # per-variable resolution
+    #     },
+    #     thread_q      => undef,
+    #     threads       => undef, # array of threads objects
+    #     tstart        => [gettimeofday], # flexigrid start time
+    #     __nvar        => 0, # number of grid variables
+    #     _varstub      => undef,
+    #     _lock         => undef,
+    #     _evcode_pids  => [],
+    # };
+
+    ########################################
+    # Slurm stuff
+    ########################################
+    "slurm": 0, # dont use the slurm by default
+    "slurm_command": "", # Command that slurm runs (e.g. run_flexigrid or join_datafiles)
+    "slurm_dir": "", #working directory containin scripts output logs etc.
+
+    # slurm_njobs=>'', # number of scripts
+    # slurm_jobid=>'', # slurm job id (%A)
+    # slurm_jobarrayindex=>'', # slurm job array index (%a)
+    #     slurm_jobname=>'binary_grid', # set to binary_grid
+    #     slurm_postpone_join=>0, # if 1, data is not joined, e.g. if you
+    # # want to do it off the slurm grid (e.g. with more RAM)
+    #     slurm_postpone_sbatch=>0, # if 1, don't submit, just make the script
+    # # (defaults to $ENV{PWD} if undef)
+    # slurm_memory=>512, # in MB, the memory use of the job
+    #     slurm_warn_max_memory=>1024, # in MB : warn if mem req. > this
+    #     slurm_partition=>undef,
+    #     slurm_ntasks=>1, # 1 CPU required per array job: usually only need this
+    #     slurm_time=>0, # 0 = infinite time
+    # slurm_use_all_node_CPUs=>0, # 1 = use all of a node's CPUs (0)
+    # # you will want to use this if your Slurm SelectType is e.g. linear
+    # # which means it allocates all the CPUs in a node to the job
+    # slurm_control_CPUs=>0, # if so, leave this many for Perl control (0)
+    #     slurm_array=>undef,# override for --array, useful for rerunning jobs
+
+
+    # ########################################
+    # # Condor stuff
+    # ########################################
+    # condor=>0, # 1 to use condor, 0 otherwise
+    #     condor_command=>'',# condor command e.g. "run_flexigrid",
+    # # "join_datafiles"
+    # condor_dir=>'', # working directory containing e.g.
+    # # scripts, output, logs (e.g. should be NFS available to all)
+    # condor_njobs=>'', # number of scripts
+    # condor_jobid=>'', # condor job id
+    # condor_postpone_join=>0, # if 1, data is not joined, e.g. if you
+    # # want to do it off the condor grid (e.g. with more RAM)
+    # condor_join_machine=>undef, # if defined then this is the machine on which the join command should be launched (must be sshable and not postponed)
+    # condor_join_pwd=>undef, # directory the join should be in
+    # # (defaults to $ENV{PWD} if undef)
+    # condor_memory=>1024, # in MB, the memory use (ImageSize) of the job
+    # condor_universe=>'vanilla', # usually vanilla universe
+    # condor_snapshot_on_kill=>0, # if 1 snapshot on SIGKILL before exit
+    # condor_load_from_snapshot=>0, # if 1 check for snapshot .sv file and load it if found
+    # condor_checkpoint_interval=>0, # checkpoint interval (seconds)
+    # condor_checkpoint_stamp_times=>0, # if 1 then files are given timestamped names (warning: lots of files!), otherwise just store the lates
+    # condor_streams=>0, # stream stderr/stdout by default (warning: might cause heavy network load)
+    # condor_save_joined_file=>0, # if 1 then results/joined contains the results (useful for debugging, otherwise a lot of work)
+    # condor_requirements=>'', # used?
+    #     # resubmit options : if the status of a condor script is
+    #     # either 'finished','submitted','running' or 'crashed',
+    #     # decide whether to resubmit it.
+    #     # NB Normally the status is empty, e.g. on the first run.
+    #     # These are for restarting runs.
+    #     condor_resubmit_finished=>0,
+    # condor_resubmit_submitted=>0,
+    # condor_resubmit_running=>0,
+    # condor_resubmit_crashed=>0,
+
+
     ##########################
     # Unordered. Need to go through this. Copied from the perl implementation.
     ##########################
@@ -83,6 +207,7 @@ grid_options_defaults_dict = {
     # timeout=>15, # seconds until timeout
     # log_filename=>"/scratch/davidh/results_simulations/tmp/log.txt",
     # # current_log_filename=>"/scratch/davidh/results_simulations/tmp/grid_errors.log",
+
     ############################################################
     # Set default grid properties (in %self->{_grid_options}}
     # and %{$self->{_bse_options}})
@@ -102,6 +227,7 @@ grid_options_defaults_dict = {
     # suspend_files=>[$tmp.'/force_binary_c_suspend',
     #         './force_binary_c_suspend'],
     # snapshot_file=>$tmp.'/binary_c-snapshot',
+
     # ########################################
     # # infomration about the running grid script
     # ########################################
@@ -190,65 +316,7 @@ grid_options_defaults_dict = {
     # results_hash_dumpfile => '',
     # # compress files with bzip2 by default
     # compress_results_hash => 1,
-    # ########################################
-    # # Condor stuff
-    # ########################################
-    # condor=>0, # 1 to use condor, 0 otherwise
-    #     condor_command=>'',# condor command e.g. "run_flexigrid",
-    # # "join_datafiles"
-    # condor_dir=>'', # working directory containing e.g.
-    # # scripts, output, logs (e.g. should be NFS available to all)
-    # condor_njobs=>'', # number of scripts
-    # condor_jobid=>'', # condor job id
-    # condor_postpone_join=>0, # if 1, data is not joined, e.g. if you
-    # # want to do it off the condor grid (e.g. with more RAM)
-    # condor_join_machine=>undef, # if defined then this is the machine on which the join command should be launched (must be sshable and not postponed)
-    # condor_join_pwd=>undef, # directory the join should be in
-    # # (defaults to $ENV{PWD} if undef)
-    # condor_memory=>1024, # in MB, the memory use (ImageSize) of the job
-    # condor_universe=>'vanilla', # usually vanilla universe
-    # condor_snapshot_on_kill=>0, # if 1 snapshot on SIGKILL before exit
-    # condor_load_from_snapshot=>0, # if 1 check for snapshot .sv file and load it if found
-    # condor_checkpoint_interval=>0, # checkpoint interval (seconds)
-    # condor_checkpoint_stamp_times=>0, # if 1 then files are given timestamped names (warning: lots of files!), otherwise just store the lates
-    # condor_streams=>0, # stream stderr/stdout by default (warning: might cause heavy network load)
-    # condor_save_joined_file=>0, # if 1 then results/joined contains the results (useful for debugging, otherwise a lot of work)
-    # condor_requirements=>'', # used?
-    #     # resubmit options : if the status of a condor script is
-    #     # either 'finished','submitted','running' or 'crashed',
-    #     # decide whether to resubmit it.
-    #     # NB Normally the status is empty, e.g. on the first run.
-    #     # These are for restarting runs.
-    #     condor_resubmit_finished=>0,
-    # condor_resubmit_submitted=>0,
-    # condor_resubmit_running=>0,
-    # condor_resubmit_crashed=>0,
-    # ########################################
-    # # Slurm stuff
-    # ########################################
-    #     slurm=>0, # don't use slurm by default
-    # slurm_command=>'',# slurm command e.g. "run_flexigrid",
-    # # "join_datafiles"
-    # slurm_dir=>'', # working directory containing e.g.
-    # # scripts, output, logs (e.g. should be NFS available to all)
-    # slurm_njobs=>'', # number of scripts
-    # slurm_jobid=>'', # slurm job id (%A)
-    # slurm_jobarrayindex=>'', # slurm job array index (%a)
-    #     slurm_jobname=>'binary_grid', # set to binary_grid
-    #     slurm_postpone_join=>0, # if 1, data is not joined, e.g. if you
-    # # want to do it off the slurm grid (e.g. with more RAM)
-    #     slurm_postpone_sbatch=>0, # if 1, don't submit, just make the script
-    # # (defaults to $ENV{PWD} if undef)
-    # slurm_memory=>512, # in MB, the memory use of the job
-    #     slurm_warn_max_memory=>1024, # in MB : warn if mem req. > this
-    #     slurm_partition=>undef,
-    #     slurm_ntasks=>1, # 1 CPU required per array job: usually only need this
-    #     slurm_time=>0, # 0 = infinite time
-    # slurm_use_all_node_CPUs=>0, # 1 = use all of a node's CPUs (0)
-    # # you will want to use this if your Slurm SelectType is e.g. linear
-    # # which means it allocates all the CPUs in a node to the job
-    # slurm_control_CPUs=>0, # if so, leave this many for Perl control (0)
-    #     slurm_array=>undef,# override for --array, useful for rerunning jobs
+
     # ########################################
     # # CPU
     # ########################################
@@ -288,6 +356,16 @@ grid_options_defaults_dict = {
     #     'run_flexigrid_thread',
     #         'thread_vb'
     # ],
+
     # ########################################
     # # INPUT/OUTPUT
     # ########################################
@@ -360,105 +438,5 @@ grid_options_defaults_dict = {
     # # than a normal initiation: this enables you to
     # # stop and start a grid
     # starting_snapshot_file=>undef,
-    # # flexigrid options
-    # flexigrid=>{
-    #         # there are several types of flexigrid:
-    #         #
-    #         # 'grid' is the traditional N-dimensional grid
-    #         # 'monte carlo' is a Monte Carlo simulation (may not work!)
-    #         # 'list' takes a list of systems from
-    #         #
-    #         # $self->{_grid_options}->{flexigrid}->{'list filename'}
-    #         #
-    #         # (which is a file containing list strings on each line)
-    #         #
-    #         # or from
-    #         #
-    #         # $self->{_grid_options}->{flexigrid}->{'list reference'}
-    #         #
-    #         # (which is a reference to a perl list)
-    #     'grid type'=>'grid',
-    #         'list filename'=>undef,  # undef unless 'grid type' is 'list'
-    #         'list reference'=>undef, # ditto
-    #         'listFP'=>undef, # file pointer : keep undef here (set automatically)
-    #     },
-    #     # start at this model number: handy during debugging
-    #     # to skip large parts of the grid
-    #     start_at => 0
-    # );
-    # );
-    # # if available, use the evcode's defaults
-    # if(1)
-    # {
-    #     my $evcode_args = $self->evcode_args_list();
-    #     if(defined $evcode_args &&
-    #        $evcode_args &&
-    #        ref $evcode_args eq 'ARRAY' &&
-    #        $#{$evcode_args} > 1)
-    #     {
-    #         foreach my $arg (grep {
-    #             !(
-    #                  # some args should be ignored
-    #                  /=\s*$/ ||
-    #                  /Function$/ ||
-    #                  /NULL$/ ||
-    #                  /\(?null\)?$/i ||
-    #                  /^M_[12]/ ||
-    #                  /^eccentricity/ ||
-    #                  /^orbital_period/ ||
-    #                  /^phasevol/ ||
-    #                  /^separation/ ||
-    #                  /^probability/ ||
-    #                  /^stellar_type/ ||
-    #                  /^_/||
-    #                  /^batchmode/  ||
-    #                  /^initial_abunds_only/ ||
-    #                  /^monte_carlo_kicks/
-    #                 )
-    #                          }@$evcode_args)
-    #         {
-    #             if($arg=~/(\S+) = (\S+)/)
-    #             {
-    #                 if(!defined $self->{_bse_options}->{$1})
-    #                 {
-    #                     #print "NEW set $1 to $2\n";
-    #                 }
-    #                 $self->{_bse_options}->{$1} =
-    #                     $2 eq 'TRUE' ? 1 :
-    #                     $2 eq 'FALSE' ? 0 :
-    #                     $2;
-    #                 #print "Set $1 -> $2 = $self->{_bse_options}->{$1}\n";
-    #             }
-    #         }
-    #     }
-    # }
-    # $self->{_flexigrid} = {
-    # count         => 0,
-    #     error         => 0,
-    #     failed_count  => 0,
-    #     failed_prob   => 0.0,
-    #     global_error_string => undef,
-    #     # random string to ID the flexigrid
-    #     id            => rob_misc::random_string(8),
-    #     modulo        => 1, # run modulo n
-    #     monitor_files => [],
-    #     nextlogtime   => 0,
-    #     nthreads      => 1, # number of threads
-    #     # start at model offset (0-based, so first model is zero)
-    #     offset        => 0,
-    #     probtot       => 0.0,
-    #     resolution=>{
-    #         shift   =>0,
-    #         previous=>0,
-    #         n       =>{} # per-variable resolution
-    #     },
-    #     results_hash  => $self->{_results},
-    #     thread_q      => undef,
-    #     threads       => undef, # array of threads objects
-    #     tstart        => [gettimeofday], # flexigrid start time
-    #     __nvar        => 0, # number of grid variables
-    #     _varstub      => undef,
-    #     _lock         => undef,
-    #     _evcode_pids  => [],
-    # };
+
 }
diff --git a/pathos_worker_objects.py b/pathos_worker_objects.py
new file mode 100644
index 0000000000000000000000000000000000000000..b9d4fa11be73ea848f961f0a00c7fa4a2b43783a
--- /dev/null
+++ b/pathos_worker_objects.py
@@ -0,0 +1,21 @@
+import multiprocessing as mp
+
+from pathos.multiprocessing import ProcessingPool as Pool
+
+
+# Create pool
+p = Pool(processes=1, ncpus=3)
+
+
+print(dir(p))
+print(p)
+
+
+quit()
+# Execute
+# Note: the snippet below appears to be copied from the Population class for
+# reference; it is never reached because of the quit() above, and `self` does
+# not exist at module level.
+# TODO: calculate the chunksize value based on: total starcount and cores used.
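+# One possible heuristic for the TODO above (a sketch; `total_starcount` is an
+# assumed name, nothing in this file defines it): aim for several chunks per
+# core so that imap_unordered can balance uneven system runtimes.
+def calc_chunksize(total_starcount, ncores, chunks_per_core=4):
+    """Return a chunksize that gives roughly chunks_per_core chunks per core."""
+    return max(1, total_starcount // (ncores * chunks_per_core))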
+r = list(
+    p.imap_unordered(
+        self.evolve_system_mp, self.yield_system_mp(), chunksize=20
+    )
+)
diff --git a/src/binary_c_python.c b/src/binary_c_python.c
index 3b7e913280034546644226d9ac0ad78552ef2412..7ecc09f0e1367efec821c2dd6ee4429d15810310 100644
--- a/src/binary_c_python.c
+++ b/src/binary_c_python.c
@@ -60,7 +60,7 @@ static char return_version_info_docstring[] =
 static char return_store_docstring[] = 
     "Return the store memory adress that will be passed to run_population";
 
-// static struct libbinary_c_store_t *store = NULL;
+static struct libbinary_c_store_t *store = NULL;
 
 /* Initialize pyobjects */
 // 
@@ -112,11 +112,7 @@ static PyMethodDef module_methods[] = {
 /* Making the module                                                              */
 /* ============================================================================== */
 
-
-/*  */
-// TODO: enforce python3 
-#if PY_MAJOR_VERSION >= 3
-
+/* Initialise the module. The Python 2 support code was removed on 17-03-2020 */
 /* Python 3+ */
 static struct PyModuleDef Py_binary_c_python_api =
 {
@@ -132,28 +128,10 @@ PyMODINIT_FUNC PyInit_binary_c_python_api(void)
     return PyModule_Create(&Py_binary_c_python_api);
 }
 
-#else
-
-/*
- * Python pre-V3
- *
- * NOT TESTED THOROUGHLY!
- */
-
-PyMODINIT_FUNC initbinary_c(void)
-{
-    PyObject *m = Py_InitModule3("binary_c_python_api", module_methods, module_docstring);
-    if(m == NULL)
-        return;
-}
-#endif // Python version check
-
 /* ============================================================================== */
-/* Some wierd function                                                            */
+/* Some function that we started out with. Unused now.                            */
 /* ============================================================================== */
 
-
-
 #ifdef __DEPRECATED
 static PyObject* binary_c_create_binary(PyObject *self, PyObject *args)
 {
@@ -282,10 +260,6 @@ static PyObject* binary_c_run_system(PyObject *self, PyObject *args, PyObject *k
     Safe_free(buffer);
     Safe_free(error_buffer);
 
-    /* 
-     * TODO
-     * return the return_error_string as well!
-     */
     return return_string;
 }
 
@@ -316,10 +290,6 @@ static PyObject* binary_c_return_arglines(PyObject *self, PyObject *args)
     Safe_free(buffer);
     Safe_free(error_buffer);
 
-    /* 
-     * TODO
-     * return the return_error_string as well!
-     */
     return return_string;
 }
 
@@ -450,9 +420,5 @@ static PyObject* binary_c_return_store(PyObject *self, PyObject *args)
     Safe_free(buffer);
     Safe_free(error_buffer);
 
-    /* 
-     * TODO
-     * return the return_error_string as well!
-     */
     return return_store_memaddr;
 }
\ No newline at end of file
diff --git a/tests/population/grid_tests.py b/tests/population/grid_tests.py
index a5991718a3e953663da1f4563e13210487adfe7c..858e809c7ec3ea6f0cb313a3a183156acc520134 100644
--- a/tests/population/grid_tests.py
+++ b/tests/population/grid_tests.py
@@ -33,8 +33,7 @@ test_pop.set(
 # print(len(test_pop.cleanup_defaults()))
 
 line = test_pop.return_argline(test_pop.cleanup_defaults())
-print(line)
-quit()
+
 ## Testing single evolution
 # test_pop.evolve_single()
 # test_pop.test_evolve_single()
@@ -332,7 +331,11 @@ test_pop.add_grid_variable(
 
 test_pop.set(verbose=1, amt_cores=2, binary=0, evolution_type="linear")
 
-# test_pop.test_evolve_population_mp()
-# test_pop.evolve_population_mp_chunks()
 
-test_pop.evolve_population()
+
+# test_pop.generate_grid_code()
+# test_pop.load_grid_function()
+# print(test_pop.grid_options['system_generator'])
+# test_pop.grid_options['system_generator'](test_pop)
\ No newline at end of file
diff --git a/tests/population/test_population.py b/tests/population/test_population.py
new file mode 100644
index 0000000000000000000000000000000000000000..a43d96917456bc855a5cf0faf6cb19b092c13235
--- /dev/null
+++ b/tests/population/test_population.py
@@ -0,0 +1,171 @@
+import os
+import json
+import time
+import pickle
+import sys
+
+import matplotlib.pyplot as plt
+
+from binarycpython.utils.grid import Population
+from binarycpython.utils.functions import get_help_all, get_help, create_hdf5, output_lines
+
+### 
+# Script to generate BH MS systems.
+
+
+def parse_function(self, output):
+    # Extract info from the population instance
+    # TODO: think about whether this is smart; passing this object around might be overkill
+
+    ####################################################
+    # Get some information from the grid
+    data_dir = self.custom_options['data_dir']
+    base_filename = self.custom_options['base_filename']
+
+    # Check directory, make if necessary
+    os.makedirs(data_dir, exist_ok=True)
+    ####################################################
+
+    #
+    seperator = ' '
+    
+    # Create filename
+    outfilename = os.path.join(data_dir, base_filename)
+
+    result_header = ['zams_mass', 'st_0', 'st_1', 'st_2', 'st_3', 'st_4', 'st_5', 'st_6', 'st_7', 'st_8', 'st_9', 'st_10', 'st_11', 'st_12', 'st_13', 'st_14', 'st_15'] 
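+    # (result_header lists the intended output columns; nothing below writes
+    # it out yet)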
+
+    mass_lost_dict = {}
+    for i in range(16):
+        mass_lost_dict['{}'.format(i)] = 0
+
+    # Accumulators for the totals over all logged lines
+    initial_mass = 0
+    total_mass_lost = 0
+
+    # Go over the output.
+    for el in output_lines(output):
+        headerline = el.split()[0]
+
+        # Check the header and act accordingly
+        if headerline == 'DAVID_MASSLOSS_SN':
+            # Column names of the logged values (for reference; not used below)
+            parameters = ['time', 'mass_1', 'prev_mass_1', 'zams_mass_1', 'stellar_type', 'probability']
+            values = el.split()[1:]
+
+            # Skip the line that is logged at time zero
+            if float(values[0]) != 0.0:
+                mass_lost = float(values[2]) - float(values[1])
+                mass_lost_dict[values[4]] += mass_lost
+
+                initial_mass = values[3]
+
+                total_mass_lost += mass_lost
+
+    result_list = [initial_mass]
+    for key in mass_lost_dict.keys():
+        result_list.append(str(mass_lost_dict[key]))
+
+    result_dict = self.grid_options['result_dict']
+
+    # This read-modify-reassign trick is necessary: the manager dict does not
+    # pick up in-place changes to nested values, so the nested mass dict has
+    # to be re-assigned for the update to reach the other processes.
+    result_dict['mass'] = result_dict.get('mass', {})
+    mass_result = result_dict['mass']
+    mass_result[initial_mass] = mass_result.get(initial_mass, 0) + total_mass_lost
+    result_dict['mass'] = mass_result
+
+    result_dict['probability'] = result_dict.get('probability', 0) + 0.00002123
+
+## Set values
+test_pop = Population()
+test_pop.set(
+    C_logging_code="""
+Printf("DAVID_MASSLOSS_SN %30.12e %g %g %g %d %g\\n",
+    // 
+    stardata->model.time, // 1
+    stardata->star[0].mass, //2
+    stardata->previous_stardata->star[0].mass, //3
+    stardata->star[0].pms_mass, //4
+    stardata->star[0].stellar_type, //5
+    stardata->model.probability //6
+    );
+""")
+
+
+# Set grid variables
+resolution = {'M_1': 5, 'q': 5, 'per': 5}
+
+test_pop.add_grid_variable(
+    name="lnm1",
+    longname="Primary mass",
+    valuerange=[1, 150],
+    resolution="{}".format(resolution["M_1"]),
+    spacingfunc="const(math.log(1), math.log(150), {})".format(resolution["M_1"]),
+    precode="M_1=math.exp(lnm1)",
+    probdist="three_part_powerlaw(M_1, 0.1, 0.5, 1.0, 150, -1.3, -2.3, -2.3)*M_1",
+    dphasevol="dlnm1",
+    parameter_name="M_1",
+    condition="",  # Impose a condition on this grid variable. Mostly for a check for yourself
+)
+
+test_pop.add_grid_variable(
+    name="q",
+    longname="Mass ratio",
+    valuerange=["0.1/M_1", 1],
+    resolution="{}".format(resolution['q']),
+    spacingfunc="const(0.1/M_1, 1, {})".format(resolution['q']),
+    probdist="flatsections(q, [{'min': 0.1/M_1, 'max': 0.8, 'height': 1}, {'min': 0.8, 'max': 1.0, 'height': 1.0}])",
+    dphasevol="dq",
+    precode="M_2 = q * M_1",
+    parameter_name="M_2",
+    condition="",  # Impose a condition on this grid variable. Mostly for a check for yourself    
+)
+
+test_pop.add_grid_variable(
+    name="logper",
+    longname="log(Orbital_Period)",
+    valuerange=[-2, 12],
+    resolution="{}".format(resolution["per"]),
+    spacingfunc="np.linspace(-2, 12, {})".format(resolution["per"]),
+    precode="orbital_period = 10** logper\n", # TODO: 
+    probdist="gaussian(logper,4.8, 2.3, -2.0, 12.0)",
+    parameter_name="orbital_period",
+    dphasevol="dln10per",
+)
+
+##########################################################################
+metallicity = 0.002
+test_pop.set(
+    separation=1000000000, 
+    orbital_period=400000000, 
+    metallicity=metallicity, 
+    M_1=100,
+    M_2=5,
+
+    verbose=1,
+    data_dir=os.path.join(os.environ['BINARYC_DATA_ROOT'], 'testing_python', 'BHMS'),
+    base_filename="BH_MS_z{}.dat".format(metallicity),
+    parse_function=parse_function,
+    amt_cores=2,
+    )
+
+# out = test_pop.evolve_single()
+# print(out)
+
+
+# quit()
+test_pop.evolve_population()
+
+def handle_output(test_pop):
+
+    # results is the manager dict in which parse_function() added up
+    # the results
+    results = test_pop.grid_options['result_dict']
+
+    # Output the mass distribution
+    print(results)
+
+handle_output(test_pop)
+
+
+# print(test_pop.grid_options['results_per_worker'])
+# # Export settings:
+# test_pop.export_all_info(use_datadir=True)
+
+# # hdf5
+# create_hdf5(test_pop.custom_options['data_dir'], name="BH_MS_z{}.hdf5".format(metallicity))
\ No newline at end of file