diff --git a/binarycpython/utils/grid.py b/binarycpython/utils/grid.py
index 11aae04939b1ef7d9118f30dc82307fda444cf46..39e0919c0cf919f574c418d8d0ce4bf78a43a0dc 100644
--- a/binarycpython/utils/grid.py
+++ b/binarycpython/utils/grid.py
@@ -32,11 +32,13 @@ import logging
 import msgpack
 import multiprocessing
 import os
+import pathlib
+import psutil
 import py_rinterpolate
 import re
 import resource
 import setproctitle
 import strip_ansi
+import subprocess
 import sys
 import time
 import uuid
@@ -164,6 +166,10 @@ class Population:
         # Set the options that are passed at creation of the object
         self.set(**kwargs)
 
+        # default status_dir to tmp_dir if it is not set
+        if not self.grid_options.get('status_dir'):
+            self.grid_options['status_dir'] = self.grid_options['tmp_dir']
+
         # Load Moe and di Stefano options
         self.grid_options["Moe2017_options"] = copy.deepcopy(
             moe_di_stefano_default_options
@@ -379,6 +385,33 @@ class Population:
             # unpack the dictionary into the setting function that handles where the values are set
             self.set(**cmdline_dict)
 
+
+    def set_status(self,
+                   string,
+                   dir="process_status",
+                   format_statement="process_{}.txt",
+                   ID=None):
+        """
+        Function to write the given status string to its appropriate status
+        file. ID defaults to this process's process_ID.
+        """
+        if ID is None:
+            ID = self.process_ID
+
+        if self.grid_options['status_dir']:
+            path = os.path.join(
+                self.grid_options["status_dir"],
+                dir,
+                format_statement.format(ID),
+            )
+            # make sure the status directory exists before writing
+            os.makedirs(os.path.dirname(path), exist_ok=True)
+            with open(path, "w") as f:
+                f.write(string)
+
+        # custom logging functions
+        if self.grid_options['slurm'] == 1:
+            self.set_slurm_status(string)
+#        if self.grid_options['condor'] == 1:
+#            self.set_condor_status(string)
+
     def _return_argline(self, parameter_dict=None):
         """
         Function to create the string for the arg line from a parameter dict
@@ -1027,6 +1060,10 @@ class Population:
         NOTE: SLURM and CONDOR options are not working properly yet
         """
 
+        # do not evolve if rungrid is False
+        if self.grid_options['rungrid'] is False:
+            return
+
         # Just to make sure we don't have stuff from a previous run hanging around
         self._pre_run_cleanup()
 
@@ -1449,16 +1486,7 @@ class Population:
         # setproctitle.setthreadtitle(name)
 
         # Set to starting up
-        with open(
-            os.path.join(
-                self.grid_options["tmp_dir"],
-                "process_status",
-                "process_{}.txt".format(self.process_ID),
-            ),
-            "w",
-        ) as f:
-            f.write("STARTING")
-            f.close()
+        self.set_status("STARTING")
 
         # lets try out making stores for all the grids:
         self.grid_options["_store_memaddr"] = _binary_c_bindings.return_store_memaddr()
@@ -1717,16 +1745,7 @@ class Population:
             )
 
         # Set status to finishing
-        with open(
-            os.path.join(
-                self.grid_options["tmp_dir"],
-                "process_status",
-                "process_{}.txt".format(self.process_ID),
-            ),
-            "w",
-        ) as f:
-            f.write("FINISHING")
-            f.close()
+        self.set_status("FINISHING")
 
         if self.grid_options["verbosity"] >= _LOGGER_VERBOSITY_LEVEL:
             stream_logger.debug(f"Process-{self.process_ID} is finishing.")
@@ -1891,16 +1910,7 @@ class Population:
             f.close()
 
         # Set status to finished
-        with open(
-            os.path.join(
-                self.grid_options["tmp_dir"],
-                "process_status",
-                "process_{}.txt".format(self.process_ID),
-            ),
-            "w",
-        ) as f:
-            f.write("FINISHED")
-            f.close()
+        self.set_status("FINISHED")
 
         verbose_print(
             "process {} queue put output_dict ".format(ID),
@@ -4971,3 +4981,205 @@ eccentricity3=0
                 reset=self.ANSI_colours["reset"],
             )
         )
+
+############################################################
+# Slurm support
+############################################################
+
+    def slurmpath(self, path):
+        """
+        Return the full Slurm directory path for the given subpath.
+        """
+        return os.path.abspath(os.path.join(self.grid_options['slurm_dir'], path))
+
+    def slurm_status_file(self,
+                          jobid=None,
+                          jobarrayindex=None):
+        """
+        Return the Slurm status file corresponding to jobid and jobarrayindex,
+        which default to grid_options['slurm_jobid'] and
+        grid_options['slurm_jobarrayindex'] respectively.
+        """
+        if jobid is None:
+            jobid = self.grid_options['slurm_jobid']
+        if jobarrayindex is None:
+            jobarrayindex = self.grid_options['slurm_jobarrayindex']
+        return os.path.join(self.slurmpath('status'),
+                            "{}.{}".format(jobid, jobarrayindex))
+
+    def set_slurm_status(self, string):
+        """
+        Set the Slurm status corresponding to this object, which should have
+        slurm_jobid and slurm_jobarrayindex set.
+        """
+        file = self.slurm_status_file()
+        if file:
+            with open(file, 'w') as f:
+                f.write(string)
+
+    def get_slurm_status(self,
+                         jobid=None,
+                         jobarrayindex=None):
+        """
+        Get and return the Slurm status corresponding to this object, or to
+        jobid.jobarrayindex if these are passed in. If no status is found,
+        return None.
+        """
+        path = pathlib.Path(self.slurm_status_file(jobid=jobid,
+                                                   jobarrayindex=jobarrayindex))
+        if path.is_file():
+            return path.read_text()
+        return None
+
+    def slurm_outfile(self, jobid):
+        """
+        Return a standard filename for the Slurm chunk files.
+        """
+        return os.path.abspath(os.path.join(self.grid_options['slurm_dir'], 'joining', str(jobid)))
+
+    def make_slurm_dirs(self):
+        """
+        Make the directories required by a Slurm run.
+        """
+        if not self.grid_options['slurm_dir']:
+            print("You must set self.grid_options['slurm_dir'] to a directory which we can use to set up binary_c-python's Slurm files. This should be unique to your set of grids.")
+            sys.exit()
+
+        # make a list of directories: these contain the various slurm
+        # output, status files, etc.
+        dirs = [self.slurmpath(dir) for dir in
+                ['scripts', 'stdout', 'stderr', 'results', 'logs', 'status', 'joining']]
+
+        # make the directories
+        for dir in dirs:
+            pathlib.Path(dir).mkdir(exist_ok=True, parents=True)
+
+        # check that they have been made and exist: on networked (e.g. NFS)
+        # filesystems they can take a moment to appear, so wait and retry
+        fail = True
+        while fail is True:
+            fail = False
+            for dir in dirs:
+                if os.path.isdir(dir) is False:
+                    fail = True
+                    time.sleep(1)
+                    break  # break the "for dir in dirs:"
+
+    def slurm_grid(self):
+        """
+        Function to launch a grid of stellar systems using Slurm.
+        """
+
+        if self.grid_options['slurm_command'] == 'run_grid':
+            # run a grid of stars only, leaving the results
+            # in a file
+
+            # get the number of CPU cores available to us (leave one core free)
+            ncpus = max(1, psutil.cpu_count(logical=True) - 1)
+
+            # use them all
+            self.custom_options['num_cores'] = ncpus
+
+            self.evolve()
+
+            return
+
+        elif self.grid_options['slurm_command'] == 'join':
+            # join the data from multiple grid runs
+
+            joinfile = os.path.join(self.slurmpath('joining'),
+                                    str(self.grid_options['slurm_jobid']))
+
+            if os.path.exists(joinfile):
+                print("Another process is already joining")
+                return
+
+            # check that every job in the array has finished
+            for n in range(1, self.grid_options['slurm_njobs'] + 1):
+                statusfile = os.path.join(self.slurmpath('status'),
+                                          "{}.{}".format(self.grid_options['slurm_jobid'], n))
+                print("Check file {file} (jobid {jobid}, n {n})\n".format(
+                    file=statusfile,
+                    jobid=self.grid_options['slurm_jobid'],
+                    n=n))
+                status = self.get_slurm_status(jobid=self.grid_options['slurm_jobid'],
+                                               jobarrayindex=n)
+                if status != "finished":
+                    print("... is not finished")
+                    return
+                else:
+                    print("... is finished")
+
+            # attempt to ~atomically create the joinfile
+            # https://stackoverflow.com/questions/33223564/atomically-creating-a-file-if-it-doesnt-exist-in-python
+            try:
+                pathlib.Path(joinfile).touch(exist_ok=False)
+            except FileExistsError:
+                # another process got there first and is already joining
+                return
+
+            self.grid_options['rungrid'] = False
+
+
+        else:
+            # setup and launch slurm jobs
+            self.make_slurm_dirs()
+
+            # check we're not requesting too much RAM
+            if self.grid_options['slurm_memory'] > self.grid_options['slurm_warn_max_memory']:
+                print("WARNING: you want to use {} MB of RAM: this is unlikely to be correct. If you believe it is, set slurm_warn_max_memory to something larger (it is currently {} MB)\n".format(
+                    self.grid_options['slurm_memory'],
+                    self.grid_options['slurm_warn_max_memory']))
+                sys.exit()
+
+            # set slurm_array, defaulting to "1-njobs%njobs"
+            slurm_array = self.grid_options['slurm_array'] or "1-{njobs}%{njobs}".format(njobs=self.grid_options['slurm_njobs'])
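+            # NB: Slurm's --array syntax "start-end%limit" means run array
+            # indices start..end with at most 'limit' tasks running at once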
+
+            # get job id (might be passed in)
+            jobid = self.grid_options['slurm_jobid'] if self.grid_options['slurm_jobid'] != "" else '$SLURM_ARRAY_JOB_ID'
+
+            # get job array index
+            jobarrayindex = self.grid_options['slurm_jobarrayindex'] if self.grid_options['slurm_jobarrayindex'] != "" else '$SLURM_ARRAY_TASK_ID'
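+            # $SLURM_ARRAY_JOB_ID and $SLURM_ARRAY_TASK_ID are environment
+            # variables that Slurm sets for each array task, so the generated
+            # script picks up the correct job ID and array index at runtime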
+
+            # build the grid command
+            grid_command = [
+                "/usr/bin/env",
+                os.path.abspath(sys.argv[0]),
+                'run_flexigrid=1',
+                'offset=' + str(jobarrayindex),
+                'modulo=' + str(self.grid_options['slurm_njobs']),
+                'results_dumpfile=' + os.path.join(self.slurmpath('results'),
+                                                   "{}.{}".format(jobid, jobarrayindex)),
+                'slurm_njobs=' + str(self.grid_options['slurm_njobs']),
+                'slurm_dir=' + self.grid_options['slurm_dir'],
+                'verbosity=' + str(self.grid_options['verbosity'])
+            ]
+            grid_command = ' '.join(grid_command)
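+            # NB: each array task re-invokes this script with its own offset
+            # (the array index) and modulo (the number of jobs), so every task
+            # evolves a disjoint subset of the grid and dumps its results to
+            # its own results_dumpfile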
+
+            # make slurm script
+            scriptpath = self.slurmpath('slurm_script')
+            try:
+                script = open(scriptpath, 'w')
+            except IOError:
+                print("Could not open Slurm script at {path} for writing: please check that you have set slurm_dir correctly (it is currently {slurm_dir}) and that you can write to this directory.".format(
+                    path=scriptpath,
+                    slurm_dir=self.grid_options['slurm_dir']))
+                sys.exit()
+
+            lines = [
+                "#!/bin/bash\n",
+                "# Slurm script for binary_c-python\n",
+                "#SBATCH --error={slurm_dir}/stderr/%A.%a\n".format(slurm_dir=self.grid_options['slurm_dir']),
+                "#SBATCH --output={slurm_dir}/stdout/%A.%a\n".format(slurm_dir=self.grid_options['slurm_dir']),
+                "#SBATCH --job-name={slurm_jobname}\n".format(slurm_jobname=self.grid_options['slurm_jobname']),
+                "#SBATCH --partition={slurm_partition}\n".format(slurm_partition=self.grid_options['slurm_partition']),
+                "#SBATCH --time={slurm_time}\n".format(slurm_time=self.grid_options['slurm_time']),
+                "#SBATCH --mem={slurm_memory}\n".format(slurm_memory=self.grid_options['slurm_memory']),
+                "#SBATCH --ntasks={slurm_ntasks}\n".format(slurm_ntasks=self.grid_options['slurm_ntasks']),
+                "#SBATCH --array={slurm_array}\n".format(slurm_array=slurm_array),
+                "\n# set status to \"running\"\n",
+                "echo \"running\" > {slurm_dir}/status/{jobid}.{jobarrayindex}\n\n".format(slurm_dir=self.grid_options['slurm_dir'], jobid=jobid, jobarrayindex=jobarrayindex),
+                "\n# run grid of stars\n{grid_command} rungrid=1 slurm_command=run_flexigrid\n".format(grid_command=grid_command),
+                "\n# set status to \"finished\"\necho \"finished\" > {slurm_dir}/status/{jobid}.{jobarrayindex}\n\n\n".format(slurm_dir=self.grid_options['slurm_dir'],jobid=jobid,jobarrayindex=jobarrayindex)
+            ]
+            if not self.grid_options['slurm_postpone_join']:
+                lines.append("\n# check if we can join\n{grid_command} rungrid=0 results_hash_dumpfile={slurm_dir}/results/{jobid}.all slurm_command=join".format(
+                    grid_command=grid_command,
+                    slurm_dir=self.grid_options['slurm_dir'],
+                    jobid=jobid))
+
+            # write to script and close it
+            script.writelines(lines)
+            script.close()
+
+            if not self.grid_options['slurm_postpone_sbatch']:
+                # launch the script with sbatch; pass the command as a list
+                # to avoid shell-quoting issues
+                subprocess.call(["sbatch", scriptpath])
+            else:
+                # just say we would have
+                print("Slurm script is at {path} but has not been launched".format(path=scriptpath))
+
+        print("All done in slurm_grid().")
diff --git a/binarycpython/utils/grid_options_defaults.py b/binarycpython/utils/grid_options_defaults.py
index bfe7a48b0ee789ed505ffaea73f6682dc86d199b..e3c5f4b73872448142e19fece8895ccc8ac4c9c3 100644
--- a/binarycpython/utils/grid_options_defaults.py
+++ b/binarycpython/utils/grid_options_defaults.py
@@ -159,73 +159,73 @@ grid_options_defaults_dict = {
     # Slurm stuff
     ########################################
     "slurm": 0,  # dont use the slurm by default. 1 = use slurm
-    # "slurm_ntasks": 1,  # CPUs required per array job: usually only need this
-    # "slurm_command": "",  # Command that slurm runs (e.g. evolve or join_datafiles)
-    # "slurm_dir": "",  # working directory containing scripts output logs etc.
-    # "slurm_njobs": 0,  # number of scripts; set to 0 as default
-    # "slurm_jobid": "",  # slurm job id (%A)
-    # "slurm_memory": 512,  # in MB, the memory use of the job
-    # "slurm_warn_max_memory": 1024,  # in MB : warn if mem req. > this
-    # "slurm_use_all_node_CPUs": 0,  # 1 = use all of a node's CPUs. 0 = use a given number of CPUs
-    # "slurm_postpone_join": 0,  # if 1 do not join on slurm, join elsewhere. want to do it off the slurm grid (e.g. with more RAM)
-    # "slurm_jobarrayindex": "",  # slurm job array index (%a)
-    # "slurm_jobname": "binary_grid",  # default
-    # "slurm_partition": None,
-    # "slurm_time": 0,  # total time. 0 = infinite time
-    # "slurm_postpone_sbatch": 0,  # if 1: don't submit, just make the script
-    # "slurm_array": None,  # override for --array, useful for rerunning jobs
-    # "slurm_use_all_node_CPUs": 0,  # if given nodes, set to 1
+    "slurm_ntasks": 1,  # CPUs required per array job: usually only need this
+    "slurm_command": "",  # Command that slurm runs (e.g. evolve or join_datafiles)
+    "slurm_dir": "",  # working directory containing scripts output logs etc.
+    "slurm_njobs": 0,  # number of scripts; set to 0 as default
+    "slurm_jobid": "",  # slurm job id (%A)
+    "slurm_memory": 512,  # in MB, the memory use of the job
+    "slurm_warn_max_memory": 1024,  # in MB : warn if mem req. > this
+    "slurm_use_all_node_CPUs": 0,  # 1 = use all of a node's CPUs. 0 = use a given number of CPUs
+    "slurm_postpone_join": 0,  # if 1 do not join on slurm, join elsewhere. want to do it off the slurm grid (e.g. with more RAM)
+    "slurm_jobarrayindex": "",  # slurm job array index (%a)
+    "slurm_jobname": "binary_grid",  # default
+    "slurm_partition": None,
+    "slurm_time": 0,  # total time. 0 = infinite time
+    "slurm_postpone_sbatch": 0,  # if 1: don't submit, just make the script
+    "slurm_array": None,  # override for --array, useful for rerunning jobs
+    "slurm_use_all_node_CPUs": 0,  # if given nodes, set to 1
     # # if given CPUs, set to 0
     # # you will want to use this if your Slurm SelectType is e.g. linear
     # # which means it allocates all the CPUs in a node to the job
-    # "slurm_control_CPUs": 0,  # if so, leave this many for Pythons control (0)
-    # "slurm_array": None,  # override for --array, useful for rerunning jobs
-    # "slurm_partition": None,  # MUST be defined
-    # "slurm_extra_settings": {},  # Place to put extra configuration for the SLURM batch file. The key and value of the dict will become the key and value of the line in te slurm batch file. Will be put in after all the other settings (and before the command). Take care not to overwrite something without really meaning to do so.
+    "slurm_control_CPUs": 0,  # if so, leave this many for Pythons control (0)
+    "slurm_array": None,  # override for --array, useful for rerunning jobs
+    "slurm_partition": None,  # MUST be defined
+    "slurm_extra_settings": {},  # Place to put extra configuration for the SLURM batch file. The key and value of the dict will become the key and value of the line in te slurm batch file. Will be put in after all the other settings (and before the command). Take care not to overwrite something without really meaning to do so.
     ########################################
     # Condor stuff
     ########################################
     "condor": 0,  # 1 to use condor, 0 otherwise
-    # "condor_command": "",  # condor command e.g. "evolve", "join"
-    # "condor_dir": "",  # working directory containing e.g. scripts, output, logs (e.g. should be NFS available to all)
-    # "condor_njobs": "",  # number of scripts/jobs that CONDOR will run in total
-    # "condor_jobid": "",  # condor job id
-    # "condor_postpone_join": 0,  # if 1, data is not joined, e.g. if you want to do it off the condor grid (e.g. with more RAM)
-    # # "condor_join_machine": None, # if defined then this is the machine on which the join command should be launched (must be sshable and not postponed)
-    # "condor_join_pwd": "",  # directory the join should be in (defaults to $ENV{PWD} if undef)
-    # "condor_memory": 1024,  # in MB, the memory use (ImageSize) of the job
-    # "condor_universe": "vanilla",  # usually vanilla universe
-    # "condor_extra_settings": {},  # Place to put extra configuration for the CONDOR submit file. The key and value of the dict will become the key and value of the line in te slurm batch file. Will be put in after all the other settings (and before the command). Take care not to overwrite something without really meaning to do so.
+    "condor_command": "",  # condor command e.g. "evolve", "join"
+    "condor_dir": "",  # working directory containing e.g. scripts, output, logs (e.g. should be NFS available to all)
+    "condor_njobs": "",  # number of scripts/jobs that CONDOR will run in total
+    "condor_jobid": "",  # condor job id
+    "condor_postpone_join": 0,  # if 1, data is not joined, e.g. if you want to do it off the condor grid (e.g. with more RAM)
+    # "condor_join_machine": None, # if defined then this is the machine on which the join command should be launched (must be sshable and not postponed)
+    "condor_join_pwd": "",  # directory the join should be in (defaults to $ENV{PWD} if undef)
+    "condor_memory": 1024,  # in MB, the memory use (ImageSize) of the job
+    "condor_universe": "vanilla",  # usually vanilla universe
+    "condor_extra_settings": {},  # Place to put extra configuration for the CONDOR submit file. The key and value of the dict will become the key and value of the line in te slurm batch file. Will be put in after all the other settings (and before the command). Take care not to overwrite something without really meaning to do so.
     # snapshots and checkpoints
-    # condor_snapshot_on_kill=>0, # if 1 snapshot on SIGKILL before exit
-    # condor_load_from_snapshot=>0, # if 1 check for snapshot .sv file and load it if found
-    # condor_checkpoint_interval=>0, # checkpoint interval (seconds)
-    # condor_checkpoint_stamp_times=>0, # if 1 then files are given timestamped names
+    "condor_snapshot_on_kill": 0,  # if 1 snapshot on SIGKILL before exit
+    "condor_load_from_snapshot": 0,  # if 1 check for snapshot .sv file and load it if found
+    "condor_checkpoint_interval": 0,  # checkpoint interval (seconds)
+    "condor_checkpoint_stamp_times": 0,  # if 1 then files are given timestamped names
     # (warning: lots of files!), otherwise just store the lates
-    # condor_streams=>0, # stream stderr/stdout by default (warning: might cause heavy network load)
-    # condor_save_joined_file=>0, # if 1 then results/joined contains the results
+    "condor_streams": 0,  # stream stderr/stdout by default (warning: might cause heavy network load)
+    "condor_save_joined_file": 0,  # if 1 then results/joined contains the results
     # (useful for debugging, otherwise a lot of work)
-    # condor_requirements=>'', # used?
+    "condor_requirements": "",  # used?
     #     # resubmit options : if the status of a condor script is
     #     # either 'finished','submitted','running' or 'crashed',
     #     # decide whether to resubmit it.
     #     # NB Normally the status is empty, e.g. on the first run.
     #     # These are for restarting runs.
-    #     condor_resubmit_finished=>0,
-    # condor_resubmit_submitted=>0,
-    # condor_resubmit_running=>0,
-    # condor_resubmit_crashed=>0,
+    #     condor_resubmit_finished:0,
+    "condor_resubmit_submitted": 0,
+    "condor_resubmit_running": 0,
+    "condor_resubmit_crashed": 0,
     ##########################
     # Unordered. Need to go through this. Copied from the perl implementation.
     ##########################
     ##
-    # return_array_refs=>1, # quicker data parsing mode
-    # sort_args=>1,
-    # save_args=>1,
-    # nice=>'nice -n +20',  # nice command e.g. 'nice -n +10' or ''
-    # timeout=>15, # seconds until timeout
-    # log_filename=>"/scratch/davidh/results_simulations/tmp/log.txt",
-    # # current_log_filename=>"/scratch/davidh/results_simulations/tmp/grid_errors.log",
+    # return_array_refs:1, # quicker data parsing mode
+    # sort_args:1,
+    # save_args:1,
+    # nice:'nice -n +20',  # nice command e.g. 'nice -n +10' or ''
+    # timeout:15, # seconds until timeout
+    # log_filename:"/scratch/davidh/results_simulations/tmp/log.txt",
+    # # current_log_filename:"/scratch/davidh/results_simulations/tmp/grid_errors.log",
     ############################################################
     # Set default grid properties (in %self->{_grid_options}}
     # and %{$self->{_bse_options}})
@@ -237,133 +237,133 @@ grid_options_defaults_dict = {
     # my $os = rob_misc::operating_system();
     # %{$self->{_grid_options}}=(
     #     # save operating system
-    # operating_system=>$os,
+    # operating_system:$os,
     #     # process name
-    #     process_name => 'binary_grid'.$VERSION,
-    # grid_defaults_set=>1, # so we know the grid_defaults function has been called
+    #     process_name : 'binary_grid'.$VERSION,
+    # grid_defaults_set:1, # so we know the grid_defaults function has been called
     # # grid suspend files: assume binary_c by default
-    # suspend_files=>[$tmp.'/force_binary_c_suspend',
+    # suspend_files:[$tmp.'/force_binary_c_suspend',
     #         './force_binary_c_suspend'],
-    # snapshot_file=>$tmp.'/binary_c-snapshot',
+    # snapshot_file:$tmp.'/binary_c-snapshot',
     # ########################################
     # # infomration about the running grid script
     # ########################################
-    # working_directory=>cwd(), # the starting directory
-    # perlscript=>$0, # the name of the perlscript
-    # perlscript_arguments=>join(' ',@ARGV), # arguments as a string
-    # perl_executable=>$^X, # the perl executable
-    # command_line=>join(' ',$0,@ARGV), # full command line
-    # process_ID=>$$, # process ID of the main perl script
+    # working_directory:cwd(), # the starting directory
+    # perlscript:$0, # the name of the perlscript
+    # perlscript_arguments:join(' ',@ARGV), # arguments as a string
+    # perl_executable:$^X, # the perl executable
+    # command_line:join(' ',$0,@ARGV), # full command line
+    # process_ID:$$, # process ID of the main perl script
     # ########################################
     # # GRID
     # ########################################
     #     # if undef, generate gridcode, otherwise load the gridcode
     #     # from this file. useful for debugging
-    #     gridcode_from_file => undef,
+    #     gridcode_from_file : undef,
     #     # assume binary_grid perl backend by default
-    #     backend =>
+    #     backend :
     #     $self->{_grid_options}->{backend} //
     #     $binary_grid2::backend //
     #     'binary_grid::Perl',
     #     # custom C function for output : this automatically
     #     # binds if a function is available.
-    #     C_logging_code => undef,
-    #     C_auto_logging => undef,
-    #     custom_output_C_function_pointer => binary_c_function_bind(),
+    #     C_logging_code : undef,
+    #     C_auto_logging : undef,
+    #     custom_output_C_function_pointer : binary_c_function_bind(),
     # # control flow
-    # rungrid=>1, # usually run the grid, but can be 0
+    "rungrid": True,  # usually run the grid, but can be False
     # # to skip it (e.g. for condor/slurm runs)
-    # merge_datafiles=>'',
-    # merge_datafiles_filelist=>'',
+    # merge_datafiles:'',
+    # merge_datafiles_filelist:'',
     # # parameter space options
-    # binary=>0, # set to 0 for single stars, 1 for binaries
+    # binary:0, # set to 0 for single stars, 1 for binaries
     #     # if use_full_resolution is 1, then run a dummy grid to
     #     # calculate the resolution. this could be slow...
-    #     use_full_resolution => 1,
+    #     use_full_resolution : 1,
     # # the probability in any distribution must be within
     # # this tolerance of 1.0, ignored if undef (if you want
     # # to run *part* of the parameter space then this *must* be undef)
-    # probability_tolerance=>undef,
+    # probability_tolerance:undef,
     # # how to deal with a failure of the probability tolerance:
     # # 0 = nothing
     # # 1 = warning
     # # 2 = stop
-    # probability_tolerance_failmode=>1,
+    # probability_tolerance_failmode:1,
     # # add up and log system error count and probability
-    # add_up_system_errors=>1,
-    # log_system_errors=>1,
+    # add_up_system_errors:1,
+    # log_system_errors:1,
     # # codes, paths, executables etc.
     # # assume binary_c by default, and set its defaults
-    # code=>'binary_c',
-    # arg_prefix=>'--',
-    # prog=>'binary_c', # executable
-    # nice=>'nice -n +0', # nice command
-    # ionice=>'',
+    # code:'binary_c',
+    # arg_prefix:'--',
+    # prog:'binary_c', # executable
+    # nice:'nice -n +0', # nice command
+    # ionice:'',
     # # compress output?
-    # binary_c_compression=>0,
+    # binary_c_compression:0,
     #     # get output as array of pre-split array refs
-    #     return_array_refs=>1,
+    #     return_array_refs:1,
     # # environment
-    # shell_environment=>undef,
-    # libpath=>undef, # for backwards compatibility
+    # shell_environment:undef,
+    # libpath:undef, # for backwards compatibility
     # # where is binary_c? need this to get the values of some counters
-    # rootpath=>$self->okdir($ENV{BINARY_C_ROOTPATH}) //
+    # rootpath:$self->okdir($ENV{BINARY_C_ROOTPATH}) //
     # $self->okdir($ENV{HOME}.'/progs/stars/binary_c') //
     # '.' , # last option is a fallback ... will fail if it doesn't exist
-    # srcpath=>$self->okdir($ENV{BINARY_C_SRCPATH}) //
+    # srcpath:$self->okdir($ENV{BINARY_C_SRCPATH}) //
     # $self->okdir($ENV{BINARY_C_ROOTPATH}.'/src') //
     # $self->okdir($ENV{HOME}.'/progs/stars/binary_c/src') //
     # './src' , # last option is fallback... will fail if it doesn't exist
     # # stack size per thread in megabytes
-    # threads_stack_size=>50,
+    # threads_stack_size:50,
     # # thread sleep time between starting the evolution code and starting
     # # the grid
-    # thread_presleep=>0,
+    # thread_presleep:0,
     # # threads
     # # Max time a thread can sit looping (with calls to tbse_line)
     # # before a warning is issued : NB this does not catch real freezes,
     # # just infinite loops (which still output)
-    # thread_max_freeze_time_before_warning=>10,
+    # thread_max_freeze_time_before_warning:10,
     # # run all models by default: modulo=1, offset=0
-    # modulo=>1,
-    # offset=>0,
+    # modulo:1,
+    # offset:0,
     #     # max number of stars on the queue
-    #     maxq_per_thread => 100,
+    #     maxq_per_thread : 100,
     # # data dump file : undef by default (do nothing)
-    # results_hash_dumpfile => '',
+    # results_hash_dumpfile : '',
     # # compress files with bzip2 by default
-    # compress_results_hash => 1,
+    # compress_results_hash : 1,
     # ########################################
     # # CPU
     # ########################################
-    # cpu_cap=>0, # if 1, limits to one CPU
-    # cpu_affinity => 0, # do not bind to a CPU by default
+    # cpu_cap:0, # if 1, limits to one CPU
+    # cpu_affinity : 0, # do not bind to a CPU by default
     # ########################################
     # # Code, Timeouts, Signals
     # ########################################
-    # binary_grid_code_filtering=>1, #  you want this, it's (MUCH!) faster
-    # pre_filter_file=>undef, # dump pre filtered code to this file
-    # post_filter_file=>undef,  # dump post filtered code to this file
-    # timeout=>30, # timeout in seconds
-    # timeout_vb=>0, # no timeout logging
-    # tvb=>0, # no thread logging
-    # nfs_sleep=>1, # time to wait for NFS to catch up with file accesses
+    # binary_grid_code_filtering:1, #  you want this, it's (MUCH!) faster
+    # pre_filter_file:undef, # dump pre filtered code to this file
+    # post_filter_file:undef,  # dump post filtered code to this file
+    # timeout:30, # timeout in seconds
+    # timeout_vb:0, # no timeout logging
+    # tvb:0, # no thread logging
+    # nfs_sleep:1, # time to wait for NFS to catch up with file accesses
     # # flexigrid checks the timeouts every
     # # flexigrid_timeout_check_interval seconds
-    # flexigrid_timeout_check_interval=>0.01,
+    # flexigrid_timeout_check_interval:0.01,
     # # this is set to 1 when the grid is finished
-    # flexigrid_finished=>0,
+    # flexigrid_finished:0,
     # # allow signals by default
-    # 'no signals'=>0,
+    # 'no signals':0,
     # # but perhaps disable specific signals?
-    # 'disable signal'=>{INT=>0,ALRM=>0,CONT=>0,USR1=>0,STOP=>0},
+    # 'disable signal':{INT:0,ALRM:0,CONT:0,USR1:0,STOP:0},
     # # dummy variables
-    # single_star_period=>1e50,  # orbital period of a single star
+    # single_star_period:1e50,  # orbital period of a single star
     # #### timers : set timers to 0 (or empty list) to ignore,
     # #### NB these must be given context (e.g. main::xyz)
     # #### for functions not in binary_grid
-    # timers=>0,
-    # timer_subroutines=>[
+    # timers:0,
+    # timer_subroutines:[
     #     # this is a suggested default list
     #     'flexigrid',
     #         'set_next_alarm',
@@ -375,76 +375,76 @@ grid_options_defaults_dict = {
     # ########################################
     # # INPUT/OUTPUT
     # ########################################
-    # blocking=>undef, # not yet set
+    # blocking:undef, # not yet set
     # # prepend command with stdbuf to stop buffering (if available)
-    # stdbuf_command=>`stdbuf --version`=~/stdbuf \(GNU/ ? ' stdbuf -i0 -o0 -e0 ' : undef,
-    # vb=>("@ARGV"=~/\Wvb=(\d+)\W/)[0] // 0, # set to 1 (or more) for verbose output to the screen
-    # log_dt_secs=>1, # log output to stdout~every log_dt_secs seconds
-    # nmod=>10, # every nmod models there is output to the screen,
+    # stdbuf_command:`stdbuf --version`=~/stdbuf \(GNU/ ? ' stdbuf -i0 -o0 -e0 ' : undef,
+    # vb:("@ARGV"=~/\Wvb=(\d+)\W/)[0] // 0, # set to 1 (or more) for verbose output to the screen
+    # log_dt_secs:1, # log output to stdout~every log_dt_secs seconds
+    # nmod:10, # every nmod models there is output to the screen,
     # # if log_dt_secs has been exceeded also (ignored if 0)
-    # colour=>1, # set to 1 to use the ANSIColor module for colour output
-    # log_args=>0, # do not log args in files
-    # log_fins=>0, # log end of runs too
-    #     sort_args=>0, # do not sort args
-    # save_args=>0, # do not save args in a string
-    # log_args_dir=>$tmp, # where to output the args files
-    # always_reopen_arg_files=>0, # if 1 then arg files are always closed and reopened
+    # colour:1, # set to 1 to use the ANSIColor module for colour output
+    # log_args:0, # do not log args in files
+    # log_fins:0, # log end of runs too
+    #     sort_args:0, # do not sort args
+    # save_args:0, # do not save args in a string
+    # log_args_dir:$tmp, # where to output the args files
+    # always_reopen_arg_files:0, # if 1 then arg files are always closed and reopened
     #   (may cause a lot of disk I/O)
-    # lazy_arg_sending=>1, # if 1, the previous args are remembered and
+    # lazy_arg_sending:1, # if 1, the previous args are remembered and
     # # only args that changed are sent (except M1, M2 etc. which always
     # # need sending)
     # # force output files to open on a local disk (not an NFS partion)
     # # not sure how to do this on another OS
-    # force_local_hdd_use=>($os eq 'unix'),
+    # force_local_hdd_use:($os eq 'unix'),
     # # for verbose output, define the newline
     # # For terminals use "\x0d", for files use "\n", in the
     # # case of multiple threads this will be set to \n
-    # newline=> "\x0d",
+    # newline: "\x0d",
     #     # use reset_stars_defaults
-    #     reset_stars_defaults=>1,
+    #     reset_stars_defaults:1,
     # # set signal captures: argument determines behaviour when the code locks up
     # # 0: exit
     # # 1: reset and try the next star (does this work?!)
-    # alarm_procedure=>1,
+    # alarm_procedure:1,
     # # exit on eval failure?
-    # exit_on_eval_failure=>1,
+    # exit_on_eval_failure:1,
     # ## functions: these should be set by perl lexical name
     # ## (they are automatically converted to function pointers
     # ## at runtime)
     # # function to be called just before a thread is created
-    # thread_precreate_function=>undef,
-    #     thread_precreate_function_pointer=>undef,
+    # thread_precreate_function:undef,
+    #     thread_precreate_function_pointer:undef,
     # # function to be called just after a thread is created
     # # (from inside the thread just before *grid () call)
-    # threads_entry_function=>undef,
-    #     threads_entry_function_pointer=>undef,
+    # threads_entry_function:undef,
+    #     threads_entry_function_pointer:undef,
     # # function to be called just after a thread is finished
     # # (from inside the thread just after *grid () call)
-    # threads_flush_function=>undef,
-    # threads_flush_function_pointer=>undef,
+    # threads_flush_function:undef,
+    # threads_flush_function_pointer:undef,
     # # function to be called just after a thread is created
     # # (but external to the thread)
-    # thread_postrun_function=>undef,
-    # thread_postrun_function_pointer=>undef,
+    # thread_postrun_function:undef,
+    # thread_postrun_function_pointer:undef,
     # # function to be called just before a thread join
     # # (external to the thread)
-    # thread_prejoin_function=>undef,
-    # thread_prejoin_function_pointer=>undef,
+    # thread_prejoin_function:undef,
+    # thread_prejoin_function_pointer:undef,
     # # default to using the internal join_flexigrid_thread function
-    # threads_join_function=>'binary_grid2::join_flexigrid_thread',
-    # threads_join_function_pointer=>sub{return $self->join_flexigrid_thread(@_)},
+    # threads_join_function:'binary_grid2::join_flexigrid_thread',
+    # threads_join_function_pointer:sub{return $self->join_flexigrid_thread(@_)},
     # # function to be called just after a thread join
     # # (external to the thread)
-    # thread_postjoin_function=>undef,
-    # thread_postjoin_function_pointer=>undef,
+    # thread_postjoin_function:undef,
+    # thread_postjoin_function_pointer:undef,
     # # usually, parse_bse in the main script is called
-    # parse_bse_function=>'main::parse_bse',
-    #     parse_bse_function_pointer=>undef,
+    # parse_bse_function:'main::parse_bse',
+    #     parse_bse_function_pointer:undef,
     # # if starting_snapshot_file is defined, load initial
     # # values for the grid from the snapshot file rather
     # # than a normal initiation: this enables you to
     # # stop and start a grid
-    # starting_snapshot_file=>undef,
+    # starting_snapshot_file:undef,
 }
 
 # Grid containing the descriptions of the options # TODO: add input types for all of them