diff --git a/binarycpython/utils/condor.py b/binarycpython/utils/condor.py
index 85e621210f460cae0b0a43686fcfa9536eb0b2dd..3cb71852c59385f50ea7586fcba7ade6816ed825 100644
--- a/binarycpython/utils/condor.py
+++ b/binarycpython/utils/condor.py
@@ -362,7 +362,7 @@ queue {njobs}
                 request_memory = self.grid_options['condor_memory'],
                 request_cpus = self.grid_options['num_processes'],
                 should_transfer_files = self.grid_options['condor_should_transfer_files'],
-                when_to_transfer_output = self.grid_options['when_to_transfer_output'],
+                when_to_transfer_output = self.grid_options['condor_when_to_transfer_output'],
                 requirements = self.grid_options['condor_requirements'],
                 batchname = self.grid_options['condor_batchname'],
                 kill_sig = self.grid_options['condor_kill_sig'],
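
Note: the hunk above and the defaults change below rename the option when_to_transfer_output to condor_when_to_transfer_output, in line with the other condor_* options; the submit-file template and the defaults dictionary must agree on the key name, otherwise the lookup fails at submit time. A minimal, hypothetical sketch of that failure mode in plain Python:

    # Hypothetical sketch: reading a grid option under a stale key name.
    grid_options = {"condor_when_to_transfer_output": "ON_EXIT_OR_EVICT"}
    try:
        when = grid_options["when_to_transfer_output"]  # old, un-prefixed key
    except KeyError as missing:
        print("unknown grid option:", missing)  # -> 'when_to_transfer_output'
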
diff --git a/binarycpython/utils/grid_options_defaults.py b/binarycpython/utils/grid_options_defaults.py
index cdca850744d153e40e6319994a0fbf7763880af7..7c7533b07f3835493373baaaabc028e40d592bae 100644
--- a/binarycpython/utils/grid_options_defaults.py
+++ b/binarycpython/utils/grid_options_defaults.py
@@ -164,29 +164,7 @@ class grid_options_defaults():
             # TODO: make MC options
             ## Evolution from source file
             # TODO: make run from sourcefile options.
-            ## Other no yet implemented parts for the population evolution part
-            #     # start at this model number: handy during debugging
-            #     # to skip large parts of the grid
-            #     start_at => 0
-            #     global_error_string => undef,
-            #     monitor_files => [],
-            #     nextlogtime   => 0,
-            #     nthreads      => 1, # number of threads
-            #     # start at model offset (0-based, so first model is zero)
-            #     offset        => 0,
-            #     resolution=>{
-            #         shift   =>0,
-            #         previous=>0,
-            #         n       =>{} # per-variable resolution
-            #     },
-            #     thread_q      => undef,
-            #     threads       => undef, # array of threads objects
-            #     tstart        => [gettimeofday], # flexigrid start time
-            #     __nvar        => 0, # number of grid variables
-            #     _varstub      => undef,
-            #     _lock         => undef,
-            #     _evcode_pids  => [],
-            # };
+
             ########################################
             # HPC variables
             ########################################
@@ -205,15 +183,14 @@ class grid_options_defaults():
             "slurm_warn_max_memory": '1024MB',  # warn if we set it to more than this (usually by accident)
             "slurm_postpone_join": 0,  # if 1 do not join on slurm, join elsewhere. want to do it off the slurm grid (e.g. with more RAM)
             "slurm_jobarrayindex": None,  # slurm job array index (%a)
-            "slurm_jobname": "binary_grid",  # default
+            "slurm_jobname": "binary_c-python",  # default
             "slurm_partition": None,
             "slurm_time": 0,  # total time. 0 = infinite time
             "slurm_postpone_sbatch": 0,  # if 1: don't submit, just make the script
             "slurm_array": None,  # override for --array, useful for rerunning jobs
             "slurm_array_max_jobs" : None, # override for the max number of concurrent array jobs
             "slurm_extra_settings": {},  # Dictionary of extra settings for Slurm to put in its launch script.
-            "slurm_sbatch": "sbatch", # sbatch command
-            "slurm_restart_dir" : None, # restart Slurm jobs from this directory
+            "slurm_sbatch": shutil.which("sbatch"), # sbatch command
             "slurm_env" : shutil.which("env"), # env location for Slurm
             "slurm_bash" : shutil.which("bash"), # bash location for Slurm
             "slurm_pwd" : shutil.which("pwd"), # pwd command location for Slurm
@@ -229,265 +206,32 @@
             "condor_Process": None,  # condor process, equivalent to Slurm's jobarrayindex
             "condor_postpone_submit": 0,  # if 1, the condor script is not submitted (useful for debugging). Default 0.
             "condor_postpone_join": 0,  # if 1, data is not joined, e.g. if you want to do it off the condor grid (e.g. with more RAM). Default 0.
-            # "condor_join_machine": None, # if defined then this is the machine on which the join command should be launched (must be sshable and not postponed)
             "condor_memory": 512,  # in MB, the memory use (ImageSize) of the job
             "condor_warn_max_memory": 1024,  # in MB, the memory use (ImageSize) of the job
             "condor_universe": "vanilla",  # usually vanilla universe
             "condor_extra_settings": {},  # Place to put extra configuration for the CONDOR submit file. The key and value of the dict will become the key and value of the line in te slurm batch file. Will be put in after all the other settings (and before the command). Take care not to overwrite something without really meaning to do so.
             # snapshots and checkpoints
             'condor_snapshot_on_kill':0, # if 1 snapshot on SIGKILL before exit
-            'condor_load_from_snapshot':0, # if 1 check for snapshot .sv file and load it if found
-            'condor_checkpoint_interval':0, # checkpoint interval (seconds)
-            'condor_checkpoint_stamp_times':0, # if 1 then files are given timestamped names
-            # (warning: lots of files!), otherwise just store the lates
             'condor_stream_output':True, # stream stdout
             'condor_stream_error':True, # stream stderr
             'condor_should_transfer_files' : 'YES',
-            'when_to_transfer_output' : 'ON_EXIT_OR_EVICT',
-            'condor_save_joined_file':0, # if 1 then results/joined contains the results
-            # (useful for debugging, otherwise a lot of work)
+            'condor_when_to_transfer_output' : 'ON_EXIT_OR_EVICT',
             'condor_requirements':'', # job requirements
             'condor_env' : shutil.which("env"), # /usr/bin/env location
             'condor_bash' : shutil.which("bash"), # bash executable location
             "condor_pwd" : shutil.which("pwd"), # pwd command location for Condor
             "condor_date" : shutil.which("date"), # bash location for Condor
-            "condor_array_max_jobs" : None, # override for the max number of concurrent array jobs
             "condor_initial_dir" : None, # directory from which condor is run, if None is the directory in which your script is run
-            "condor_submit" : "condor_submit", # the condor_submit command
+            "condor_submit" : shutil.which("condor_submit"), # the condor_submit command
             "condor_getenv" : True, # if True condor takes the environment at submission and copies it to the jobs. You almost certainly want this.
-            "condor_restart_dir" : None, # restart Condor jobs from this directory
             "condor_batchname" : "binary_c-condor", # Condor batchname option
             "condor_kill_sig" : "SIGINT", # signal Condor should use to stop a process : note that grid.py expects this to be "SIGINT"
-            ##########################
-            # Unordered. Need to go through this. Copied from the perl implementation.
-            ##########################
-            ##
-            # return_array_refs:1, # quicker data parsing mode
-            # sort_args:1,
-            # save_args:1,
-            # nice:'nice -n +20',  # nice command e.g. 'nice -n +10' or ''
-            # timeout:15, # seconds until timeout
-            # log_filename:"/scratch/davidh/results_simulations/tmp/log.txt",
-            # # current_log_filename:"/scratch/davidh/results_simulations/tmp/grid_errors.log",
-            ############################################################
-            # Set default grid properties (in %self->{_grid_options}}
-            # and %{$self->{_bse_options}})
-            # This is the first thing that should be called by the user!
-            ############################################################
-            # # set signal handlers for timeout
-            # $self->set_class_signal_handlers();
-            # # set operating system
-            # my $os = rob_misc::operating_system();
-            # %{$self->{_grid_options}}=(
-            #     # save operating system
-            # operating_system:$os,
-            #     # process name
-            #     process_name : 'binary_grid'.$VERSION,
-            # grid_defaults_set:1, # so we know the grid_defaults function has been called
-            # # grid suspend files: assume binary_c by default
-            # suspend_files:[$tmp.'/force_binary_c_suspend',
-            #         './force_binary_c_suspend'],
-            # snapshot_file:$tmp.'/binary_c-snapshot',
-            # ########################################
-            # # infomration about the running grid script
-            # ########################################
-            # working_directory:cwd(), # the starting directory
-            # perlscript:$0, # the name of the perlscript
-            # perlscript_arguments:join(' ',@ARGV), # arguments as a string
-            # perl_executable:$^X, # the perl executable
-            # command_line:join(' ',$0,@ARGV), # full command line
-            # process_ID:$$, # process ID of the main perl script
             # ########################################
             # # GRID
             # ########################################
-            #     # if undef, generate gridcode, otherwise load the gridcode
-            #     # from this file. useful for debugging
-            #     gridcode_from_file : undef,
-            #     # assume binary_grid perl backend by default
-            #     backend :
-            #     $self->{_grid_options}->{backend} //
-            #     $binary_grid2::backend //
-            #     'binary_grid::Perl',
-            #     # custom C function for output : this automatically
-            #     # binds if a function is available.
-            #     C_logging_code : undef,
-            #     C_auto_logging : undef,
-            #     custom_output_C_function_pointer : binary_c_function_bind(),
-            # # control flow
-            'rungrid' : 1, # usually run the grid, but can be 0
-            # # to skip it (e.g. for condor/slurm runs)
-            # merge_datafiles:'',
-            # merge_datafiles_filelist:'',
-            # # parameter space options
-            # binary:0, # set to 0 for single stars, 1 for binaries
-            #     # if use_full_resolution is 1, then run a dummy grid to
-            #     # calculate the resolution. this could be slow...
-            #     use_full_resolution : 1,
-            # # the probability in any distribution must be within
-            # # this tolerance of 1.0, ignored if undef (if you want
-            # # to run *part* of the parameter space then this *must* be undef)
-            # probability_tolerance:undef,
-            # # how to deal with a failure of the probability tolerance:
-            # # 0 = nothing
-            # # 1 = warning
-            # # 2 = stop
-            # probability_tolerance_failmode:1,
-            # # add up and log system error count and probability
-            # add_up_system_errors:1,
-            # log_system_errors:1,
-            # # codes, paths, executables etc.
-            # # assume binary_c by default, and set its defaults
-            # code:'binary_c',
-            # arg_prefix:'--',
-            # prog:'binary_c', # executable
-            # nice:'nice -n +0', # nice command
-            # ionice:'',
-            # # compress output?
-            # binary_c_compression:0,
-            #     # get output as array of pre-split array refs
-            #     return_array_refs:1,
-            # # environment
-            # shell_environment:undef,
-            # libpath:undef, # for backwards compatibility
-            # # where is binary_c? need this to get the values of some counters
-            # rootpath:$self->okdir($ENV{BINARY_C_ROOTPATH}) //
-            # $self->okdir($ENV{HOME}.'/progs/stars/binary_c') //
-            # '.' , # last option is a fallback ... will fail if it doesn't exist
-            # srcpath:$self->okdir($ENV{BINARY_C_SRCPATH}) //
-            # $self->okdir($ENV{BINARY_C_ROOTPATH}.'/src') //
-            # $self->okdir($ENV{HOME}.'/progs/stars/binary_c/src') //
-            # './src' , # last option is fallback... will fail if it doesn't exist
-            # # stack size per thread in megabytes
-            # threads_stack_size:50,
-            # # thread sleep time between starting the evolution code and starting
-            # # the grid
-            # thread_presleep:0,
-            # # threads
-            # # Max time a thread can sit looping (with calls to tbse_line)
-            # # before a warning is issued : NB this does not catch real freezes,
-            # # just infinite loops (which still output)
-            # thread_max_freeze_time_before_warning:10,
-            # # run all models by default: modulo=1, offset=0
-            # modulo:1,
-            # offset:0,
-            #     # max number of stars on the queue
-            #     maxq_per_thread : 100,
-            # # data dump file : undef by default (do nothing)
-            # results_hash_dumpfile : '',
-            # # compress files with bzip2 by default
-            # compress_results_hash : 1,
-            # ########################################
-            # # CPU
-            # ########################################
-            # cpu_cap:0, # if 1, limits to one CPU
-            # cpu_affinity : 0, # do not bind to a CPU by default
-            # ########################################
-            # # Code, Timeouts, Signals
-            # ########################################
-            # binary_grid_code_filtering:1, #  you want this, it's (MUCH!) faster
-            # pre_filter_file:undef, # dump pre filtered code to this file
-            # post_filter_file:undef,  # dump post filtered code to this file
-            # timeout:30, # timeout in seconds
-            # timeout_vb:0, # no timeout logging
-            # tvb:0, # no thread logging
-            # nfs_sleep:1, # time to wait for NFS to catch up with file accesses
-            # # flexigrid checks the timeouts every
-            # # flexigrid_timeout_check_interval seconds
-            # flexigrid_timeout_check_interval:0.01,
-            # # this is set to 1 when the grid is finished
-            # flexigrid_finished:0,
-            # # allow signals by default
-            # 'no signals':0,
-            # # but perhaps disable specific signals?
-            # 'disable signal':{INT:0,ALRM:0,CONT:0,USR1:0,STOP:0},
-            # # dummy variables
-            # single_star_period:1e50,  # orbital period of a single star
-            # #### timers : set timers to 0 (or empty list) to ignore,
-            # #### NB these must be given context (e.g. main::xyz)
-            # #### for functions not in binary_grid
-            # timers:0,
-            # timer_subroutines:[
-            #     # this is a suggested default list
-            #     'flexigrid',
-            #         'set_next_alarm',
-            #     'vbout',
-            #         'vbout_fast',
-            #     'run_flexigrid_thread',
-            #         'thread_vb'
-            # ],
-            # ########################################
-            # # INPUT/OUTPUT
-            # ########################################
-            # blocking:undef, # not yet set
-            # # prepend command with stdbuf to stop buffering (if available)
-            # stdbuf_command:`stdbuf --version`=~/stdbuf \(GNU/ ? ' stdbuf -i0 -o0 -e0 ' : undef,
-            # vb:("@ARGV"=~/\Wvb=(\d+)\W/)[0] // 0, # set to 1 (or more) for verbose output to the screen
-            # log_dt_secs:1, # log output to stdout~every log_dt_secs seconds
-            # nmod:10, # every nmod models there is output to the screen,
-            # # if log_dt_secs has been exceeded also (ignored if 0)
-            # colour:1, # set to 1 to use the ANSIColor module for colour output
-            # log_args:0, # do not log args in files
-            # log_fins:0, # log end of runs too
-            #     sort_args:0, # do not sort args
-            # save_args:0, # do not save args in a string
-            # log_args_dir:$tmp, # where to output the args files
-            # always_reopen_arg_files:0, # if 1 then arg files are always closed and reopened
-            #   (may cause a lot of disk I/O)
-            # lazy_arg_sending:1, # if 1, the previous args are remembered and
-            # # only args that changed are sent (except M1, M2 etc. which always
-            # # need sending)
-            # # force output files to open on a local disk (not an NFS partion)
-            # # not sure how to do this on another OS
-            # force_local_hdd_use:($os eq 'unix'),
-            # # for verbose output, define the newline
-            # # For terminals use "\x0d", for files use "\n", in the
-            # # case of multiple threads this will be set to \n
-            # newline: "\x0d",
-            #     # use reset_stars_defaults
-            #     reset_stars_defaults:1,
-            # # set signal captures: argument determines behaviour when the code locks up
-            # # 0: exit
-            # # 1: reset and try the next star (does this work?!)
-            # alarm_procedure:1,
-            # # exit on eval failure?
-            # exit_on_eval_failure:1,
-            # ## functions: these should be set by perl lexical name
-            # ## (they are automatically converted to function pointers
-            # ## at runtime)
-            # # function to be called just before a thread is created
-            # thread_precreate_function:undef,
-            #     thread_precreate_function_pointer:undef,
-            # # function to be called just after a thread is created
-            # # (from inside the thread just before *grid () call)
-            # threads_entry_function:undef,
-            #     threads_entry_function_pointer:undef,
-            # # function to be called just after a thread is finished
-            # # (from inside the thread just after *grid () call)
-            # threads_flush_function:undef,
-            # threads_flush_function_pointer:undef,
-            # # function to be called just after a thread is created
-            # # (but external to the thread)
-            # thread_postrun_function:undef,
-            # thread_postrun_function_pointer:undef,
-            # # function to be called just before a thread join
-            # # (external to the thread)
-            # thread_prejoin_function:undef,
-            # thread_prejoin_function_pointer:undef,
-            # # default to using the internal join_flexigrid_thread function
-            # threads_join_function:'binary_grid2::join_flexigrid_thread',
-            # threads_join_function_pointer:sub{return $self->join_flexigrid_thread(@_)},
-            # # function to be called just after a thread join
-            # # (external to the thread)
-            # thread_postjoin_function:undef,
-            # thread_postjoin_function_pointer:undef,
-            # # usually, parse_bse in the main script is called
-            # parse_bse_function:'main::parse_bse',
-            #     parse_bse_function_pointer:undef,
-            # # if starting_snapshot_file is defined, load initial
-            # # values for the grid from the snapshot file rather
-            # # than a normal initiation: this enables you to
-            # # stop and start a grid
-            # starting_sn apshot_file:undef,
+            # control flow
+            'rungrid' : 1, # usually run the grid, but can be 0 to skip it (e.g. for condor/slurm admin)
+
         }
 
 
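Note: several executable paths above (sbatch, condor_submit, env, bash, pwd, date) are now resolved at import time with shutil.which, which returns the command's absolute path if it is on PATH and None otherwise; a machine without Slurm or HTCondor installed therefore gets a None default that the user can override. A minimal sketch of that behaviour:

    import shutil

    # shutil.which() searches PATH and returns an absolute path, or None
    # if the command is not installed on this machine.
    sbatch = shutil.which("sbatch")
    if sbatch is None:
        print("sbatch not found; set grid_options['slurm_sbatch'] manually")
    else:
        print("using", sbatch)
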
@@ -532,8 +276,62 @@
             "_store_memaddr": "Memory address of the store object for binary_c.",
             "failed_systems_threshold": "Variable storing the maximum number of systems that are allowed to fail before logging their command line arguments to failed_systems log files",
             "parse_function": "Function that the user can provide to handle the output the binary_c. This function has to take the arguments (self, output). Its best not to return anything in this function, and just store stuff in the self.grid_results dictionary, or just output results to a file",
-            "condor": "Int flag whether to use a condor type population evolution. Not implemented yet.",  # TODO: describe this in more detail
-            "slurm": "Int flag whether to use a Slurm type population evolution.",  # TODO: describe this in more detail
+            ############################################################
+            # Condor options
+            ############################################################
+            "condor": "Integer flag used to control HTCondor (referred to as Condor here) jobs. Default is 0 which means no Condor. 1 means launch Condor jobs. Do not manually set this to 2 (run Condor jobs) or 3 (join Condor job data) unless you know what you are doing, this is usually done for you.",
+            "condor_dir": "Working directory containing e.g. scripts, output, logs (e.g. should be NFS available to all jobs). This directory should not exist when you launch the Condor jobs.",
+            "condor_njobs": "Number of jobs that Condor will run.",
+            "condor_ClusterID": "Condor ClusterID variable, equivalent to Slurm's jobid",
+            "condor_Process": "Condor Process variable, equivalent to Slurm's jobarrayindex",
+            "condor_postpone_submit": "Debugging tool. If 1, the condor script is not submitted (useful for debugging). Default 0.",
+            "condor_postpone_join": "Use to delay the joining of Condor grid data. If 1, data is not joined, e.g. if you want to do it off the condor grid (e.g. with more RAM). Default 0.",
+            "condor_memory": "Integer. In MB, the memory use (ImageSize) of the job.",
+            "condor_warn_max_memory": "Integer. In MB: if condor_memory is set above this, warn the user because it is usually a mistake.",
+            "condor_universe": "The HTCondor \"universe\": this is \"vanilla\" by default.",
+            "condor_extra_settings": "Dictionary. Place to put extra configuration for the CONDOR submit file. The key and value of the dict will become the key and value of a line in the Condor submit file. Will be put in after all the other settings (and before the command). Take care not to overwrite something without really meaning to do so.",
+            'condor_snapshot_on_kill':"Integer. If 1 we save a snapshot on SIGKILL before exit.",
+            'condor_stream_output':"Boolean. If True, we activate Condor's stdout stream. If False, this data is copied at the end of the job.",
+            'condor_stream_error':"Boolean. If True, we activate Condor's stderr stream. If False, this data is copied at the end of the job.",
+            'condor_should_transfer_files' : "Condor's option to transfer files at the end of the job. You should set this to \"YES\".",
+            'condor_when_to_transfer_output' : "Condor's option to decide when output files are transferred. You should usually set this to \"ON_EXIT_OR_EVICT\".",
+            'condor_requirements': "Condor job requirements. These are passed to Condor directly, so you should read the HTCondor manual to learn about them. If no requirements exist, leave this as an empty string.",
+            'condor_env' : "Points to the location of the \"env\" command, e.g. /usr/bin/env or /bin/env, that is used in Condor launch scripts. This is set automatically on the submit machine, so if it is different on the nodes, you should set it manually.",
+            'condor_bash' : "Points to the location of the \"bash\" command, e.g. /bin/bash, that is used in Condor launch scripts. This is set automatically on the submit machine, so if it is different on the nodes, you should set it manually.",
+            'condor_pwd' : "Points to the location of the \"pwd\" command, e.g. /bin/pwd, that is used in Condor launch scripts. This is set automatically on the submit machine, so if it is different on the nodes, you should set it manually.",
+            'condor_date' : "Points to the location of the \"date\" command, e.g. /usr/bin/date, that is used in Condor launch scripts. This is set automatically on the submit machine, so if it is different on the nodes, you should set it manually.",
+
+            "condor_initial_dir" : "Directory from which condor scripts are run. If set to the default, None, this is the directory from which your script is run.",
+            "condor_submit" : "The condor_submit command, usually \"/usr/bin/condor_submit\" but will depend on your HTCondor installation.",
+            "condor_getenv" : "Boolean. If True, the default, condor takes the environment at submission and copies it to the jobs. You almost certainly want this to be True.",
+            "condor_batchname" : "Condor batchname option: this is what appears in condor_q. Defaults to \"binary_c-condor\".",
+            "condor_kill_sig" : "Signal Condor should use to stop a process. Note that grid.py expects this to be \"SIGINT\", which is the default.",
+            ############################################################
+            # Slurm options
+            ############################################################
+            "slurm": "Integer flag used to control Slurm jobs. Default is 0 which means no Slurm. 1 means launch Slurm jobs. Do not manually set this to 2 (run Slurm jobs) or 3 (join Slurm job data) unless you know what you are doing, this is usually done for you.",
+            "slurm_dir": "Working directory containing e.g. scripts, output, logs (e.g. should be NFS available to all jobs). This directory should not exist when you launch the Slurm jobs.",
+            "slurm_ntasks": "Integer. Number of CPUs required per array job: you usually only need this to be 1 (the default).",
+            "slurm_njobs": "Number of Slurm jobs to be launched.",
+            "slurm_jobid": "Slurm job id. Each job is numbered <slurm_jobid>.<slurm_jobarrayindex>.",
+            "slurm_jobarrayindex": "Slurm job array index. Each job is numbered <slurm_jobid>.<slurm_jobarrayindex>.",
+            "slurm_memory": "Memory required for the job. Should be in megabytes, e.g. \"512MB\" (the default).",
+            "slurm_warn_max_memory": "If we set slurm_memory in excess of this, warn the user because this is usually a mistake. (Default \"1024MB\".)",
+            "slurm_postpone_join": "Integer, default 0. If 1, do not join job results with Slurm; you will have to join them later manually.",
+            "slurm_jobname": "String which names the Slurm jobs, default \"binary_c-python\".",
+            "slurm_partition": "String containing the Slurm partition name. You should check your local Slurm installation to find out partition information, e.g. using the sview command.",
+            "slurm_time": "The time a Slurm job is allowed to take. Default is 0, which means no limit. Please check the Slurm documentation for the required format of this option.",
+            "slurm_postpone_sbatch": "Integer, default 0. If set to 1, do not launch Slurm jobs with sbatch, just make the scripts that would have been submitted.",
+            "slurm_array": "Override for Slurm's --array option, useful for rerunning jobs manually. Default None.",
+            "slurm_array_max_jobs" : "Override for the max number of concurrent Slurm array jobs. Default None.",
+            "slurm_extra_settings": "Dictionary of extra settings for Slurm to put in its launch script. Please see the Slurm documentation for the many options that are available to you.",
+            "slurm_sbatch": "The Slurm \"sbatch\" submission command, usually \"/usr/bin/sbatch\" but will depend on your Slurm installation. By default this is set automatically.",
+            'slurm_env' : "Points to the location of the \"env\" command, e.g. /usr/bin/env or /bin/env, that is used in Slurm scripts. This is set automatically on the submit machine, so if it is different on the nodes, you should set it manually.",
+            'slurm_bash' : "Points to the location of the \"bash\" command, e.g. /bin/bash, that is used in Slurm scripts. This is set automatically on the submit machine, so if it is different on the nodes, you should set it manually.",
+            'slurm_pwd' : "Points to the location of the \"pwd\" command, e.g. /bin/pwd, that is used in Slurm scripts. This is set automatically on the submit machine, so if it is different on the nodes, you should set it manually.",
+            'slurm_date' : "Points to the location of the \"date\" command, e.g. /usr/bin/date, that is used in Slurm scripts. This is set automatically on the submit machine, so if it is different on the nodes, you should set it manually.",
+
+            ############################################################
             "weight": "Weight factor for each system. The calculated probability is multiplied by this. If the user wants each system to be repeated several times, then this variable should not be changed, rather change the _repeat variable instead, as that handles the reduction in probability per system. This is useful for systems that have a process with some random element in it.",  # TODO: add more info here, regarding the evolution splitting.
             "repeat": "Factor of how many times a system should be repeated. Consider the evolution splitting binary_c argument for supernovae kick repeating.",  # TODO: make sure this is used.
             "evolution_type": "Variable containing the type of evolution used of the grid. Multiprocessing, linear processing or possibly something else (e.g. for Slurm or Condor).",
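
Note: for the condor_extra_settings and slurm_extra_settings dictionaries described above, each key/value pair becomes one "key = value" line in the generated submit or launch script. An illustrative sketch of that expansion (the actual writer in binarycpython may format these lines differently):

    # Illustrative only: expand an extra-settings dict into submit-file lines.
    extra_settings = {"accounting_group": "astro", "priority": "10"}
    for key, value in extra_settings.items():
        print("{} = {}".format(key, value))
    # accounting_group = astro
    # priority = 10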