diff --git a/binarycpython/utils/grid_options_defaults.py b/binarycpython/utils/grid_options_defaults.py index 7c7533b07f3835493373baaaabc028e40d592bae..a39f5bda30ffee0ae5e0d742e174ba2044e86b65 100644 --- a/binarycpython/utils/grid_options_defaults.py +++ b/binarycpython/utils/grid_options_defaults.py @@ -281,56 +281,56 @@ class grid_options_defaults(): ############################################################ # Condor "condor": "Integer flag used to control HTCondor (referred to as Condor here) jobs. Default is 0 which means no Condor. 1 means launch Condor jobs. Do not manually set this to 2 (run Condor jobs) or 3 (join Condor job data) unless you know what you are doing, this is usually done for you.", - "condor_dir": "Working directory containing e.g. scripts, output, logs (e.g. should be NFS available to all jobs). This directory should not exist when you launch the Condor jobs." - "condor_njobs": "number of jobs that Condor will run", - "condor_ClusterID": "Condor ClusterID variable, equivalent to Slurm's jobid", - "condor_Process": "Condor Process variable, equivalent to Slurm's jobarrayindex", - "condor_postpone_submit": "Debugging tool. If 1, the condor script is not submitted (useful for debugging). Default 0.", - "condor_postpone_join": "Use to delay the joining of Condor grid data. If 1, data is not joined, e.g. if you want to do it off the condor grid (e.g. with more RAM). Default 0.", + "condor_dir": "String. Working directory containing e.g. scripts, output, logs (e.g. should be NFS available to all jobs). This directory should not exist when you launch the Condor jobs.", + "condor_njobs": "Integer. Number of jobs that Condor will run", + "condor_ClusterID": "Integer. Condor ClusterID variable, equivalent to Slurm's jobid. Jobs are numbered <ClusterID>.<Process>", + "condor_Process": "Integer. Condor Process variable, equivalent to Slurm's jobarrayindex. Jobs are numbered <ClusterID>.<Process>", + "condor_postpone_submit": "Integer. Debugging tool. 
If 1, the condor script is not submitted (useful for debugging). Default 0.", + "condor_postpone_join": "Integer. Use to delay the joining of Condor grid data. If 1, data is not joined, e.g. if you want to do it off the condor grid (e.g. with more RAM). Default 0.", "condor_memory": "Integer. In MB, the memory use (ImageSize) of the job.", "condor_warn_max_memory": "Integer. In MB: if condor_memory is set in excess of this, warn the user because this is usually a mistake.", - "condor_universe": "The HTCondor \"universe\": this is \"vanilla\" by default.", + "condor_universe": "String. The HTCondor \"universe\": this is \"vanilla\" by default.", "condor_extra_settings": "Dictionary. Place to put extra configuration for the CONDOR submit file. The key and value of the dict will become the key and value of the line in the condor submit file. Will be put in after all the other settings (and before the command). Take care not to overwrite something without really meaning to do so.", 'condor_snapshot_on_kill':"Integer. If 1 we save a snapshot on SIGKILL before exit.", 'condor_stream_output':"Boolean. If True, we activate Condor's stdout stream. If False, this data is copied at the end of the job.", 'condor_stream_error':"Boolean. If True, we activate Condor's stderr stream. If False, this data is copied at the end of the job.", - 'condor_should_transfer_files' : "Condor's option to transfer files at the end of the job. You should set this to \"YES\"", - 'condor_when_to_transfer_output' : "Condor's option to decide when output files are transferred. You should usually set this to \"ON_EXIT_OR_EVICT\"", - 'condor_requirements':'', "Condor job requirements. These are passed to Condor directly, you should read the HTCondor manual to learn about this. If no requirements exist, leave as an string.", - 'condor_env' : "Points the location of the \"env\" command, e.g. /usr/bin/env or /bin/env, that is used in Condor launch scripts. 
This is set automatically on the submit machine, so if it is different on the nodes, you should set it manually.", - 'condor_bash' : "Points the location of the \"bash\" command, e.g. /bin/bash, that is used in Condor launch scripts. This is set automatically on the submit machine, so if it is different on the nodes, you should set it manually.", - 'condor_pwd' : "Points the location of the \"pwd\" command, e.g. /bin/pwd, that is used in Condor launch scripts. This is set automatically on the submit machine, so if it is different on the nodes, you should set it manually.", - 'condor_date' : "Points the location of the \"date\" command, e.g. /usr/bin/date, that is used in Condor launch scripts. This is set automatically on the submit machine, so if it is different on the nodes, you should set it manually.", - - "condor_initial_dir" : "Directory from which condor scripts are run. If set to the default, None, this is the directory from which your script is run.", - "condor_submit" : "The Condor_submit command, usually \"/usr/bin/condor_submit\" but will depend on your HTCondor installation.", + 'condor_should_transfer_files' : "String. Condor's option to transfer files at the end of the job. You should set this to \"YES\"", + 'condor_when_to_transfer_output' : "String. Condor's option to decide when output files are transferred. You should usually set this to \"ON_EXIT_OR_EVICT\"", + 'condor_requirements': "String. Condor job requirements. These are passed to Condor directly, you should read the HTCondor manual to learn about this. If no requirements exist, leave as an empty string.", + 'condor_env' : "String. Points the location of the \"env\" command, e.g. /usr/bin/env or /bin/env, that is used in Condor launch scripts. This is set automatically on the submit machine, so if it is different on the nodes, you should set it manually.", + 'condor_bash' : "String. Points the location of the \"bash\" command, e.g. /bin/bash, that is used in Condor launch scripts. 
This is set automatically on the submit machine, so if it is different on the nodes, you should set it manually.", + 'condor_pwd' : "String. Points the location of the \"pwd\" command, e.g. /bin/pwd, that is used in Condor launch scripts. This is set automatically on the submit machine, so if it is different on the nodes, you should set it manually.", + 'condor_date' : "String. Points the location of the \"date\" command, e.g. /usr/bin/date, that is used in Condor launch scripts. This is set automatically on the submit machine, so if it is different on the nodes, you should set it manually.", + + "condor_initial_dir" : "String. Directory from which condor scripts are run. If set to the default, None, this is the directory from which your script is run.", + "condor_submit" : "String. The Condor_submit command, usually \"/usr/bin/condor_submit\" but will depend on your HTCondor installation.", "condor_getenv" : "Boolean. If True, the default, condor takes the environment at submission and copies it to the jobs. You almost certainly want this to be True.", - "condor_batchname" : "Condor batchname option: this is what appears in condor_q. Defaults to \"binary_c-condor\"", - "condor_kill_sig" : "Signal Condor should use to stop a process. Note that grid.py expects this to be "SIGINT" which is the default.", + "condor_batchname" : "String. Condor batchname option: this is what appears in condor_q. Defaults to \"binary_c-condor\"", + "condor_kill_sig" : "String. Signal Condor should use to stop a process. Note that grid.py expects this to be \"SIGINT\" which is the default.", ############################################################ # Slurm options ############################################################ "slurm": "Integer flag used to control Slurm jobs. Default is 0 which means no Slurm. 1 means launch Slurm jobs. 
Do not manually set this to 2 (run Slurm jobs) or 3 (join Slurm job data) unless you know what you are doing, this is usually done for you.", - "slurm_dir": "Working directory containing e.g. scripts, output, logs (e.g. should be NFS available to all jobs). This directory should not exist when you launch the Slurm jobs.", + "slurm_dir": "String. Working directory containing e.g. scripts, output, logs (e.g. should be NFS available to all jobs). This directory should not exist when you launch the Slurm jobs.", "slurm_ntasks": "Integer. Number of CPUs required per array job: usually only need this to be 1 (the default).", - "slurm_njobs": "Number of Slurm jobs to be launched.", - "slurm_jobid": "Slurm job id. Each job is numbered <slurm_jobid>.<slurm_jobarrayindex>.", - "slurm_jobarrayindex": "Slurm job array index. Each job is numbered <slurm_jobid>.<slurm_jobarrayindex>.", - "slurm_memory": "memory required for the job. Should be in megabytes, e.g. \"512MB\" (the default).", - "slurm_warn_max_memory": "If we set slurm_memory in excess of this, warn the user because this is usually a mistake. (Default \"1024MB\".)", + "slurm_njobs": "Integer. Number of Slurm jobs to be launched.", + "slurm_jobid": "Integer. Slurm job id. Each job is numbered <slurm_jobid>.<slurm_jobarrayindex>.", + "slurm_jobarrayindex": "Integer. Slurm job array index. Each job is numbered <slurm_jobid>.<slurm_jobarrayindex>.", + "slurm_memory": "String. Memory required for the job. Should be in megabytes in a format that Slurm understands, e.g. \"512MB\" (the default).", + "slurm_warn_max_memory": "String. If we set slurm_memory in excess of this, warn the user because this is usually a mistake. Default \"1024MB\".", "slurm_postpone_join": "Integer, default 0. If 1 do not join job results with Slurm, instead you have to do it later manually.", "slurm_jobname": "String which names the Slurm jobs, default \"binary_c-python\".", "slurm_partition": "String containing the Slurm partition name. 
You should check your local Slurm installation to find out partition information, e.g. using the sview command.", - "slurm_time": "The time a Slurm job is allowed to take. Default is 0 which means no limit. Please check the Slurm documentation for required format of this option.", + "slurm_time": "String. The time a Slurm job is allowed to take. Default is 0 which means no limit. Please check the Slurm documentation for required format of this option.", "slurm_postpone_sbatch": "Integer, default 0. If set to 1, do not launch Slurm jobs with sbatch, just make the scripts that would have.", - "slurm_array": "Override for Slurm's --array option, useful for rerunning jobs manually. Default None.", - "slurm_array_max_jobs" : "Override for the max number of concurrent Slurm array jobs. Default None.", + "slurm_array": "String. Override for Slurm's --array option, useful for rerunning jobs manually. Default None.", + "slurm_array_max_jobs" : "Integer. Override for the max number of concurrent Slurm array jobs. Default None.", "slurm_extra_settings": "Dictionary of extra settings for Slurm to put in its launch script. Please see the Slurm documentation for the many options that are available to you.", - "slurm_sbatch": "The Slurm \"sbatch\" submission command, usually \"/usr/bin/sbatch\" but will depend on your Slurm installation. By default is set automatically.", - 'slurm_env' : "Points the location of the \"env\" command, e.g. /usr/bin/env or /bin/env, that is used in Slurm scripts. This is set automatically on the submit machine, so if it is different on the nodes, you should set it manually.", - 'slurm_bash' : "Points the location of the \"bash\" command, e.g. /bin/bash, that is used in Slurm scripts. This is set automatically on the submit machine, so if it is different on the nodes, you should set it manually.", - 'slurm_pwd' : "Points the location of the \"pwd\" command, e.g. /bin/pwd, that is used in Slurm scripts. 
This is set automatically on the submit machine, so if it is different on the nodes, you should set it manually.", - 'slurm_date' : "Points the location of the \"date\" command, e.g. /usr/bin/date, that is used in Slurm scripts. This is set automatically on the submit machine, so if it is different on the nodes, you should set it manually.", + "slurm_sbatch": "String. The Slurm \"sbatch\" submission command, usually \"/usr/bin/sbatch\" but will depend on your Slurm installation. By default is set automatically.", + 'slurm_env' : "String. Points the location of the \"env\" command, e.g. /usr/bin/env or /bin/env, that is used in Slurm scripts. This is set automatically on the submit machine, so if it is different on the nodes, you should set it manually.", + 'slurm_bash' : "String. Points the location of the \"bash\" command, e.g. /bin/bash, that is used in Slurm scripts. This is set automatically on the submit machine, so if it is different on the nodes, you should set it manually.", + 'slurm_pwd' : "String. Points the location of the \"pwd\" command, e.g. /bin/pwd, that is used in Slurm scripts. This is set automatically on the submit machine, so if it is different on the nodes, you should set it manually.", + 'slurm_date' : "String. Points the location of the \"date\" command, e.g. /usr/bin/date, that is used in Slurm scripts. This is set automatically on the submit machine, so if it is different on the nodes, you should set it manually.", ############################################################ "weight": "Weight factor for each system. The calculated probability is multiplied by this. If the user wants each system to be repeated several times, then this variable should not be changed, rather change the _repeat variable instead, as that handles the reduction in probability per system. This is useful for systems that have a process with some random element in it.", # TODO: add more info here, regarding the evolution splitting.