Commit b0d8e176 authored by Izzard

fix typos in the grid defaults

attempt to fix (finally?) the joiningfile logic
parent 31fbe44e
@@ -52,6 +52,9 @@ class HPC(condor,slurm):
id : the job ID number, or self.HPC_jobID_tuple()[0] if None (default=None).
overwrite : if True, overwrite an existing joiningfile (default=False)
error_on_overwrite : if True, and we try to overwrite, issue an error and exit (default=False)
+ Returns:
+ True if the file is made, False otherwise.
"""
@@ -69,22 +72,25 @@ class HPC(condor,slurm):
# check the joiningfile doesn't exist
if not overwrite and os.path.isfile(file):
# file already exists
print("Cannot make joiningfile at {file} because it already exists.".format(file=file))
# perhaps cause an error if it does
# perhaps exit here? (e.g. for debugging)
if error_on_overwrite:
self.exit(code=1)
- # write the joiningfile
- print("Making joiningfile at {file} with range 0 to {n}".format(
- file=file,
- n=n
- ))
- with open(file,"w",encoding="utf-8") as f:
- for i in range(0,n):
- f.write(os.path.join(prefix,
- "{i}.gz\n".format(i=i)))
- f.close()
- return
+ x = False
+ else:
+ # write the joiningfile
+ print("Making joiningfile at {file} with range 0 to {n}".format(
+ file=file,
+ n=n
+ ))
+ with open(file,"w",encoding="utf-8") as f:
+ for i in range(0,n):
+ f.write(os.path.join(prefix,
+ "{i}.gz\n".format(i=i)))
+ f.close()
+ x = True
+ return x
def HPC_joinfiles(self,joinlist=None):
"""
......
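For reference, the branch added above can be read as the following standalone sketch of the same control flow. The function name make_joiningfile_sketch and the use of SystemExit in place of the class's self.exit(code=1) are illustrative stand-ins, not part of the HPC class.

import os

def make_joiningfile_sketch(file, prefix, n, overwrite=False, error_on_overwrite=False):
    # Write prefix/0.gz ... prefix/(n-1).gz, one per line, to `file`,
    # unless the file already exists and overwrite is False.
    if not overwrite and os.path.isfile(file):
        # file already exists: report it, optionally abort, and signal failure
        print("Cannot make joiningfile at {file} because it already exists.".format(file=file))
        if error_on_overwrite:
            raise SystemExit(1)  # stand-in for self.exit(code=1)
        made = False
    else:
        print("Making joiningfile at {file} with range 0 to {n}".format(file=file, n=n))
        with open(file, "w", encoding="utf-8") as f:
            for i in range(0, n):
                f.write(os.path.join(prefix, "{i}.gz\n".format(i=i)))
        made = True
    return made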
@@ -201,7 +201,7 @@ class grid_options_defaults():
########################################
"condor": 0, # 1 to use condor, 0 otherwise
"condor_dir": "", # working directory containing e.g. scripts, output, logs (e.g. should be NFS available to all)
"condor_njobs": "", # number of scripts/jobs that CONDOR will run in total
"condor_njobs": 0, # number of scripts/jobs that CONDOR will run in total
"condor_ClusterID": None, # condor cluster id, equivalent to Slurm's jobid
"condor_Process": None, # condor process, equivalent to Slurm's jobarrayindex
"condor_postpone_submit": 0, # if 1, the condor script is not submitted (useful for debugging). Default 0.
@@ -281,7 +281,7 @@ class grid_options_defaults():
############################################################
# Condor
"condor": "Integer flag used to control HTCondor (referred to as Condor here) jobs. Default is 0 which means no Condor. 1 means launch Condor jobs. Do not manually set this to 2 (run Condor jobs) or 3 (join Condor job data) unless you know what you are doing, this is usually done for you.",
"condor_dir": "String. Working directory containing e.g. scripts, output, logs (e.g. should be NFS available to all jobs). This directory should not exist when you launch the Condor jobs."
"condor_dir": "String. Working directory containing e.g. scripts, output, logs (e.g. should be NFS available to all jobs). This directory should not exist when you launch the Condor jobs.",
"condor_njobs": "Integer. Number of jobs that Condor will run",
"condor_ClusterID": "Integer. Condor ClusterID variable, equivalent to Slurm's jobid. Jobs are numbered <ClusterID>.<Process>",
"condor_Process": "Integer. Condor Process variable, equivalent to Slurm's jobarrayindex. Jobs are numbered <ClusterID>.<Process>",
@@ -296,7 +296,7 @@ class grid_options_defaults():
'condor_stream_error':"Boolean. If True, we activate Condor's stderr stream. If False, this data is copied at the end of the job.",
'condor_should_transfer_files' : "String. Condor's option to transfer files at the end of the job. You should set this to \"YES\"",
'condor_when_to_transfer_output' : "String. Condor's option to decide when output files are transferred. You should usually set this to \"ON_EXIT_OR_EVICT\"",
- 'condor_requirements':'', "String. Condor job requirements. These are passed to Condor directly, you should read the HTCondor manual to learn about this. If no requirements exist, leave as an empty string.",
+ 'condor_requirements': "String. Condor job requirements. These are passed to Condor directly, you should read the HTCondor manual to learn about this. If no requirements exist, leave as an empty string.",
'condor_env' : "String. Points to the location of the \"env\" command, e.g. /usr/bin/env or /bin/env, that is used in Condor launch scripts. This is set automatically on the submit machine, so if it is different on the nodes, you should set it manually.",
'condor_bash' : "String. Points to the location of the \"bash\" command, e.g. /bin/bash, that is used in Condor launch scripts. This is set automatically on the submit machine, so if it is different on the nodes, you should set it manually.",
'condor_pwd' : "String. Points to the location of the \"pwd\" command, e.g. /bin/pwd, that is used in Condor launch scripts. This is set automatically on the submit machine, so if it is different on the nodes, you should set it manually.",
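The condor_env, condor_bash and condor_pwd descriptions say these paths are detected automatically on the submit machine, but the mechanism is not shown in this excerpt. A plausible sketch using the standard library, offered only as an assumption about how such detection could work:

import shutil

# Sketch only: resolve the helper commands on the current machine, falling back
# to common locations. This is not the library's actual detection code.
condor_env = shutil.which("env") or "/usr/bin/env"
condor_bash = shutil.which("bash") or "/bin/bash"
condor_pwd = shutil.which("pwd") or "/bin/pwd"
print(condor_env, condor_bash, condor_pwd)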
@@ -312,7 +312,7 @@ class grid_options_defaults():
############################################################
"slurm": "Integer flag used to control Slurm jobs. Default is 0 which means no Slurm. 1 means launch Slurm jobs. Do not manually set this to 2 (run Slurm jobs) or 3 (join Slurm job data) unless you know what you are doing, this is usually done for you.",
"slurm_dir": "String. Working directory containing e.g. scripts, output, logs (e.g. should be NFS available to all jobs). This directory should not exist when you launch the Slurm jobs.",
"slurm_ntasks": "Integer. Number of CPUs required per array job: usually only need this to be 1 (the default)."
"slurm_ntasks": "Integer. Number of CPUs required per array job: usually only need this to be 1 (the default).",
"slurm_njobs": "Integer. Number of Slurm jobs to be launched.",
"slurm_jobid": "Integer. Slurm job id. Each job is numbered <slurm_jobid>.<slurm_jobarrayindex>.",
"slurm_jobarrayindex": "Integer. Slurm job array index. Each job is numbered <slurm_jobid>.<slurm_jobarrayindex>.",
......
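slurm_jobid and slurm_jobarrayindex together give the <slurm_jobid>.<slurm_jobarrayindex> numbering described above. How the library fills them is not shown here, but inside a Slurm array task the standard environment variables carry the same two numbers, as in this sketch:

import os

# SLURM_ARRAY_JOB_ID and SLURM_ARRAY_TASK_ID are set by Slurm inside array tasks;
# the fallback of 0 is only so this sketch also runs outside Slurm.
slurm_jobid = int(os.environ.get("SLURM_ARRAY_JOB_ID", 0))
slurm_jobarrayindex = int(os.environ.get("SLURM_ARRAY_TASK_ID", 0))
print("{jobid}.{index}".format(jobid=slurm_jobid, index=slurm_jobarrayindex))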