diff --git a/binarycpython/utils/custom_logging_functions.py b/binarycpython/utils/custom_logging_functions.py index aaaa63371377c4a64e6f1731ecc91b2eff126a11..c0d963c43406e25041684c1b318156b9ec4d7866 100644 --- a/binarycpython/utils/custom_logging_functions.py +++ b/binarycpython/utils/custom_logging_functions.py @@ -19,8 +19,10 @@ def autogen_C_logging_code(logging_dict: dict, verbose: int = 0) -> Optional[str Input is a dictionary where the key is the header of that logging line and items which are lists of parameters that will be put in that logging line + Each list element is appended to 'stardata->' in the autogenerated code. + Example: - input dictionary should look like this:: + Input dictionary should look like this:: {'MY_STELLAR_DATA': [ diff --git a/binarycpython/utils/grid.py b/binarycpython/utils/grid.py index 35ab71bd3278ae79646bfc52ee9e1c528e551490..90eb785e97fa2538096bc4621ed50889dfa3ecf3 100644 --- a/binarycpython/utils/grid.py +++ b/binarycpython/utils/grid.py @@ -18,6 +18,9 @@ Tasks: - TODO: consider spreading the functions over more files. - TODO: type the private functions - TODO: fix the correct object types for the default values of the bse_options + - TODO: uncomment and implement the HPC functionality + - TODO: think of a clean and nice way to unload and remove the custom_logging_info library from memory (and from disk) + - TODO: think of a nice way to remove the loaded grid_code/ generator from memory. """ import os @@ -55,16 +58,16 @@ from binarycpython.utils.functions import ( merge_dicts, BinaryCEncoder, ) -from binarycpython.utils.hpc_functions import ( - get_condor_version, - get_slurm_version, - create_directories_hpc, - path_of_calling_script, - get_python_details, -) +# from binarycpython.utils.hpc_functions import ( +# get_condor_version, +# get_slurm_version, +# create_directories_hpc, +# path_of_calling_script, +# get_python_details, +# ) from binarycpython import _binary_c_bindings - +import copy class Population: """ @@ -77,13 +80,20 @@ class Population: Initialisation function of the population class """ + # Different sections of options + + # Get the binary_c defaults and create a cleaned-up dict. + # Setting stuff will check against the defaults to see if the input is correct. self.defaults = get_defaults() self.cleaned_up_defaults = self._cleanup_defaults() - - # Different sections of options + + # Make the input dictionary self.bse_options = {} # bse_options is just empty. - # Setting stuff will check against the defaults to see if the input is correct. - self.grid_options = grid_options_defaults_dict.copy() + + # Grid options + self.grid_options = copy.deepcopy(grid_options_defaults_dict) + + # Custom options self.custom_options = {} # Argline dict @@ -667,6 +677,14 @@ class Population: # Evolution functions ################################################### + def _pre_run_cleanup(self): + """ + Function to clean up some of the grid attributes before a run (e.g. the results and the ensemble results) + """ + + # Empty the results + self.grid_options["results"] = {} + - def evolve(self) -> None: + def evolve(self) -> dict: """ Entrypoint function of the whole object. 
From here, based on the settings, @@ -681,6 +699,9 @@ class Population: (that doesn't mean this cannot be run on a server with many cores) """ + # Make sure no state from a previous run is hanging around + self._pre_run_cleanup() + # Check which type: if self.grid_options["slurm"] == 1: # Execute slurm subroutines @@ -693,6 +714,29 @@ class Population: # Execute population evolution subroutines self.evolve_population() + # Gather the interesting quantities of the run in a dictionary and return that as the analytics of the run. + analytics_dict = { + "population_name": self.grid_options["_population_id"], + "evolution_type": self.grid_options["evolution_type"], + "failed_count": self.grid_options["_failed_count"], + "failed_prob": self.grid_options["_failed_prob"], + "failed_systems_error_codes": self.grid_options[ + "_failed_systems_error_codes" + ].copy(), + "errors_exceeded": self.grid_options["_errors_exceeded"], + "errors_found": self.grid_options["_errors_found"], + "total_probability": self.grid_options["_probtot"], + "total_count": self.grid_options["_count"], + "start_timestamp": self.grid_options["_start_time_evolution"], + "end_timestamp": self.grid_options["_end_time_evolution"], + } + + ## + # Clean up code: remove files, unset values. This is placed in the general evolve function because that makes for easier control + self._cleanup() + + return analytics_dict + def evolve_population(self): """ Function to evolve populations. This handles the setting up, evolving @@ -733,6 +777,7 @@ class Population: ) ) + # Record the time at which the evolution ended self.grid_options["_end_time_evolution"] = time.time() # Log and print some information @@ -765,7 +810,7 @@ class Population: "ALL" if not self.grid_options["_errors_exceeded"] else "SOME (only the first ones, as there were too many to log all of them)", - os.path.join(self.grid_options["tmp_dir"], "failed_systemsX.txt"), + os.path.join(self.grid_options["tmp_dir"], "failed_systems_{}_X.txt".format(self.grid_options["_population_id"])), ), self.grid_options["verbosity"], 0, @@ -777,10 +822,6 @@ class Population: 0, ) - ## - # Clean up code: remove files, unset values. - self._cleanup() - def _evolve_population_grid(self): """ Function to evolve the population with multiprocessing approach. @@ -957,8 +998,8 @@ class Population: ], "_errors_exceeded": self.grid_options["_errors_exceeded"], "_errors_found": self.grid_options["_errors_found"], - "_probtot": self.grid_options["_probtot"], - "_count": self.grid_options["_count"], + "_probtot": probability_of_systems_run, + "_count": number_of_systems_run, } verbose_print( @@ -1048,17 +1089,22 @@ class Population: if self.bse_options.get("ensemble", None): if not self.bse_options["ensemble_defer"] == 1: verbose_print( - "Error, if you want to run an ensemble in a population, the output needs to be deferred", + "Error, if you want to run an ensemble in a population, the output needs to be deferred. Please set 'ensemble_defer' to 1", self.grid_options["verbosity"], 0, ) raise ValueError + if not self.grid_options["ensemble_output_name"]: + verbose_print( + "Error: if you want to run an ensemble in a population, please set 'ensemble_output_name'. 
It will be combined with 'data_dir' to write the output of the ensembles to", self.grid_options['verbosity'], 0) + raise ValueError + ## Load persistent_data_memaddr if necessary: self._load_persistent_data_memory_dict() # Check which type of population generation - if self.grid_options["population_type"] == "grid": + if self.grid_options["evolution_type"] == "grid": ####################### # Dry run and getting starcount self.grid_options["_probtot"] = 0 @@ -1099,7 +1145,7 @@ class Population: self._load_grid_function() # Source file - elif self.grid_options["population_type"] == "source_file": + elif self.grid_options["evolution_type"] == "source_file": ####################### # Dry run and getting starcount self.grid_options["_probtot"] = 0 @@ -1133,6 +1179,9 @@ class Population: # self._load_grid_function() + + # elif self.grid_options["evolution_type"] == "mc": + ####### def _cleanup(self): @@ -1169,6 +1218,9 @@ class Population: # Unload store _binary_c_bindings.free_store_memaddr(self.grid_options["_store_memaddr"]) + # Unload/free custom_logging_code + # TODO: cleanup custom logging code. + ################################################### # Gridcode functions # @@ -1584,9 +1636,10 @@ class Population: self.grid_options["code_string"] = code_string + # Write to file gridcode_filename = os.path.join( - self.grid_options["tmp_dir"], "example_grid.py" + self.grid_options["tmp_dir"], "binary_c_grid_{}.py".format(self.grid_options["_population_id"]) ) self.grid_options["gridcode_filename"] = gridcode_filename @@ -1768,219 +1821,219 @@ class Population: # subroutines to run SLURM grids ################################################### - def _slurm_grid(self): - """ - Main function that manages the SLURM setup. - - Has three stages: - - - setup - - evolve - - join - - Which stage is used is determined by the value of grid_options['slurm_command']: - - <empty>: the function will know its the user that executed the script and - it will set up the necessary condor stuff - - 'evolve': evolve_population is called to evolve the population of stars - - 'join': We will attempt to join the output - """ - - # Check version - # TODO: Put in function - slurm_version = get_slurm_version() - if not slurm_version: - verbose_print( - "SLURM: Error: No installation of slurm found", - self.grid_options["verbosity"], - 0, - ) - else: - major_version = int(slurm_version.split(".")[0]) - minor_version = int(slurm_version.split(".")[1]) - - if major_version > 17: - verbose_print( - "SLURM: Found version {} which is new enough".format(slurm_version), - self.grid_options["verbosity"], - 1, - ) - else: - verbose_print( - "SLURM: Found version {} which is too old (we require 17+)".format( - slurm_version - ), - self.grid_options["verbosity"], - 0, - ) - - verbose_print( - "SLURM: Running slurm grid. command={}".format( - self.grid_options["slurm_command"] - ), - self.grid_options["verbosity"], - 1, - ) - - if not self.grid_options["slurm_command"]: - # Setting up - verbose_print( - "SLURM: Main controller script. Setting up", - self.grid_options["verbosity"], - 1, - ) + # def _slurm_grid(self): + # """ + # Main function that manages the SLURM setup. 
- # Set up working directories: - verbose_print( - "SLURM: creating working directories", self.grid_options["verbosity"], 1 - ) - create_directories_hpc(self.grid_options["slurm_dir"]) - - # Create command - python_details = get_python_details() - scriptname = path_of_calling_script() - command = "{} {}".format(python_details["executable"], scriptname) - command += ' --cmdline "{}"'.format( - " ".join( - [ - "{}".format(self.grid_options["_commandline_input"]), - "offset=$jobarrayindex", - "modulo={}".format(self.grid_options["slurm_njobs"]), - "vb={}".format(self.grid_options["verbosity"]), - "slurm_jobid=$jobid", - "slurm_jobarrayindex=$jobarrayindex", - "slurm_jobname='binary_grid_'$jobid'.'$jobarrayindex", - "slurm_njobs={}".format(self.grid_options["slurm_njobs"]), - "slurm_dir={}".format(self.grid_options["slurm_dir"]), - "rungrid=1", - "slurm_command=evolve", - ] - ).strip() - ) + # Has three stages: - # Construct dict with settings for the script while checking the settings at the same time - # Check settings: - # TODO: check settings - # Create SLURM_DIR script: - slurm_script_options = {} - slurm_script_options["n"] = self.grid_options["slurm_njobs"] - slurm_script_options["njobs"] = self.grid_options["slurm_njobs"] - slurm_script_options["dir"] = self.grid_options["slurm_dir"] - slurm_script_options["memory"] = self.grid_options["slurm_memory"] - slurm_script_options["working_dir"] = self.grid_options[ - "slurm_dir" - ] # TODO: check this - slurm_script_options["command"] = command - # slurm_script_options['streams'] = self.grid_options['streams'] - - # Construct the script - slurm_script_contents = "" - slurm_script_contents += "#!/bin/bash\n" - slurm_script_contents += "# Slurm file for binary_grid and slurm\n" - slurm_script_contents += "#SBATCH --error={}/stderr/%A.%a\n".format( - self.grid_options["slurm_dir"] - ) - slurm_script_contents += "#SBATCH --output={}/stdout/%A.%a\n".format( - self.grid_options["slurm_dir"] - ) - slurm_script_contents += "#SBATCH --job-name={}\n".format( - self.grid_options["slurm_jobname"] - ) - slurm_script_contents += "#SBATCH --partition={}\n".format( - self.grid_options["slurm_partition"] - ) - slurm_script_contents += "#SBATCH --time={}\n".format( - self.grid_options["slurm_time"] - ) - slurm_script_contents += "#SBATCH --mem={}\n".format( - self.grid_options["slurm_memory"] - ) - slurm_script_contents += "#SBATCH --ntasks={}\n".format( - self.grid_options["slurm_ntasks"] - ) - slurm_script_contents += "#SBATCH --array={}\n".format( - self.grid_options["slurm_array"] - ) - slurm_script_contents += "\n" - - if self.grid_options["slurm_extra_settings"]: - slurm_script_contents += "# Extra settings by user:" - slurm_script_contents += "\n".join( - [ - "--{}={}".format( - key, self.grid_options["slurm_extra_settings"][key] - ) - for key in self.grid_options["slurm_extra_settings"] - ] - ) + # - setup + # - evolve + # - join - slurm_script_contents += '# set status to "running"\n' - slurm_script_contents += ( - 'echo "running" > {}/status/$jobid.$jobarrayindex\n\n'.format( - self.grid_options["slurm_dir"] - ) - ) - slurm_script_contents += "# run grid of stars\n" - slurm_script_contents += "{}\n\n".format(command) - slurm_script_contents += '# set status to "finished"\n' - slurm_script_contents += ( - 'echo "finished" > {}/status/$jobid.$jobarrayindex\n'.format( - self.grid_options["slurm_dir"] - ) - ) - slurm_script_contents += "\n" + # Which stage is used is determined by the value of grid_options['slurm_command']: - if 
self.grid_options["slurm_postpone_join"]: - slurm_script_contents += "{} rungrid=0 results_hash_dumpfile={}/results/$jobid.all slurm_command=join\n".format( - command, self.grid_options["slurm_dir"] - ) + # <empty>: the function will know its the user that executed the script and + # it will set up the necessary condor stuff - # Write script to file - slurm_script_filename = os.path.join( - self.grid_options["slurm_dir"], "slurm_script" - ) - with open(slurm_script_filename, "w") as slurm_script_file: - slurm_script_file.write(slurm_script_contents) + # 'evolve': evolve_population is called to evolve the population of stars - # Execute or postpone - if self.grid_options["slurm_postpone_sbatch"]: - # Execute or postpone the real call to sbatch - sbatch_command = "sbatch {}".format(slurm_script_filename) - verbose_print( - "running slurm script {}".format(slurm_script_filename), - self.grid_options["verbosity"], - 0, - ) - # subprocess.Popen(sbatch_command, close_fds=True) - # subprocess.Popen(sbatch_command, creationflags=subprocess.DETACHED_PROCESS) - verbose_print("Submitted scripts.", self.grid_options["verbosity"], 0) - else: - verbose_print( - "Slurm script is in {} but hasnt been executed".format( - slurm_script_filename - ), - self.grid_options["verbosity"], - 0, - ) - - verbose_print("all done!", self.grid_options["verbosity"], 0) - exit() - - elif self.grid_options["slurm_command"] == "evolve": - # Part to evolve the population. - # TODO: decide how many CPUs - verbose_print( - "SLURM: Evolving population", self.grid_options["verbosity"], 1 - ) - - # - self.evolve_population() + # 'join': We will attempt to join the output + # """ - elif self.grid_options["slurm_command"] == "join": - # Joining the output. - verbose_print("SLURM: Joining results", self.grid_options["verbosity"], 1) + # # Check version + # # TODO: Put in function + # slurm_version = get_slurm_version() + # if not slurm_version: + # verbose_print( + # "SLURM: Error: No installation of slurm found", + # self.grid_options["verbosity"], + # 0, + # ) + # else: + # major_version = int(slurm_version.split(".")[0]) + # minor_version = int(slurm_version.split(".")[1]) + + # if major_version > 17: + # verbose_print( + # "SLURM: Found version {} which is new enough".format(slurm_version), + # self.grid_options["verbosity"], + # 1, + # ) + # else: + # verbose_print( + # "SLURM: Found version {} which is too old (we require 17+)".format( + # slurm_version + # ), + # self.grid_options["verbosity"], + # 0, + # ) + + # verbose_print( + # "SLURM: Running slurm grid. command={}".format( + # self.grid_options["slurm_command"] + # ), + # self.grid_options["verbosity"], + # 1, + # ) + + # if not self.grid_options["slurm_command"]: + # # Setting up + # verbose_print( + # "SLURM: Main controller script. 
Setting up", + # self.grid_options["verbosity"], + # 1, + # ) + + # # Set up working directories: + # verbose_print( + # "SLURM: creating working directories", self.grid_options["verbosity"], 1 + # ) + # create_directories_hpc(self.grid_options["slurm_dir"]) + + # # Create command + # python_details = get_python_details() + # scriptname = path_of_calling_script() + # command = "{} {}".format(python_details["executable"], scriptname) + # command += ' --cmdline "{}"'.format( + # " ".join( + # [ + # "{}".format(self.grid_options["_commandline_input"]), + # "offset=$jobarrayindex", + # "modulo={}".format(self.grid_options["slurm_njobs"]), + # "vb={}".format(self.grid_options["verbosity"]), + # "slurm_jobid=$jobid", + # "slurm_jobarrayindex=$jobarrayindex", + # "slurm_jobname='binary_grid_'$jobid'.'$jobarrayindex", + # "slurm_njobs={}".format(self.grid_options["slurm_njobs"]), + # "slurm_dir={}".format(self.grid_options["slurm_dir"]), + # "rungrid=1", + # "slurm_command=evolve", + # ] + # ).strip() + # ) + + # # Construct dict with settings for the script while checking the settings at the same time + # # Check settings: + # # TODO: check settings + # # Create SLURM_DIR script: + # slurm_script_options = {} + # slurm_script_options["n"] = self.grid_options["slurm_njobs"] + # slurm_script_options["njobs"] = self.grid_options["slurm_njobs"] + # slurm_script_options["dir"] = self.grid_options["slurm_dir"] + # slurm_script_options["memory"] = self.grid_options["slurm_memory"] + # slurm_script_options["working_dir"] = self.grid_options[ + # "slurm_dir" + # ] # TODO: check this + # slurm_script_options["command"] = command + # # slurm_script_options['streams'] = self.grid_options['streams'] + + # # Construct the script + # slurm_script_contents = "" + # slurm_script_contents += "#!/bin/bash\n" + # slurm_script_contents += "# Slurm file for binary_grid and slurm\n" + # slurm_script_contents += "#SBATCH --error={}/stderr/%A.%a\n".format( + # self.grid_options["slurm_dir"] + # ) + # slurm_script_contents += "#SBATCH --output={}/stdout/%A.%a\n".format( + # self.grid_options["slurm_dir"] + # ) + # slurm_script_contents += "#SBATCH --job-name={}\n".format( + # self.grid_options["slurm_jobname"] + # ) + # slurm_script_contents += "#SBATCH --partition={}\n".format( + # self.grid_options["slurm_partition"] + # ) + # slurm_script_contents += "#SBATCH --time={}\n".format( + # self.grid_options["slurm_time"] + # ) + # slurm_script_contents += "#SBATCH --mem={}\n".format( + # self.grid_options["slurm_memory"] + # ) + # slurm_script_contents += "#SBATCH --ntasks={}\n".format( + # self.grid_options["slurm_ntasks"] + # ) + # slurm_script_contents += "#SBATCH --array={}\n".format( + # self.grid_options["slurm_array"] + # ) + # slurm_script_contents += "\n" + + # if self.grid_options["slurm_extra_settings"]: + # slurm_script_contents += "# Extra settings by user:" + # slurm_script_contents += "\n".join( + # [ + # "--{}={}".format( + # key, self.grid_options["slurm_extra_settings"][key] + # ) + # for key in self.grid_options["slurm_extra_settings"] + # ] + # ) + + # slurm_script_contents += '# set status to "running"\n' + # slurm_script_contents += ( + # 'echo "running" > {}/status/$jobid.$jobarrayindex\n\n'.format( + # self.grid_options["slurm_dir"] + # ) + # ) + # slurm_script_contents += "# run grid of stars\n" + # slurm_script_contents += "{}\n\n".format(command) + # slurm_script_contents += '# set status to "finished"\n' + # slurm_script_contents += ( + # 'echo "finished" > 
{}/status/$jobid.$jobarrayindex\n'.format( + # self.grid_options["slurm_dir"] + # ) + # ) + # slurm_script_contents += "\n" + + # if self.grid_options["slurm_postpone_join"]: + # slurm_script_contents += "{} rungrid=0 results_hash_dumpfile={}/results/$jobid.all slurm_command=join\n".format( + # command, self.grid_options["slurm_dir"] + # ) + + # # Write script to file + # slurm_script_filename = os.path.join( + # self.grid_options["slurm_dir"], "slurm_script" + # ) + # with open(slurm_script_filename, "w") as slurm_script_file: + # slurm_script_file.write(slurm_script_contents) + + # # Execute or postpone + # if self.grid_options["slurm_postpone_sbatch"]: + # # Execute or postpone the real call to sbatch + # sbatch_command = "sbatch {}".format(slurm_script_filename) + # verbose_print( + # "running slurm script {}".format(slurm_script_filename), + # self.grid_options["verbosity"], + # 0, + # ) + # # subprocess.Popen(sbatch_command, close_fds=True) + # # subprocess.Popen(sbatch_command, creationflags=subprocess.DETACHED_PROCESS) + # verbose_print("Submitted scripts.", self.grid_options["verbosity"], 0) + # else: + # verbose_print( + # "Slurm script is in {} but hasnt been executed".format( + # slurm_script_filename + # ), + # self.grid_options["verbosity"], + # 0, + # ) + + # verbose_print("all done!", self.grid_options["verbosity"], 0) + # exit() + + # elif self.grid_options["slurm_command"] == "evolve": + # # Part to evolve the population. + # # TODO: decide how many CPUs + # verbose_print( + # "SLURM: Evolving population", self.grid_options["verbosity"], 1 + # ) + + # # + # self.evolve_population() + + # elif self.grid_options["slurm_command"] == "join": + # # Joining the output. + # verbose_print("SLURM: Joining results", self.grid_options["verbosity"], 1) ################################################### # CONDOR functions @@ -1988,249 +2041,249 @@ class Population: # subroutines to run CONDOR grids ################################################### - def _condor_grid(self): - """ - Main function that manages the CONDOR setup. - - Has three stages: - - - setup - - evolve - - join - - Which stage is used is determined by the value of grid_options['condor_command']: - - <empty>: the function will know its the user that executed the script and - it will set up the necessary condor stuff - - 'evolve': evolve_population is called to evolve the population of stars - - 'join': We will attempt to join the output - """ - - # TODO: Put in function - condor_version = get_condor_version() - if not condor_version: - verbose_print( - "CONDOR: Error: No installation of condor found", - self.grid_options["verbosity"], - 0, - ) - else: - major_version = int(condor_version.split(".")[0]) - minor_version = int(condor_version.split(".")[1]) - - if (major_version == 8) and (minor_version > 4): - verbose_print( - "CONDOR: Found version {} which is new enough".format( - condor_version - ), - self.grid_options["verbosity"], - 0, - ) - elif major_version > 9: - verbose_print( - "CONDOR: Found version {} which is new enough".format( - condor_version - ), - self.grid_options["verbosity"], - 0, - ) - else: - verbose_print( - "CONDOR: Found version {} which is too old (we require 8.3/8.4+)".format( - condor_version - ), - self.grid_options["verbosity"], - 0, - ) - - verbose_print( - "Running Condor grid. 
command={}".format( - self.grid_options["condor_command"] - ), - self.grid_options["verbosity"], - 1, - ) - if not self.grid_options["condor_command"]: - # Setting up - verbose_print( - "CONDOR: Main controller script. Setting up", - self.grid_options["verbosity"], - 1, - ) - - # Set up working directories: - verbose_print( - "CONDOR: creating working directories", - self.grid_options["verbosity"], - 1, - ) - create_directories_hpc(self.grid_options["condor_dir"]) - - # Create command - current_workingdir = os.getcwd() - python_details = get_python_details() - scriptname = path_of_calling_script() - # command = "".join([ - # "{}".python_details['executable'], - # "{}".scriptname, - # "offset=$jobarrayindex", - # "modulo={}".format(self.grid_options['condor_njobs']), - # "vb={}".format(self.grid_options['verbosity']) - - # "results_hash_dumpfile=$self->{_grid_options}{slurm_dir}/results/$jobid.$jobarrayindex", - # 'slurm_jobid='.$jobid, - # 'slurm_jobarrayindex='.$jobarrayindex, - # 'slurm_jobname=binary_grid_'.$jobid.'.'.$jobarrayindex, - # "slurm_njobs=$njobs", - # "slurm_dir=$self->{_grid_options}{slurm_dir}", - # ); - - # Create directory with info for the condor script. By creating this directory we also check whether all the values are set correctly - # TODO: create the condor script. - condor_script_options = {} - # condor_script_options['n'] = - condor_script_options["njobs"] = self.grid_options["condor_njobs"] - condor_script_options["dir"] = self.grid_options["condor_dir"] - condor_script_options["memory"] = self.grid_options["condor_memory"] - condor_script_options["working_dir"] = self.grid_options[ - "condor_working_dir" - ] - condor_script_options["command"] = self.grid_options["command"] - condor_script_options["streams"] = self.grid_options["streams"] - - # TODO: condor works with running an executable. 
- - # Create script contents - condor_script_contents = "" - condor_script_contents += """ -################################################# -# -# Condor script to run a binary_grid via python -# -################################################# -""" - condor_script_contents += "Executable\t= {}".format(executable) - condor_script_contents += "arguments\t= {}".format(arguments) - condor_script_contents += "environment\t= {}".format(environment) - condor_script_contents += "universe\t= {}".format( - self.grid_options["condor_universe"] - ) - condor_script_contents += "\n" - condor_script_contents += "output\t= {}/stdout/$id\n".format( - self.grid_options["condor_dir"] - ) - condor_script_contents += "error\t={}/sterr/$id".format( - self.grid_options["condor_dir"] - ) - condor_script_contents += "log\t={}\n".format( - self.grid_options["condor_dir"] - ) - condor_script_contents += "initialdir\t={}\n".format(current_workingdir) - condor_script_contents += "remote_initialdir\t={}\n".format( - current_workingdir - ) - condor_script_contents += "\n" - condor_script_contents += "steam_output\t={}".format(stream) - condor_script_contents += "steam_error\t={}".format(stream) - condor_script_contents += "+WantCheckpoint = False" - condor_script_contents += "\n" - condor_script_contents += "request_memory\t={}".format( - self.grid_options["condor_memory"] - ) - condor_script_contents += "ImageSize\t={}".format( - self.grid_options["condor_memory"] - ) - condor_script_contents += "\n" - - if self.grid_options["condor_extra_settings"]: - slurm_script_contents += "# Extra settings by user:" - slurm_script_contents += "\n".join( - [ - "{}\t={}".format( - key, self.grid_options["condor_extra_settings"][key] - ) - for key in self.grid_options["condor_extra_settings"] - ] - ) - - condor_script_contents += "\n" - - # request_memory = $_[0]{memory} - # ImageSize = $_[0]{memory} - - # Requirements = (1) \&\& (". 
- # $self->{_grid_options}{condor_requirements}.")\n"; - - # - # file name: my_program.condor - # Condor submit description file for my_program - # Executable = my_program - # Universe = vanilla - # Error = logs/err.$(cluster) - # Output = logs/out.$(cluster) - # Log = logs/log.$(cluster) - - # should_transfer_files = YES - # when_to_transfer_output = ON_EXIT - # transfer_input_files = files/in1,files/in2 - - # Arguments = files/in1 files/in2 files/out1 - # Queue - - # Write script contents to file - if self.grid_options["condor_postpone_join"]: - condor_script_contents += "{} rungrid=0 results_hash_dumpfile={}/results/$jobid.all condor_command=join\n".format( - command, self.grid_options["condor_dir"] - ) - - condor_script_filename = os.path.join( - self.grid_options["condor_dir"], "condor_script" - ) - with open(condor_script_filename, "w") as condor_script_file: - condor_script_file.write(condor_script_contents) - - if self.grid_options["condor_postpone_sbatch"]: - # Execute or postpone the real call to sbatch - submit_command = "condor_submit {}".format(condor_script_filename) - verbose_print( - "running condor script {}".format(condor_script_filename), - self.grid_options["verbosity"], - 0, - ) - # subprocess.Popen(sbatch_command, close_fds=True) - # subprocess.Popen(sbatch_command, creationflags=subprocess.DETACHED_PROCESS) - verbose_print("Submitted scripts.", self.grid_options["verbosity"], 0) - else: - verbose_print( - "Condor script is in {} but hasnt been executed".format( - condor_script_filename - ), - self.grid_options["verbosity"], - 0, - ) - - verbose_print("all done!", self.grid_options["verbosity"], 0) - exit() - - elif self.grid_options["condor_command"] == "evolve": - # TODO: write this function - # Part to evolve the population. - # TODO: decide how many CPUs - verbose_print( - "CONDOR: Evolving population", self.grid_options["verbosity"], 1 - ) - - # - self.evolve_population() - - elif self.grid_options["condor_command"] == "join": - # TODO: write this function - # Joining the output. - verbose_print("CONDOR: Joining results", self.grid_options["verbosity"], 1) - - pass +# def _condor_grid(self): +# """ +# Main function that manages the CONDOR setup. + +# Has three stages: + +# - setup +# - evolve +# - join + +# Which stage is used is determined by the value of grid_options['condor_command']: + +# <empty>: the function will know its the user that executed the script and +# it will set up the necessary condor stuff + +# 'evolve': evolve_population is called to evolve the population of stars + +# 'join': We will attempt to join the output +# """ + +# # TODO: Put in function +# condor_version = get_condor_version() +# if not condor_version: +# verbose_print( +# "CONDOR: Error: No installation of condor found", +# self.grid_options["verbosity"], +# 0, +# ) +# else: +# major_version = int(condor_version.split(".")[0]) +# minor_version = int(condor_version.split(".")[1]) + +# if (major_version == 8) and (minor_version > 4): +# verbose_print( +# "CONDOR: Found version {} which is new enough".format( +# condor_version +# ), +# self.grid_options["verbosity"], +# 0, +# ) +# elif major_version > 9: +# verbose_print( +# "CONDOR: Found version {} which is new enough".format( +# condor_version +# ), +# self.grid_options["verbosity"], +# 0, +# ) +# else: +# verbose_print( +# "CONDOR: Found version {} which is too old (we require 8.3/8.4+)".format( +# condor_version +# ), +# self.grid_options["verbosity"], +# 0, +# ) + +# verbose_print( +# "Running Condor grid. 
command={}".format( +# self.grid_options["condor_command"] +# ), +# self.grid_options["verbosity"], +# 1, +# ) +# if not self.grid_options["condor_command"]: +# # Setting up +# verbose_print( +# "CONDOR: Main controller script. Setting up", +# self.grid_options["verbosity"], +# 1, +# ) + +# # Set up working directories: +# verbose_print( +# "CONDOR: creating working directories", +# self.grid_options["verbosity"], +# 1, +# ) +# create_directories_hpc(self.grid_options["condor_dir"]) + +# # Create command +# current_workingdir = os.getcwd() +# python_details = get_python_details() +# scriptname = path_of_calling_script() +# # command = "".join([ +# # "{}".python_details['executable'], +# # "{}".scriptname, +# # "offset=$jobarrayindex", +# # "modulo={}".format(self.grid_options['condor_njobs']), +# # "vb={}".format(self.grid_options['verbosity']) + +# # "results_hash_dumpfile=$self->{_grid_options}{slurm_dir}/results/$jobid.$jobarrayindex", +# # 'slurm_jobid='.$jobid, +# # 'slurm_jobarrayindex='.$jobarrayindex, +# # 'slurm_jobname=binary_grid_'.$jobid.'.'.$jobarrayindex, +# # "slurm_njobs=$njobs", +# # "slurm_dir=$self->{_grid_options}{slurm_dir}", +# # ); + +# # Create directory with info for the condor script. By creating this directory we also check whether all the values are set correctly +# # TODO: create the condor script. +# condor_script_options = {} +# # condor_script_options['n'] = +# condor_script_options["njobs"] = self.grid_options["condor_njobs"] +# condor_script_options["dir"] = self.grid_options["condor_dir"] +# condor_script_options["memory"] = self.grid_options["condor_memory"] +# condor_script_options["working_dir"] = self.grid_options[ +# "condor_working_dir" +# ] +# condor_script_options["command"] = self.grid_options["command"] +# condor_script_options["streams"] = self.grid_options["streams"] + +# # TODO: condor works with running an executable. 
+ +# # Create script contents +# condor_script_contents = "" +# condor_script_contents += """ +# ################################################# +# # +# # Condor script to run a binary_grid via python +# # +# ################################################# +# """ +# condor_script_contents += "Executable\t= {}".format(executable) +# condor_script_contents += "arguments\t= {}".format(arguments) +# condor_script_contents += "environment\t= {}".format(environment) +# condor_script_contents += "universe\t= {}".format( +# self.grid_options["condor_universe"] +# ) +# condor_script_contents += "\n" +# condor_script_contents += "output\t= {}/stdout/$id\n".format( +# self.grid_options["condor_dir"] +# ) +# condor_script_contents += "error\t={}/sterr/$id".format( +# self.grid_options["condor_dir"] +# ) +# condor_script_contents += "log\t={}\n".format( +# self.grid_options["condor_dir"] +# ) +# condor_script_contents += "initialdir\t={}\n".format(current_workingdir) +# condor_script_contents += "remote_initialdir\t={}\n".format( +# current_workingdir +# ) +# condor_script_contents += "\n" +# condor_script_contents += "steam_output\t={}".format(stream) +# condor_script_contents += "steam_error\t={}".format(stream) +# condor_script_contents += "+WantCheckpoint = False" +# condor_script_contents += "\n" +# condor_script_contents += "request_memory\t={}".format( +# self.grid_options["condor_memory"] +# ) +# condor_script_contents += "ImageSize\t={}".format( +# self.grid_options["condor_memory"] +# ) +# condor_script_contents += "\n" + +# if self.grid_options["condor_extra_settings"]: +# slurm_script_contents += "# Extra settings by user:" +# slurm_script_contents += "\n".join( +# [ +# "{}\t={}".format( +# key, self.grid_options["condor_extra_settings"][key] +# ) +# for key in self.grid_options["condor_extra_settings"] +# ] +# ) + +# condor_script_contents += "\n" + +# # request_memory = $_[0]{memory} +# # ImageSize = $_[0]{memory} + +# # Requirements = (1) \&\& (". 
+# # $self->{_grid_options}{condor_requirements}.")\n"; + +# # +# # file name: my_program.condor +# # Condor submit description file for my_program +# # Executable = my_program +# # Universe = vanilla +# # Error = logs/err.$(cluster) +# # Output = logs/out.$(cluster) +# # Log = logs/log.$(cluster) + +# # should_transfer_files = YES +# # when_to_transfer_output = ON_EXIT +# # transfer_input_files = files/in1,files/in2 + +# # Arguments = files/in1 files/in2 files/out1 +# # Queue + +# # Write script contents to file +# if self.grid_options["condor_postpone_join"]: +# condor_script_contents += "{} rungrid=0 results_hash_dumpfile={}/results/$jobid.all condor_command=join\n".format( +# command, self.grid_options["condor_dir"] +# ) + +# condor_script_filename = os.path.join( +# self.grid_options["condor_dir"], "condor_script" +# ) +# with open(condor_script_filename, "w") as condor_script_file: +# condor_script_file.write(condor_script_contents) + +# if self.grid_options["condor_postpone_sbatch"]: +# # Execute or postpone the real call to sbatch +# submit_command = "condor_submit {}".format(condor_script_filename) +# verbose_print( +# "running condor script {}".format(condor_script_filename), +# self.grid_options["verbosity"], +# 0, +# ) +# # subprocess.Popen(sbatch_command, close_fds=True) +# # subprocess.Popen(sbatch_command, creationflags=subprocess.DETACHED_PROCESS) +# verbose_print("Submitted scripts.", self.grid_options["verbosity"], 0) +# else: +# verbose_print( +# "Condor script is in {} but hasnt been executed".format( +# condor_script_filename +# ), +# self.grid_options["verbosity"], +# 0, +# ) + +# verbose_print("all done!", self.grid_options["verbosity"], 0) +# exit() + +# elif self.grid_options["condor_command"] == "evolve": +# # TODO: write this function +# # Part to evolve the population. +# # TODO: decide how many CPUs +# verbose_print( +# "CONDOR: Evolving population", self.grid_options["verbosity"], 1 +# ) + +# # +# self.evolve_population() + +# elif self.grid_options["condor_command"] == "join": +# # TODO: write this function +# # Joining the output. +# verbose_print("CONDOR: Joining results", self.grid_options["verbosity"], 1) + +# pass ################################################### # Unordered functions @@ -2405,14 +2458,14 @@ class Population: def _increment_probtot(self, prob): """ - Function to add to the total probability + Function to add to the total probability. For now not used """ self.grid_options["_probtot"] += prob def _increment_count(self): """ - Function to add to the total amount of stars + Function to add to the total amount of stars. For now not used """ self.grid_options["_count"] += 1 diff --git a/binarycpython/utils/grid_options_defaults.py b/binarycpython/utils/grid_options_defaults.py index 36a1b5ded4e92ce6f2ff3bbf00d6da9e214b0b27..d295e12119d9babff8816989647b2816ee268f43 100644 --- a/binarycpython/utils/grid_options_defaults.py +++ b/binarycpython/utils/grid_options_defaults.py @@ -129,43 +129,45 @@ grid_options_defaults_dict = { # Slurm stuff ######################################## "slurm": 0, # dont use the slurm by default. 1 = use slurm - "slurm_ntasks": 1, # CPUs required per array job: usually only need this - "slurm_command": "", # Command that slurm runs (e.g. evolve or join_datafiles) - "slurm_dir": "", # working directory containing scripts output logs etc. 
- "slurm_njobs": 0, # number of scripts; set to 0 as default - "slurm_jobid": "", # slurm job id (%A) - "slurm_memory": 512, # in MB, the memory use of the job - "slurm_warn_max_memory": 1024, # in MB : warn if mem req. > this - "slurm_use_all_node_CPUs": 0, # 1 = use all of a node's CPUs. 0 = use a given amount of CPUs - "slurm_postpone_join": 0, # if 1 do not join on slurm, join elsewhere. want to do it off the slurm grid (e.g. with more RAM) - "slurm_jobarrayindex": "", # slurm job array index (%a) - "slurm_jobname": "binary_grid", # default - "slurm_partition": None, - "slurm_time": 0, # total time. 0 = infinite time - "slurm_postpone_sbatch": 0, # if 1: don't submit, just make the script - "slurm_array": None, # override for --array, useful for rerunning jobs - "slurm_use_all_node_CPUs": 0, # if given nodes, set to 1 - # if given CPUs, set to 0 - # you will want to use this if your Slurm SelectType is e.g. linear - # which means it allocates all the CPUs in a node to the job - "slurm_control_CPUs": 0, # if so, leave this many for Pythons control (0) - "slurm_array": None, # override for --array, useful for rerunning jobs - "slurm_partition": None, # MUST be defined - "slurm_extra_settings": {}, # Place to put extra configuration for the SLURM batch file. The key and value of the dict will become the key and value of the line in te slurm batch file. Will be put in after all the other settings (and before the command). Take care not to overwrite something without really meaning to do so. + # "slurm_ntasks": 1, # CPUs required per array job: usually only need this + # "slurm_command": "", # Command that slurm runs (e.g. evolve or join_datafiles) + # "slurm_dir": "", # working directory containing scripts output logs etc. + # "slurm_njobs": 0, # number of scripts; set to 0 as default + # "slurm_jobid": "", # slurm job id (%A) + # "slurm_memory": 512, # in MB, the memory use of the job + # "slurm_warn_max_memory": 1024, # in MB : warn if mem req. > this + # "slurm_use_all_node_CPUs": 0, # 1 = use all of a node's CPUs. 0 = use a given amount of CPUs + # "slurm_postpone_join": 0, # if 1 do not join on slurm, join elsewhere. want to do it off the slurm grid (e.g. with more RAM) + # "slurm_jobarrayindex": "", # slurm job array index (%a) + # "slurm_jobname": "binary_grid", # default + # "slurm_partition": None, + # "slurm_time": 0, # total time. 0 = infinite time + # "slurm_postpone_sbatch": 0, # if 1: don't submit, just make the script + # "slurm_array": None, # override for --array, useful for rerunning jobs + # "slurm_use_all_node_CPUs": 0, # if given nodes, set to 1 + # # if given CPUs, set to 0 + # # you will want to use this if your Slurm SelectType is e.g. linear + # # which means it allocates all the CPUs in a node to the job + # "slurm_control_CPUs": 0, # if so, leave this many for Pythons control (0) + # "slurm_array": None, # override for --array, useful for rerunning jobs + # "slurm_partition": None, # MUST be defined + # "slurm_extra_settings": {}, # Place to put extra configuration for the SLURM batch file. The key and value of the dict will become the key and value of the line in te slurm batch file. Will be put in after all the other settings (and before the command). Take care not to overwrite something without really meaning to do so. ######################################## # Condor stuff ######################################## "condor": 0, # 1 to use condor, 0 otherwise - "condor_command": "", # condor command e.g. 
"evolve", "join" - "condor_dir": "", # working directory containing e.g. scripts, output, logs (e.g. should be NFS available to all) - "condor_njobs": "", # number of scripts/jobs that CONDOR will run in total - "condor_jobid": "", # condor job id - "condor_postpone_join": 0, # if 1, data is not joined, e.g. if you want to do it off the condor grid (e.g. with more RAM) - # "condor_join_machine": None, # if defined then this is the machine on which the join command should be launched (must be sshable and not postponed) - "condor_join_pwd": "", # directory the join should be in (defaults to $ENV{PWD} if undef) - "condor_memory": 1024, # in MB, the memory use (ImageSize) of the job - "condor_universe": "vanilla", # usually vanilla universe - "condor_extra_settings": {}, # Place to put extra configuration for the CONDOR submit file. The key and value of the dict will become the key and value of the line in te slurm batch file. Will be put in after all the other settings (and before the command). Take care not to overwrite something without really meaning to do so. + # "condor_command": "", # condor command e.g. "evolve", "join" + # "condor_dir": "", # working directory containing e.g. scripts, output, logs (e.g. should be NFS available to all) + # "condor_njobs": "", # number of scripts/jobs that CONDOR will run in total + # "condor_jobid": "", # condor job id + # "condor_postpone_join": 0, # if 1, data is not joined, e.g. if you want to do it off the condor grid (e.g. with more RAM) + # # "condor_join_machine": None, # if defined then this is the machine on which the join command should be launched (must be sshable and not postponed) + # "condor_join_pwd": "", # directory the join should be in (defaults to $ENV{PWD} if undef) + # "condor_memory": 1024, # in MB, the memory use (ImageSize) of the job + # "condor_universe": "vanilla", # usually vanilla universe + # "condor_extra_settings": {}, # Place to put extra configuration for the CONDOR submit file. The key and value of the dict will become the key and value of the line in te slurm batch file. Will be put in after all the other settings (and before the command). Take care not to overwrite something without really meaning to do so. + + # snapshots and checkpoints # condor_snapshot_on_kill=>0, # if 1 snapshot on SIGKILL before exit # condor_load_from_snapshot=>0, # if 1 check for snapshot .sv file and load it if found @@ -456,7 +458,7 @@ grid_options_descriptions = { "_store_memaddr": "Memory adress of the store object for binary_c.", "failed_systems_threshold": "Variable storing the maximum amount of systems that are allowed to fail before logging their commandline arguments to failed_systems log files", "parse_function": "Function that the user can provide to handle the output the binary_c. This function has to take the arguments (self, output). Its best not to return anything in this function, and just store stuff in the grid_options['results'] dictionary, or just output results to a file", - "condor": "Int flag whether to use a condor type population evolution.", # TODO: describe this in more detail + "condor": "Int flag whether to use a condor type population evolution. Not implemented yet.", # TODO: describe this in more detail "slurm": "Int flag whether to use a slurm type population evolution.", # TODO: describe this in more detail "weight": "Weight factor for each system. The calculated probability is mulitplied by this. 
If the user wants each system to be repeated several times, then this variable should not be changed, rather change the _repeat variable instead, as that handles the reduction in probability per system. This is useful for systems that have a process with some random element in it.", # TODO: add more info here, regarding the evolution splitting. "repeat": "Factor of how many times a system should be repeated. Consider the evolution splitting binary_c argument for supernovae kick repeating.", # TODO: make sure this is used. diff --git a/binarycpython/utils/hpc_functions.py b/binarycpython/utils/hpc_functions.py index 043614b356517cc33cd1ef171bf7c224660f5959..e9795c86695c2a2934c141742c60e41fe727bf04 100644 --- a/binarycpython/utils/hpc_functions.py +++ b/binarycpython/utils/hpc_functions.py @@ -1,151 +1,151 @@ -""" -File containing functions for HPC computing, distributed tasks on clusters etc. - -Functions that the slurm and condor subroutines of the population object use. - -Mainly divided in 2 sections: Slurm and Condor -""" - -import os -import sys -import time -import subprocess -from typing import Union -import __main__ as main - - -def get_slurm_version() -> Union[str, None]: - """ - Function that checks whether slurm is installed and returns the version if its installed. - - Only tested this with slurm v17+ - - Returns: - slurm version, or None - """ - - slurm_version = None - - try: - slurm_version = ( - subprocess.run(["sinfo", "-V"], stdout=subprocess.PIPE, check=True) - .stdout.decode("utf-8") - .split() - )[1] - except FileNotFoundError as err: - print(err) - print(err.args) - print("Slurm is not installed or not loaded") - except Exception as err: - print(err) - print(err.args) - print("Unknown error, contact me about this") +# """ +# File containing functions for HPC computing, distributed tasks on clusters etc. + +# Functions that the slurm and condor subroutines of the population object use. + +# Mainly divided in 2 sections: Slurm and Condor +# """ + +# import os +# import sys +# import time +# import subprocess +# from typing import Union +# import __main__ as main + + +# def get_slurm_version() -> Union[str, None]: +# """ +# Function that checks whether slurm is installed and returns the version if its installed. + +# Only tested this with slurm v17+ + +# Returns: +# slurm version, or None +# """ + +# slurm_version = None + +# try: +# slurm_version = ( +# subprocess.run(["sinfo", "-V"], stdout=subprocess.PIPE, check=True) +# .stdout.decode("utf-8") +# .split() +# )[1] +# except FileNotFoundError as err: +# print(err) +# print(err.args) +# print("Slurm is not installed or not loaded") +# except Exception as err: +# print(err) +# print(err.args) +# print("Unknown error, contact me about this") - return slurm_version +# return slurm_version -def get_condor_version() -> Union[str, None]: - """ - Function that checks whether slurm is installed and returns the version if its installed. +# def get_condor_version() -> Union[str, None]: +# """ +# Function that checks whether slurm is installed and returns the version if its installed. 
- otherwise returns None +# otherwise returns None - Result has to be condor v8 or higher +# Result has to be condor v8 or higher - Returns: - condor version, or None - """ +# Returns: +# condor version, or None +# """ - condor_version = None +# condor_version = None - try: - condor_version = ( - subprocess.run( - ["condor_q", "--version"], stdout=subprocess.PIPE, check=True - ) - .stdout.decode("utf-8") - .split() - )[1] - except FileNotFoundError as err: - print("Slurm is not installed or not loaded: ") - print(err) - print(err.args) - except Exception as err: - print("Unknown error, contact me about this: ") - print(err) - print(err.args) +# try: +# condor_version = ( +# subprocess.run( +# ["condor_q", "--version"], stdout=subprocess.PIPE, check=True +# ) +# .stdout.decode("utf-8") +# .split() +# )[1] +# except FileNotFoundError as err: +# print("Slurm is not installed or not loaded: ") +# print(err) +# print(err.args) +# except Exception as err: +# print("Unknown error, contact me about this: ") +# print(err) +# print(err.args) - return condor_version +# return condor_version -def create_directories_hpc(working_dir: str) -> None: - """ - Function to create a set of directories, given a root directory +# def create_directories_hpc(working_dir: str) -> None: +# """ +# Function to create a set of directories, given a root directory - These directories will contain stuff for the HPC runs +# These directories will contain stuff for the HPC runs - Args: - working_dir: main working directory of the run. Under this directory all the dirs will be created - """ +# Args: +# working_dir: main working directory of the run. Under this directory all the dirs will be created +# """ - # Check if working_dir exists - if not os.path.isdir(working_dir): - print("Error. Working directory {} does not exist! Aborting") - raise ValueError +# # Check if working_dir exists +# if not os.path.isdir(working_dir): +# print("Error. Working directory {} does not exist! Aborting") +# raise ValueError - directories_list = [ - "scripts", - "stdout", - "stderr", - "results", - "logs", - "status", - "joining", - ] +# directories_list = [ +# "scripts", +# "stdout", +# "stderr", +# "results", +# "logs", +# "status", +# "joining", +# ] - # Make directories. - for subdir in directories_list: - full_path = os.path.join(working_dir, subdir) - os.makedirs(full_path, exist_ok=True) - - # Since the directories are probably made on some mount which has to go over NFS - # we should explicitly check if they are created - print("Checking if creating the directories has finished...") - directories_exist = False - while directories_exist: - directories_exist = True - - for subdir in directories_list: - full_path = os.path.join(working_dir, subdir) - - if not os.path.isdir(full_path): - time.sleep(1) - directories_exist = False - print("..Finished! Directories exist.") - - -def path_of_calling_script() -> str: - """ - Function to get the name of the script the user executes. - TODO: fix this function. seems not to work properly. - """ - - return main.__file__ - - -def get_python_details() -> dict: - """ - Function to get some info about the used python version and virtualenv etc +# # Make directories. 
+# for subdir in directories_list: +# full_path = os.path.join(working_dir, subdir) +# os.makedirs(full_path, exist_ok=True) + +# # Since the directories are probably made on some mount which has to go over NFS +# # we should explicitly check if they are created +# print("Checking if creating the directories has finished...") +# directories_exist = False +# while directories_exist: +# directories_exist = True + +# for subdir in directories_list: +# full_path = os.path.join(working_dir, subdir) + +# if not os.path.isdir(full_path): +# time.sleep(1) +# directories_exist = False +# print("..Finished! Directories exist.") + + +# def path_of_calling_script() -> str: +# """ +# Function to get the name of the script the user executes. +# TODO: fix this function. seems not to work properly. +# """ + +# return main.__file__ + + +# def get_python_details() -> dict: +# """ +# Function to get some info about the used python version and virtualenv etc - Returns: - dictionary with python executable, virtual environment and version information. - """ +# Returns: +# dictionary with python executable, virtual environment and version information. +# """ - python_info_dict = {} +# python_info_dict = {} - # - python_info_dict["virtualenv"] = os.getenv("VIRTUAL_ENV") - python_info_dict["executable"] = sys.executable - python_info_dict["version"] = sys.version +# # +# python_info_dict["virtualenv"] = os.getenv("VIRTUAL_ENV") +# python_info_dict["executable"] = sys.executable +# python_info_dict["version"] = sys.version - return python_info_dict +# return python_info_dict
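
Usage note for this change: Population.evolve() now runs _pre_run_cleanup() before evolving, the _cleanup() call has moved out of evolve_population() into evolve() itself, and evolve() returns a dictionary with the analytics of the run. Ensemble runs additionally require 'ensemble_defer' to be 1 and grid_options['ensemble_output_name'] to be set (the latter is combined with 'data_dir'), otherwise a ValueError is raised. Below is a minimal sketch of how a caller might drive the changed API. Population.set() is assumed as the usual entry point for routing options into bse_options/grid_options/custom_options, and the option values shown (file names, directories, parameter names) are illustrative assumptions, not defaults:

```python
from binarycpython.utils.grid import Population

pop = Population()

# Route some options; keys are matched against the binary_c defaults
# (bse_options) or the grid options, anything else becomes a custom option.
pop.set(
    verbosity=1,
    # Ensemble output must now be deferred and explicitly named;
    # evolve() raises ValueError otherwise (per the checks added above):
    ensemble=1,
    ensemble_defer=1,
    ensemble_output_name="ensemble_output.json",  # hypothetical name
    data_dir="/tmp/binary_c_example",  # hypothetical location
)

# evolve() now returns the analytics of the run instead of None
analytics = pop.evolve()

print(
    "population {} ran {} systems with total probability {}".format(
        analytics["population_name"],
        analytics["total_count"],
        analytics["total_probability"],
    )
)
if analytics["errors_found"]:
    print(
        "{} failed systems (failed probability {})".format(
            analytics["failed_count"], analytics["failed_prob"]
        )
    )
```

On the docstring clarification in autogen_C_logging_code: each element of a parameter list is appended to 'stardata->' in the generated C code, so a logging dictionary such as {"MY_STELLAR_DATA": ["model.time", "star[0].mass"]} (parameter names shown for illustration) refers to stardata->model.time and stardata->star[0].mass in the autogenerated logging line.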