diff --git a/binarycpython.yml b/binarycpython.yml deleted file mode 100644 index d97fb93e7f789b270c8c00b300db233292911cdb..0000000000000000000000000000000000000000 --- a/binarycpython.yml +++ /dev/null @@ -1,14 +0,0 @@ -# /user/HS128/dh00601/.config/tmuxinator/binaryc.yml - -name: binarycpython -root: <%= ENV["BINARYC_PYTHON"] %> - - -windows: - - binarycpython: - layout: tiled - panes: - - ls - - subl . - - cd $BINARYC_DIR/ - - cd $BINARYC_DATA_ROOT/ \ No newline at end of file diff --git a/binarycpython/utils/functions.py b/binarycpython/utils/functions.py index 8ad24f1e6a2e6cc1baca342c01e138cbed77f8c0..85cfc27e9b638c63966a270f0d8bcb64adc00098 100644 --- a/binarycpython/utils/functions.py +++ b/binarycpython/utils/functions.py @@ -155,90 +155,163 @@ def parse_binary_c_version_info(version_info_string): version_info_dict = {} - for line in version_info_string.splitlines(): - line = line.strip() - if line == "": - continue - if " is " in line: - split = line.split(" is ") - version_info_dict[split[0].strip()] = split[1].strip() - else: - if line.startswith("Binary_c/nucsyn"): - version_info_dict["intro"] = line - elif line.startswith("Email"): - emails = line.split("Email ")[1].split(",") - cleaned_emails = [email.strip() for email in emails] - version_info_dict["emails"] = cleaned_emails - elif line.startswith("DTlimit"): - split = line.split(" : ") - version_info_dict[split[0]] = ": ".join(split[1:]) - elif line.startswith("Version"): - split = line.split("Version ") - version_number = split[1] - version_info_dict["version_number"] = version_number - elif line.startswith("git URL"): - split = line.split("git URL ") - git_url = split[1] - version_info_dict["git_url"] = git_url - elif line.startswith("Build: "): - split = line.split("Build: ") - build = split[1] - version_info_dict["build"] = build - elif line.startswith("Compiled for "): - split = line.split("Compiled for ") - compiled_for = split[1] - version_info_dict["compiled_for"] = compiled_for - elif line.startswith("Stack limit "): - split = line.split("Stack limit ") - stack_limit = split[1] - version_info_dict["stack_limit"] = stack_limit - elif line.startswith("SVN URL "): - split = line.split("SVN URL ") - svn_url = split[1] - version_info_dict["svn_url"] = svn_url - elif line.startswith("git branch "): - split = line.split("git branch ") - git_branch = split[1] - version_info_dict["git_branch"] = git_branch - elif line.startswith("_SC_CLK_TCK"): - split = line.split(" = ") - _SC_CLK_TCK = split[1] - version_info_dict["_SC_CLK_TCK"] = _SC_CLK_TCK - elif line.startswith("Random number mean "): - split = line.split("Random number mean ") - random_number_mean = split[1] - version_info_dict["Random number mean"] = random_number_mean - elif line.startswith("SVN revision "): - split = line.split("SVN revision ") - svn_revision = split[1] - version_info_dict["svn_revision"] = svn_revision - elif line.startswith("Size of :"): - split = line.split("Size of :") - data_type_sizes = split[1] - version_info_dict["data_type_sizes"] = data_type_sizes - elif line.startswith("git revision "): - split = line.split("git revision ") - git_revision = split[1] - version_info_dict["git_revision"] = git_revision - elif line.startswith("BINARY_C_PRE_VERSION "): - split = line.split("BINARY_C_PRE_VERSION ") - binary_c_pre_version = split[1] - version_info_dict["binary_c_pre_version"] = binary_c_pre_version - elif line.startswith("Comenv accretion:"): - split = line.split("Comenv accretion:") - comenv_accretion = split[1] - 
version_info_dict["comenv_accretion"] = comenv_accretion - elif line.startswith("Compiled in parameters:"): - split = line.split("Compiled in parameters:") - compiled_in_parameters = split[1] - version_info_dict["compiled_in_parameters"] = compiled_in_parameters - elif line.startswith("__short__ is"): - split = line.split("__short__ is") - short_type = split[1] - version_info_dict["short_type"] = short_type - else: - print("Still found unmatched items!:\n{}".format(repr(line))) + # Clean data and put in correct shape + splitted = version_info_string.strip().splitlines() + cleaned = set([el.strip() for el in splitted if not el == ""]) + + ########################## + # Isotopes: + # Split off + isotopes = set([el for el in cleaned if el.startswith('Isotope ')]) + cleaned = cleaned - isotopes + + isotope_dict = {} + for el in isotopes: + split_info = el.split("Isotope ")[-1].strip().split(" is ") + + isotope_info = split_info[-1] + name = isotope_info.split(' ')[0].strip() + + # Get details + mass_g = float(isotope_info.split(",")[0].split('(')[1].split("=")[-1][:-2].strip()) + mass_amu = float(isotope_info.split(",")[0].split('(')[-1].split("=")[-1].strip()) + mass_mev = float(isotope_info.split(",")[-3].split("=")[-1].replace(")", "").strip()) + A = int(isotope_info.split(",")[-1].strip().split("=")[-1].replace(")", "")) + Z = int(isotope_info.split(",")[-2].strip().split("=")[-1]) + + # + isotope_dict[int(split_info[0])] = {'name': name, 'Z': Z, 'A': A, 'mass_mev': mass_mev, 'mass_g': mass_g, 'mass_amu': mass_amu} + version_info_dict['isotopes'] = isotope_dict + + ########################## + # Argpairs: + # Split off + argpairs = set([el for el in cleaned if el.startswith('ArgPair')]) + cleaned = cleaned - argpairs + argpair_dict = {} + for el in sorted(argpairs): + split_info = el.split("ArgPair ")[-1].split(" ") + + if not argpair_dict.get(split_info[0], None): + argpair_dict[split_info[0]] = {split_info[1]: split_info[2]} + else: + argpair_dict[split_info[0]][split_info[1]] = split_info[2] + + version_info_dict['argpairs'] = argpair_dict + + ########################## + # ensembles: + # Split off + ensembles = set([el for el in cleaned if el.startswith('Ensemble')]) + cleaned = cleaned - ensembles + + ensemble_dict = {} + for el in ensembles: + split_info = el.split("Ensemble ")[-1].split(" is ") + if len(split_info)>1: + ensemble_dict[int(split_info[0])] = split_info[-1] + version_info_dict['ensembles'] = ensemble_dict + + ########################## + # macros: + # Split off + macros = set([el for el in cleaned if el.startswith('macroxyz')]) + cleaned = cleaned - macros + + param_type_dict = { + 'STRING': str, + 'FLOAT': float, + 'MACRO': str, + 'INT': int, + 'LONG_INT': int, + } + + macros_dict = {} + for el in macros: + split_info = el.split("macroxyz ")[-1].split(" : ") + param_type = split_info[0] + + new_split = "".join(split_info[1:]).split(" is ") + param_name = new_split[0] + param_value = " is ".join(new_split[1:]) + # Sometimes the macros have extra information behind it. 
Needs an update in outputting by binary_c + try: + macros_dict[param_name] = param_type_dict[param_type](param_value) + except ValueError: + macros_dict[param_name] = str(param_value) + version_info_dict['macros'] = macros_dict + + ########################## + # Elements: + # Split off: + elements = set([el for el in cleaned if el.startswith('Element')]) + cleaned = cleaned - elements + + # Fill dict: + elements_dict = {} + for el in elements: + split_info = el.split("Element ")[-1].split(" : ") + name_info = split_info[0].split(" is ") + + # get isotope info + isotopes = {} + if not split_info[-1][0]=='0': + isotope_string = split_info[-1].split(" = ")[-1] + isotopes = {int(split_isotope.split("=")[0]):split_isotope.split("=")[1] for split_isotope in isotope_string.split(" ")} + + elements_dict[int(name_info[0])] = {'name': name_info[-1], 'atomic_number': int(name_info[0]), 'amt_isotopes': len(isotopes), 'isotopes': isotopes} + version_info_dict['elements'] = version_info_dict + + ########################## + # dt_limits: + # split off + dt_limits = set([el for el in cleaned if el.startswith('DTlimit')]) + cleaned = cleaned - dt_limits + + # Fill dict + dt_limits_dict = {} + for el in dt_limits: + split_info = el.split("DTlimit ")[-1].split(" : ") + dt_limits_dict[split_info[1].strip()] = {'index': int(split_info[0]), 'value': float(split_info[-1])} + + version_info_dict['dt_limits'] = dt_limits_dict + + ########################## + # Nucleosynthesis sources: + # Split off + nucsyn_sources = set([el for el in cleaned if el.startswith('Nucleosynthesis')]) + cleaned = cleaned - nucsyn_sources + + # Fill dict + nucsyn_sources_dict = {} + for el in nucsyn_sources: + split_info = el.split("Nucleosynthesis source")[-1].strip().split(" is ") + nucsyn_sources_dict[int(split_info[0])] = split_info[-1] + + version_info_dict['nucleosynthesis_sources'] = nucsyn_sources_dict + + ########################## + # miscellaneous: + # All those that I didnt catch with the above filters. Could try to get some more out though. + # TODO: filter a bit more. + + misc_dict = {} + git_revision = [el for el in cleaned if el.startswith('git revision')] + misc_dict['git_revision'] = git_revision[0].split("git revision ")[-1].replace("\"", '') + + git_branch = [el for el in cleaned if el.startswith('git branch')] + misc_dict['git_branch'] = git_branch[0].split("git branch ")[-1].replace("\"", '') + + build = [el for el in cleaned if el.startswith('Build')] + misc_dict['build'] = build[0].split("Build: ")[-1].replace("\"", '') + + email = [el for el in cleaned if el.startswith('Email')] + misc_dict['email'] = email[0].split("Email ")[-1].split(',') + + misc_dict['uncaught'] = cleaned + + version_info_dict['miscellaneous'] = misc_dict return version_info_dict @@ -727,7 +800,9 @@ def inspect_dict(dict_1, indent=0, print_structure=True): if print_structure: print("\t" * indent, key, type(value)) if isinstance(value, dict): - structure_dict[key] = inspect_dict(value, indent=indent + 1, print_structure=print_structure) + structure_dict[key] = inspect_dict( + value, indent=indent + 1, print_structure=print_structure + ) return structure_dict diff --git a/binarycpython/utils/grid.py b/binarycpython/utils/grid.py index 1d3439d3e6bc7fb8391a8fce37f6a730e8fde571..1d8bae3d4de3af95e176498be844d206343fd750 100644 --- a/binarycpython/utils/grid.py +++ b/binarycpython/utils/grid.py @@ -69,6 +69,7 @@ import binary_c_python_api # Make this function also an API call. 
Doest seem to get written to a buffer # that is stored into a python object. rather its just written to stdout + class Population: """ Population Object. Contains all the necessary functions to set up, run and process a @@ -216,7 +217,7 @@ class Population: # Grab the input and split them up, while accepting only non-empty entries cmdline_args = args.cmdline - self.grid_options['commandline_input'] = cmdline_args + self.grid_options["commandline_input"] = cmdline_args split_args = [ cmdline_arg for cmdline_arg in cmdline_args.split(" ") @@ -252,17 +253,17 @@ class Population: return argline def add_grid_variable( - self, - name, - longname, - valuerange, - resolution, - spacingfunc, - probdist, - dphasevol, - parameter_name, - precode=None, - condition=None, + self, + name, + longname, + valuerange, + resolution, + spacingfunc, + probdist, + dphasevol, + parameter_name, + precode=None, + condition=None, ): """ Function to add grid variables to the grid_options. @@ -365,12 +366,12 @@ class Population: return self.defaults def return_all_info( - self, - include_population_settings=True, - include_binary_c_defaults=True, - include_binary_c_version_info=True, - include_binary_c_help_all=True, - ): + self, + include_population_settings=True, + include_binary_c_defaults=True, + include_binary_c_version_info=True, + include_binary_c_help_all=True, + ): """ Function that returns all the information about the population and binary_c """ @@ -399,14 +400,14 @@ class Population: return all_info def export_all_info( - self, - use_datadir=True, - outfile=None, - include_population_settings=True, - include_binary_c_defaults=True, - include_binary_c_version_info=True, - include_binary_c_help_all=True, - ): + self, + use_datadir=True, + outfile=None, + include_population_settings=True, + include_binary_c_defaults=True, + include_binary_c_version_info=True, + include_binary_c_help_all=True, + ): """ Function that exports the all_info to a json file @@ -605,11 +606,11 @@ class Population: """ # Check which type: - if self.grid_options['slurm'] == 1: + if self.grid_options["slurm"] == 1: # Execute slurm subroutines self._slurm_grid() - elif self.grid_options['condor'] == 1: + elif self.grid_options["condor"] == 1: # Execute condor subroutines self._condor_grid() else: @@ -637,8 +638,8 @@ class Population: # Evolve systems: via grid_options one can choose to do this linearly, or # multiprocessing method. 
if ( - self.grid_options["evolution_type"] - in self.grid_options["evolution_type_options"] + self.grid_options["evolution_type"] + in self.grid_options["evolution_type_options"] ): if self.grid_options["evolution_type"] == "mp": self._evolve_population_mp() @@ -930,9 +931,7 @@ class Population: # Unload functions # Unload store - binary_c_python_api.free_store_memaddr( - self.grid_options["store_memaddr"] - ) + binary_c_python_api.free_store_memaddr(self.grid_options["store_memaddr"]) ################################################### # Gridcode functions @@ -1018,8 +1017,8 @@ class Population: code_string += indent * depth + "# setting probability lists\n" # Prepare the probability for grid_variable_el in sorted( - self.grid_options["grid_variables"].items(), - key=lambda x: x[1]["grid_variable_number"], + self.grid_options["grid_variables"].items(), + key=lambda x: x[1]["grid_variable_number"], ): # Make probabilities dict grid_variable = grid_variable_el[1] @@ -1466,16 +1465,22 @@ class Population: # We can choose to perform a check on the sourcefile, which checks if the lines start with 'binary_c' if check: - source_file_check_filehandle = open(self.grid_options["source_file_filename"], 'r') + source_file_check_filehandle = open( + self.grid_options["source_file_filename"], "r" + ) for line in source_file_check_filehandle: - if not line.startswith('binary_c'): + if not line.startswith("binary_c"): failed = True break if failed: - verbose_print("Error, sourcefile contains lines that do not start with binary_c", self.grid_options["verbosity"], 0) + verbose_print( + "Error, sourcefile contains lines that do not start with binary_c", + self.grid_options["verbosity"], + 0, + ) raise ValueError - source_file_filehandle = open(self.grid_options["source_file_filename"], 'r') + source_file_filehandle = open(self.grid_options["source_file_filename"], "r") self.grid_options["system_generator"] = source_file_filehandle @@ -1493,7 +1498,7 @@ class Population: arg_dict = {} for i in range(0, len(split_line), 2): - if "." in split_line[i+1]: + if "." in split_line[i + 1]: arg_dict[split_line[i]] = float(split_line[i + 1]) else: arg_dict[split_line[i]] = int(split_line[i + 1]) @@ -1530,124 +1535,191 @@ class Population: # TODO: Put in function slurm_version = get_slurm_version() if not slurm_version: - verbose_print("SLURM: Error: No installation of slurm found", - self.grid_options['verbosity'], 0) + verbose_print( + "SLURM: Error: No installation of slurm found", + self.grid_options["verbosity"], + 0, + ) else: major_version = int(slurm_version.split(".")[0]) minor_version = int(slurm_version.split(".")[1]) - if (major_version > 17): - verbose_print("SLURM: Found version {} which is new enough".format(slurm_version), - self.grid_options['verbosity'], 1) + if major_version > 17: + verbose_print( + "SLURM: Found version {} which is new enough".format(slurm_version), + self.grid_options["verbosity"], + 1, + ) else: - verbose_print("SLURM: Found version {} which is too old (we require 17+)".format(slurm_version), - self.grid_options['verbosity'], 0) + verbose_print( + "SLURM: Found version {} which is too old (we require 17+)".format( + slurm_version + ), + self.grid_options["verbosity"], + 0, + ) - verbose_print("SLURM: Running slurm grid. command={}".format(self.grid_options['slurm_command']), - self.grid_options['verbosity'], 1) + verbose_print( + "SLURM: Running slurm grid. 
command={}".format( + self.grid_options["slurm_command"] + ), + self.grid_options["verbosity"], + 1, + ) - if not self.grid_options['slurm_command']: + if not self.grid_options["slurm_command"]: # Setting up - verbose_print("SLURM: Main controller script. Setting up", self.grid_options['verbosity'], 1) - + verbose_print( + "SLURM: Main controller script. Setting up", + self.grid_options["verbosity"], + 1, + ) # Set up working directories: - verbose_print("SLURM: creating working directories", self.grid_options['verbosity'], 1) - create_directories_hpc(self.grid_options['slurm_dir']) + verbose_print( + "SLURM: creating working directories", self.grid_options["verbosity"], 1 + ) + create_directories_hpc(self.grid_options["slurm_dir"]) # Create command python_details = get_python_details() scriptname = path_of_calling_script() - command = "{} {}".format(python_details['executable'], scriptname) - command += " --cmdline \"{}\"".format(" ".join([ - "{}".format(self.grid_options['commandline_input']), - "offset=$jobarrayindex", - "modulo={}".format(self.grid_options['slurm_njobs']), - "vb={}".format(self.grid_options['verbosity']), - "slurm_jobid=$jobid", - "slurm_jobarrayindex=$jobarrayindex", - "slurm_jobname='binary_grid_'$jobid'.'$jobarrayindex", - "slurm_njobs={}".format(self.grid_options['slurm_njobs']), - "slurm_dir={}".format(self.grid_options['slurm_dir']), - "rungrid=1", - "slurm_command=evolve" - ]).strip()) + command = "{} {}".format(python_details["executable"], scriptname) + command += ' --cmdline "{}"'.format( + " ".join( + [ + "{}".format(self.grid_options["commandline_input"]), + "offset=$jobarrayindex", + "modulo={}".format(self.grid_options["slurm_njobs"]), + "vb={}".format(self.grid_options["verbosity"]), + "slurm_jobid=$jobid", + "slurm_jobarrayindex=$jobarrayindex", + "slurm_jobname='binary_grid_'$jobid'.'$jobarrayindex", + "slurm_njobs={}".format(self.grid_options["slurm_njobs"]), + "slurm_dir={}".format(self.grid_options["slurm_dir"]), + "rungrid=1", + "slurm_command=evolve", + ] + ).strip() + ) # Construct dict with settings for the script while checking the settings at the same time # Check settings: # TODO: check settings # Create SLURM_DIR script: slurm_script_options = {} - slurm_script_options['n'] = self.grid_options['slurm_njobs'] - slurm_script_options['njobs'] = self.grid_options['slurm_njobs'] - slurm_script_options['dir'] = self.grid_options['slurm_dir'] - slurm_script_options['memory'] = self.grid_options['slurm_memory'] - slurm_script_options['working_dir'] = self.grid_options['slurm_dir'] #TODO: check this - slurm_script_options['command'] = command + slurm_script_options["n"] = self.grid_options["slurm_njobs"] + slurm_script_options["njobs"] = self.grid_options["slurm_njobs"] + slurm_script_options["dir"] = self.grid_options["slurm_dir"] + slurm_script_options["memory"] = self.grid_options["slurm_memory"] + slurm_script_options["working_dir"] = self.grid_options[ + "slurm_dir" + ] # TODO: check this + slurm_script_options["command"] = command # slurm_script_options['streams'] = self.grid_options['streams'] # Construct the script slurm_script_contents = "" slurm_script_contents += "#!/bin/bash\n" slurm_script_contents += "# Slurm file for binary_grid and slurm\n" - slurm_script_contents += "#SBATCH --error={}/stderr/%A.%a\n".format(self.grid_options['slurm_dir']) - slurm_script_contents += "#SBATCH --output={}/stdout/%A.%a\n".format(self.grid_options['slurm_dir']) - slurm_script_contents += "#SBATCH 
--job-name={}\n".format(self.grid_options['slurm_jobname']) - slurm_script_contents += "#SBATCH --partition={}\n".format(self.grid_options['slurm_partition']) - slurm_script_contents += "#SBATCH --time={}\n".format(self.grid_options['slurm_time']) - slurm_script_contents += "#SBATCH --mem={}\n".format(self.grid_options['slurm_memory']) - slurm_script_contents += "#SBATCH --ntasks={}\n".format(self.grid_options['slurm_ntasks']) - slurm_script_contents += "#SBATCH --array={}\n".format(self.grid_options['slurm_array']) + slurm_script_contents += "#SBATCH --error={}/stderr/%A.%a\n".format( + self.grid_options["slurm_dir"] + ) + slurm_script_contents += "#SBATCH --output={}/stdout/%A.%a\n".format( + self.grid_options["slurm_dir"] + ) + slurm_script_contents += "#SBATCH --job-name={}\n".format( + self.grid_options["slurm_jobname"] + ) + slurm_script_contents += "#SBATCH --partition={}\n".format( + self.grid_options["slurm_partition"] + ) + slurm_script_contents += "#SBATCH --time={}\n".format( + self.grid_options["slurm_time"] + ) + slurm_script_contents += "#SBATCH --mem={}\n".format( + self.grid_options["slurm_memory"] + ) + slurm_script_contents += "#SBATCH --ntasks={}\n".format( + self.grid_options["slurm_ntasks"] + ) + slurm_script_contents += "#SBATCH --array={}\n".format( + self.grid_options["slurm_array"] + ) slurm_script_contents += "\n" - if self.grid_options['slurm_extra_settings']: + if self.grid_options["slurm_extra_settings"]: slurm_script_contents += "# Extra settings by user:" - slurm_script_contents += "\n".join(["--{}={}".format(key, self.grid_options['slurm_extra_settings'][key]) for key in self.grid_options['slurm_extra_settings']]) + slurm_script_contents += "\n".join( + [ + "--{}={}".format( + key, self.grid_options["slurm_extra_settings"][key] + ) + for key in self.grid_options["slurm_extra_settings"] + ] + ) - slurm_script_contents += "# set status to \"running\"\n" - slurm_script_contents += "echo \"running\" > {}/status/$jobid.$jobarrayindex\n\n".format(self.grid_options['slurm_dir']) + slurm_script_contents += '# set status to "running"\n' + slurm_script_contents += 'echo "running" > {}/status/$jobid.$jobarrayindex\n\n'.format( + self.grid_options["slurm_dir"] + ) slurm_script_contents += "# run grid of stars\n" slurm_script_contents += "{}\n\n".format(command) - slurm_script_contents += "# set status to \"finished\"\n" - slurm_script_contents += "echo \"finished\" > {}/status/$jobid.$jobarrayindex\n".format(self.grid_options['slurm_dir']) + slurm_script_contents += '# set status to "finished"\n' + slurm_script_contents += 'echo "finished" > {}/status/$jobid.$jobarrayindex\n'.format( + self.grid_options["slurm_dir"] + ) slurm_script_contents += "\n" - if self.grid_options['slurm_postpone_join']: - slurm_script_contents += "{} rungrid=0 results_hash_dumpfile={}/results/$jobid.all slurm_command=join\n".format(command, self.grid_options['slurm_dir']) + if self.grid_options["slurm_postpone_join"]: + slurm_script_contents += "{} rungrid=0 results_hash_dumpfile={}/results/$jobid.all slurm_command=join\n".format( + command, self.grid_options["slurm_dir"] + ) # Write script to file - slurm_script_filename = os.path.join(self.grid_options['slurm_dir'], 'slurm_script') - with open(slurm_script_filename, 'w') as slurm_script_file: + slurm_script_filename = os.path.join( + self.grid_options["slurm_dir"], "slurm_script" + ) + with open(slurm_script_filename, "w") as slurm_script_file: slurm_script_file.write(slurm_script_contents) # Execute or postpone - if 
self.grid_options['slurm_postpone_sbatch']: + if self.grid_options["slurm_postpone_sbatch"]: # Execute or postpone the real call to sbatch sbatch_command = "sbatch {}".format(slurm_script_filename) - verbose_print("running slurm script {}".format(slurm_script_filename), - self.grid_options['verbosity'], 0) + verbose_print( + "running slurm script {}".format(slurm_script_filename), + self.grid_options["verbosity"], + 0, + ) # subprocess.Popen(sbatch_command, close_fds=True) # subprocess.Popen(sbatch_command, creationflags=subprocess.DETACHED_PROCESS) - verbose_print("Submitted scripts.", - self.grid_options['verbosity'], 0) + verbose_print("Submitted scripts.", self.grid_options["verbosity"], 0) else: - verbose_print("Slurm script is in {} but hasnt been executed".format(slurm_script_filename), - self.grid_options['verbosity'], 0) + verbose_print( + "Slurm script is in {} but hasnt been executed".format( + slurm_script_filename + ), + self.grid_options["verbosity"], + 0, + ) - verbose_print("all done!", self.grid_options['verbosity'], 0) + verbose_print("all done!", self.grid_options["verbosity"], 0) exit() - elif self.grid_options['slurm_command'] == 'evolve': + elif self.grid_options["slurm_command"] == "evolve": # Part to evolve the population. # TODO: decide how many CPUs - verbose_print("SLURM: Evolving population", self.grid_options['verbosity'], 1) + verbose_print( + "SLURM: Evolving population", self.grid_options["verbosity"], 1 + ) # self._evolve_population() - elif self.grid_options['slurm_command'] == 'join': + elif self.grid_options["slurm_command"] == "join": # Joining the output. - verbose_print("SLURM: Joining results", self.grid_options['verbosity'], 1) + verbose_print("SLURM: Joining results", self.grid_options["verbosity"], 1) ################################################### # CONDOR functions @@ -1678,27 +1750,62 @@ class Population: # TODO: Put in function condor_version = get_condor_version() if not condor_version: - verbose_print("CONDOR: Error: No installation of condor found", self.grid_options['verbosity'], 0) + verbose_print( + "CONDOR: Error: No installation of condor found", + self.grid_options["verbosity"], + 0, + ) else: major_version = int(condor_version.split(".")[0]) minor_version = int(condor_version.split(".")[1]) if (major_version == 8) and (minor_version > 4): - verbose_print("CONDOR: Found version {} which is new enough".format(condor_version), self.grid_options['verbosity'], 0) - elif (major_version > 9): - verbose_print("CONDOR: Found version {} which is new enough".format(condor_version), self.grid_options['verbosity'], 0) + verbose_print( + "CONDOR: Found version {} which is new enough".format( + condor_version + ), + self.grid_options["verbosity"], + 0, + ) + elif major_version > 9: + verbose_print( + "CONDOR: Found version {} which is new enough".format( + condor_version + ), + self.grid_options["verbosity"], + 0, + ) else: - verbose_print("CONDOR: Found version {} which is too old (we require 8.3/8.4+)".format(condor_version), self.grid_options['verbosity'], 0) - + verbose_print( + "CONDOR: Found version {} which is too old (we require 8.3/8.4+)".format( + condor_version + ), + self.grid_options["verbosity"], + 0, + ) - verbose_print("Running Condor grid. command={}".format(self.grid_options['condor_command']), self.grid_options['verbosity'], 1) - if not self.grid_options['condor_command']: + verbose_print( + "Running Condor grid. 
command={}".format( + self.grid_options["condor_command"] + ), + self.grid_options["verbosity"], + 1, + ) + if not self.grid_options["condor_command"]: # Setting up - verbose_print("CONDOR: Main controller script. Setting up", self.grid_options['verbosity'], 1) + verbose_print( + "CONDOR: Main controller script. Setting up", + self.grid_options["verbosity"], + 1, + ) # Set up working directories: - verbose_print("CONDOR: creating working directories", self.grid_options['verbosity'], 1) - create_directories_hpc(self.grid_options['condor_dir']) + verbose_print( + "CONDOR: creating working directories", + self.grid_options["verbosity"], + 1, + ) + create_directories_hpc(self.grid_options["condor_dir"]) # Create command current_workingdir = os.getcwd() @@ -1723,12 +1830,14 @@ class Population: # TODO: create the condor script. condor_script_options = {} # condor_script_options['n'] = - condor_script_options['njobs'] = self.grid_options['condor_njobs'] - condor_script_options['dir'] = self.grid_options['condor_dir'] - condor_script_options['memory'] = self.grid_options['condor_memory'] - condor_script_options['working_dir'] = self.grid_options['condor_working_dir'] - condor_script_options['command'] = self.grid_options['command'] - condor_script_options['streams'] = self.grid_options['streams'] + condor_script_options["njobs"] = self.grid_options["condor_njobs"] + condor_script_options["dir"] = self.grid_options["condor_dir"] + condor_script_options["memory"] = self.grid_options["condor_memory"] + condor_script_options["working_dir"] = self.grid_options[ + "condor_working_dir" + ] + condor_script_options["command"] = self.grid_options["command"] + condor_script_options["streams"] = self.grid_options["streams"] # TODO: condor works with running an executable. 
@@ -1744,26 +1853,46 @@ class Population: condor_script_contents += "Executable\t= {}".format(executable) condor_script_contents += "arguments\t= {}".format(arguments) condor_script_contents += "environment\t= {}".format(environment) - condor_script_contents += "universe\t= {}".format(self.grid_options['condor_universe']) + condor_script_contents += "universe\t= {}".format( + self.grid_options["condor_universe"] + ) condor_script_contents += "\n" - condor_script_contents += "output\t= {}/stdout/$id\n".format(self.grid_options['condor_dir']) - condor_script_contents += "error\t={}/sterr/$id".format(self.grid_options['condor_dir']) - condor_script_contents += "log\t={}\n".format(self.grid_options['condor_dir']) + condor_script_contents += "output\t= {}/stdout/$id\n".format( + self.grid_options["condor_dir"] + ) + condor_script_contents += "error\t={}/sterr/$id".format( + self.grid_options["condor_dir"] + ) + condor_script_contents += "log\t={}\n".format( + self.grid_options["condor_dir"] + ) condor_script_contents += "initialdir\t={}\n".format(current_workingdir) - condor_script_contents += "remote_initialdir\t={}\n".format(current_workingdir) + condor_script_contents += "remote_initialdir\t={}\n".format( + current_workingdir + ) condor_script_contents += "\n" condor_script_contents += "steam_output\t={}".format(stream) condor_script_contents += "steam_error\t={}".format(stream) condor_script_contents += "+WantCheckpoint = False" condor_script_contents += "\n" - condor_script_contents += "request_memory\t={}".format(self.grid_options['condor_memory']) - condor_script_contents += "ImageSize\t={}".format(self.grid_options['condor_memory']) + condor_script_contents += "request_memory\t={}".format( + self.grid_options["condor_memory"] + ) + condor_script_contents += "ImageSize\t={}".format( + self.grid_options["condor_memory"] + ) condor_script_contents += "\n" - - if self.grid_options['condor_extra_settings']: + if self.grid_options["condor_extra_settings"]: slurm_script_contents += "# Extra settings by user:" - slurm_script_contents += "\n".join(["{}\t={}".format(key, self.grid_options['condor_extra_settings'][key]) for key in self.grid_options['condor_extra_settings']]) + slurm_script_contents += "\n".join( + [ + "{}\t={}".format( + key, self.grid_options["condor_extra_settings"][key] + ) + for key in self.grid_options["condor_extra_settings"] + ] + ) condor_script_contents += "\n" @@ -1773,7 +1902,7 @@ class Population: # Requirements = (1) \&\& (". 
# $self->{_grid_options}{condor_requirements}.")\n"; - # + # # file name: my_program.condor # Condor submit description file for my_program # Executable = my_program @@ -1790,46 +1919,58 @@ class Population: # Queue # Write script contents to file - if self.grid_options['condor_postpone_join']: - condor_script_contents += "{} rungrid=0 results_hash_dumpfile={}/results/$jobid.all condor_command=join\n".format(command, self.grid_options['condor_dir']) + if self.grid_options["condor_postpone_join"]: + condor_script_contents += "{} rungrid=0 results_hash_dumpfile={}/results/$jobid.all condor_command=join\n".format( + command, self.grid_options["condor_dir"] + ) - condor_script_filename = os.path.join(self.grid_options['condor_dir'], 'condor_script') - with open(condor_script_filename, 'w') as condor_script_file: + condor_script_filename = os.path.join( + self.grid_options["condor_dir"], "condor_script" + ) + with open(condor_script_filename, "w") as condor_script_file: condor_script_file.write(condor_script_contents) - if self.grid_options['condor_postpone_sbatch']: + if self.grid_options["condor_postpone_sbatch"]: # Execute or postpone the real call to sbatch submit_command = "condor_submit {}".format(condor_script_filename) - verbose_print("running condor script {}".format(condor_script_filename), - self.grid_options['verbosity'], 0) + verbose_print( + "running condor script {}".format(condor_script_filename), + self.grid_options["verbosity"], + 0, + ) # subprocess.Popen(sbatch_command, close_fds=True) # subprocess.Popen(sbatch_command, creationflags=subprocess.DETACHED_PROCESS) - verbose_print("Submitted scripts.", - self.grid_options['verbosity'], 0) + verbose_print("Submitted scripts.", self.grid_options["verbosity"], 0) else: - verbose_print("Condor script is in {} but hasnt been executed".format(condor_script_filename), - self.grid_options['verbosity'], 0) + verbose_print( + "Condor script is in {} but hasnt been executed".format( + condor_script_filename + ), + self.grid_options["verbosity"], + 0, + ) - verbose_print("all done!", self.grid_options['verbosity'], 0) + verbose_print("all done!", self.grid_options["verbosity"], 0) exit() - elif self.grid_options['condor_command'] == 'evolve': + elif self.grid_options["condor_command"] == "evolve": # TODO: write this function # Part to evolve the population. # TODO: decide how many CPUs - verbose_print("CONDOR: Evolving population", self.grid_options['verbosity'], 1) + verbose_print( + "CONDOR: Evolving population", self.grid_options["verbosity"], 1 + ) # self._evolve_population() - elif self.grid_options['condor_command'] == 'join': + elif self.grid_options["condor_command"] == "join": # TODO: write this function # Joining the output. - verbose_print("CONDOR: Joining results", self.grid_options['verbosity'], 1) + verbose_print("CONDOR: Joining results", self.grid_options["verbosity"], 1) pass - ################################################### # Unordered functions # @@ -1847,7 +1988,7 @@ class Population: pass def write_binary_c_calls_to_file( - self, output_dir=None, output_filename=None, include_defaults=False + self, output_dir=None, output_filename=None, include_defaults=False ): """ Function that loops over the gridcode and writes the generated parameters to a file. 
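A rough usage sketch for this call-list export, assuming a Population whose grid variables have been defined with add_grid_variable(); the variable values below are placeholders, not a tested configuration:

    from binarycpython.utils.grid import Population

    pop = Population()
    # Keyword names follow the add_grid_variable() signature in this diff;
    # the values (range, spacing, probability) are illustrative only.
    pop.add_grid_variable(
        name="M_1",
        longname="Primary mass",
        valuerange=[1, 100],
        resolution=10,
        spacingfunc="const(1, 100, 10)",
        probdist="1",
        dphasevol="dm1",
        parameter_name="M_1",
    )
    # Write the generated binary_c argument lines to a file (paths are made up).
    pop.write_binary_c_calls_to_file(
        output_dir="/tmp", output_filename="binary_c_calls.txt"
    )
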
@@ -1868,7 +2009,7 @@ class Population: ## check the settings: if self.bse_options.get("ensemble", None): - if self.bse_options['ensemble'] == 1: + if self.bse_options["ensemble"] == 1: if not self.bse_options["ensemble_defer"] == 1: verbose_print( "Error, if you want to run an ensemble in a population, the output needs to be deferred", diff --git a/binarycpython/utils/grid_options_defaults.py b/binarycpython/utils/grid_options_defaults.py index 18c001bab40014d8a6b272d34e0f1214608519f6..659245e1d9cd8931d4767d5e261c9c06ed6fa463 100644 --- a/binarycpython/utils/grid_options_defaults.py +++ b/binarycpython/utils/grid_options_defaults.py @@ -17,7 +17,6 @@ grid_options_defaults_dict = { "main_pid": -1, # Placeholder for the main process id of the run. # "output_dir": "commandline_input": "", - ########################## # Execution log: ########################## @@ -73,7 +72,7 @@ grid_options_defaults_dict = { "grid", "source_file", ], # Available choices for type of population generation # TODO: fill later with monte carlo etc - "source_file_filename": None, # filename for the source + "source_file_filename": None, # filename for the source "count": 0, # total count of systems "probtot": 0, # total probability "weight": 1.0, # weighting for the probability @@ -121,46 +120,43 @@ grid_options_defaults_dict = { # Slurm stuff ######################################## "slurm": 0, # dont use the slurm by default. 1 = use slurm - "slurm_ntasks": 1, # CPUs required per array job: usually only need this + "slurm_ntasks": 1, # CPUs required per array job: usually only need this "slurm_command": "", # Command that slurm runs (e.g. evolve or join_datafiles) "slurm_dir": "", # working directory containing scripts output logs etc. - "slurm_njobs": 0, # number of scripts; set to 0 as default - "slurm_jobid": '', # slurm job id (%A) - "slurm_memory": 512, # in MB, the memory use of the job - "slurm_warn_max_memory": 1024, # in MB : warn if mem req. > this - "slurm_use_all_node_CPUs": 0, # 1 = use all of a node's CPUs. 0 = use a given amount of CPUs - "slurm_postpone_join": 0, # if 1 do not join on slurm, join elsewhere. want to do it off the slurm grid (e.g. with more RAM) - "slurm_jobarrayindex": '', # slurm job array index (%a) - "slurm_jobname": 'binary_grid', # default + "slurm_njobs": 0, # number of scripts; set to 0 as default + "slurm_jobid": "", # slurm job id (%A) + "slurm_memory": 512, # in MB, the memory use of the job + "slurm_warn_max_memory": 1024, # in MB : warn if mem req. > this + "slurm_use_all_node_CPUs": 0, # 1 = use all of a node's CPUs. 0 = use a given amount of CPUs + "slurm_postpone_join": 0, # if 1 do not join on slurm, join elsewhere. want to do it off the slurm grid (e.g. with more RAM) + "slurm_jobarrayindex": "", # slurm job array index (%a) + "slurm_jobname": "binary_grid", # default "slurm_partition": None, - "slurm_time": 0, # total time. 0 = infinite time - "slurm_postpone_sbatch": 0, # if 1: don't submit, just make the script - "slurm_array": None,# override for --array, useful for rerunning jobs - "slurm_use_all_node_CPUs": 0, # if given nodes, set to 1 - # if given CPUs, set to 0 + "slurm_time": 0, # total time. 0 = infinite time + "slurm_postpone_sbatch": 0, # if 1: don't submit, just make the script + "slurm_array": None, # override for --array, useful for rerunning jobs + "slurm_use_all_node_CPUs": 0, # if given nodes, set to 1 + # if given CPUs, set to 0 # you will want to use this if your Slurm SelectType is e.g. 
linear # which means it allocates all the CPUs in a node to the job - "slurm_control_CPUs": 0, # if so, leave this many for Pythons control (0) - "slurm_array": None, # override for --array, useful for rerunning jobs - "slurm_partition": None, # MUST be defined - "slurm_extra_settings": {}, # Place to put extra configuration for the SLURM batch file. The key and value of the dict will become the key and value of the line in te slurm batch file. Will be put in after all the other settings (and before the command). Take care not to overwrite something without really meaning to do so. - - + "slurm_control_CPUs": 0, # if so, leave this many for Pythons control (0) + "slurm_array": None, # override for --array, useful for rerunning jobs + "slurm_partition": None, # MUST be defined + "slurm_extra_settings": {}, # Place to put extra configuration for the SLURM batch file. The key and value of the dict will become the key and value of the line in te slurm batch file. Will be put in after all the other settings (and before the command). Take care not to overwrite something without really meaning to do so. ######################################## # Condor stuff ######################################## - "condor": 0, # 1 to use condor, 0 otherwise - "condor_command": '', # condor command e.g. "evolve", "join" - "condor_dir": '', # working directory containing e.g. scripts, output, logs (e.g. should be NFS available to all) - "condor_njobs": '', # number of scripts/jobs that CONDOR will run in total - "condor_jobid": '', # condor job id - "condor_postpone_join": 0, # if 1, data is not joined, e.g. if you want to do it off the condor grid (e.g. with more RAM) + "condor": 0, # 1 to use condor, 0 otherwise + "condor_command": "", # condor command e.g. "evolve", "join" + "condor_dir": "", # working directory containing e.g. scripts, output, logs (e.g. should be NFS available to all) + "condor_njobs": "", # number of scripts/jobs that CONDOR will run in total + "condor_jobid": "", # condor job id + "condor_postpone_join": 0, # if 1, data is not joined, e.g. if you want to do it off the condor grid (e.g. with more RAM) # "condor_join_machine": None, # if defined then this is the machine on which the join command should be launched (must be sshable and not postponed) - "condor_join_pwd": '', # directory the join should be in (defaults to $ENV{PWD} if undef) - "condor_memory": 1024, # in MB, the memory use (ImageSize) of the job - "condor_universe": 'vanilla', # usually vanilla universe - "condor_extra_settings": {}, # Place to put extra configuration for the CONDOR submit file. The key and value of the dict will become the key and value of the line in te slurm batch file. Will be put in after all the other settings (and before the command). Take care not to overwrite something without really meaning to do so. - + "condor_join_pwd": "", # directory the join should be in (defaults to $ENV{PWD} if undef) + "condor_memory": 1024, # in MB, the memory use (ImageSize) of the job + "condor_universe": "vanilla", # usually vanilla universe + "condor_extra_settings": {}, # Place to put extra configuration for the CONDOR submit file. The key and value of the dict will become the key and value of the line in te slurm batch file. Will be put in after all the other settings (and before the command). Take care not to overwrite something without really meaning to do so. 
# snapshots and checkpoints # condor_snapshot_on_kill=>0, # if 1 snapshot on SIGKILL before exit # condor_load_from_snapshot=>0, # if 1 check for snapshot .sv file and load it if found @@ -180,7 +176,6 @@ grid_options_defaults_dict = { # condor_resubmit_submitted=>0, # condor_resubmit_running=>0, # condor_resubmit_crashed=>0, - ########################## # Unordered. Need to go through this. Copied from the perl implementation. ########################## diff --git a/binarycpython/utils/hpc_functions.py b/binarycpython/utils/hpc_functions.py index dd8a0b7db662f46790a306219a784ab6d63f5576..1eae7769ca5a2b52aa1ab91e341c2678256f43e0 100644 --- a/binarycpython/utils/hpc_functions.py +++ b/binarycpython/utils/hpc_functions.py @@ -12,6 +12,7 @@ import time import subprocess import __main__ as main + def get_slurm_version(): """ Function that checks whether slurm is installed and returns the version if its installed. @@ -23,7 +24,7 @@ def get_slurm_version(): try: slurm_version = ( - subprocess.run(['sinfo', "-V"], stdout=subprocess.PIPE, check=True) + subprocess.run(["sinfo", "-V"], stdout=subprocess.PIPE, check=True) .stdout.decode("utf-8") .split() )[1] @@ -38,6 +39,7 @@ def get_slurm_version(): return slurm_version + def get_condor_version(): """ Function that checks whether slurm is installed and returns the version if its installed. @@ -51,7 +53,9 @@ def get_condor_version(): try: condor_version = ( - subprocess.run(['condor_q', "--version"], stdout=subprocess.PIPE, check=True) + subprocess.run( + ["condor_q", "--version"], stdout=subprocess.PIPE, check=True + ) .stdout.decode("utf-8") .split() )[1] @@ -66,6 +70,7 @@ def get_condor_version(): return condor_version + def create_directories_hpc(working_dir): """ Function to create a set of directories, given a root directory @@ -78,7 +83,15 @@ def create_directories_hpc(working_dir): print("Error. Working directory {} does not exist! Aborting") raise ValueError - directories_list = ['scripts', 'stdout', 'stderr', 'results', 'logs', 'status', 'joining'] + directories_list = [ + "scripts", + "stdout", + "stderr", + "results", + "logs", + "status", + "joining", + ] # Make directories. for subdir in directories_list: @@ -100,6 +113,7 @@ def create_directories_hpc(working_dir): directories_exist = False print("..Finished! Directories exist.") + def path_of_calling_script(): """ Function to get the name of the script the user executes. 
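These helpers are what the Slurm/Condor code above uses to rebuild the command line that each job (array element) re-executes; a minimal sketch of that combination:

    from binarycpython.utils.hpc_functions import (
        get_python_details,
        path_of_calling_script,
    )

    # Same pattern as in _slurm_grid(): current interpreter + the user's script.
    python_details = get_python_details()
    command = "{} {}".format(python_details["executable"], path_of_calling_script())
    print(command)  # e.g. /usr/bin/python3 /home/user/run_population.py
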
@@ -107,6 +121,7 @@ def path_of_calling_script(): return main.__file__ + def get_python_details(): """ Function to get some info about the used python version and virtualenv etc @@ -115,8 +130,8 @@ def get_python_details(): python_info_dict = {} # - python_info_dict['virtualenv'] = os.getenv('VIRTUAL_ENV') - python_info_dict['executable'] = sys.executable - python_info_dict['version'] = sys.version + python_info_dict["virtualenv"] = os.getenv("VIRTUAL_ENV") + python_info_dict["executable"] = sys.executable + python_info_dict["version"] = sys.version return python_info_dict diff --git a/coverage.txt b/coverage.txt deleted file mode 100644 index 547b8b8dbb9fe7f011bab00c2aaddb3b9b41a97a..0000000000000000000000000000000000000000 --- a/coverage.txt +++ /dev/null @@ -1,208 +0,0 @@ - -File: "binary_c-python/__init__.py" - - File is empty - Needed: 0; Found: 0; Missing: 0; Coverage: 0.0% - -File: "binary_c-python/setup.py" - - No module docstring - - No docstring for `readme` - Needed: 2; Found: 0; Missing: 2; Coverage: 0.0% - -File: "binary_c-python/examples/example_population.py" - - No module docstring - Needed: 1; Found: 0; Missing: 1; Coverage: 0.0% - -File: "binary_c-python/examples/examples.py" - - No module docstring - Needed: 6; Found: 5; Missing: 1; Coverage: 83.3% - -File: "binary_c-python/examples/example_plotting_distributions.py" - - No module docstring - Needed: 1; Found: 0; Missing: 1; Coverage: 0.0% - -File: "binary_c-python/examples/examples_custom_logging.py" - - No module docstring - Needed: 1; Found: 0; Missing: 1; Coverage: 0.0% - -File: "binary_c-python/examples/.ipynb_checkpoints/examples-checkpoint.py" - - No module docstring - Needed: 5; Found: 4; Missing: 1; Coverage: 80.0% - -File: "binary_c-python/snippets/multiprocessing_comparison.py" - - No module docstring - - No docstring for `parse_function` - - No docstring for `evolve_mp` - - No docstring for `g` - Needed: 5; Found: 1; Missing: 4; Coverage: 20.0% - -File: "binary_c-python/snippets/verbose.py" - - No module docstring - Needed: 1; Found: 0; Missing: 1; Coverage: 0.0% - -File: "binary_c-python/snippets/test.py" - - No module docstring - Needed: 1; Found: 0; Missing: 1; Coverage: 0.0% - -File: "binary_c-python/snippets/increment.py" - - No module docstring - Needed: 1; Found: 0; Missing: 1; Coverage: 0.0% - -File: "binary_c-python/snippets/phasevol.py" - - No module docstring - Needed: 1; Found: 0; Missing: 1; Coverage: 0.0% - -File: "binary_c-python/snippets/dict_merging.py" - - No module docstring - Needed: 1; Found: 0; Missing: 1; Coverage: 0.0% - -File: "binary_c-python/snippets/yield_test.py" - - No module docstring - - No docstring for `yielder` - Needed: 2; Found: 0; Missing: 2; Coverage: 0.0% - -File: "binary_c-python/snippets/montecarlo_example.py" - - No module docstring - - No docstring for `mass_montecarlo` - - No docstring for `calc_alpha` - - No docstring for `period_montecarlo` - - No docstring for `calc_beta` - - No docstring for `massratio_montecarlo` - - No docstring for `eccentricity_montecarlo` - Needed: 7; Found: 0; Missing: 7; Coverage: 0.0% - -File: "binary_c-python/snippets/defaultdict.py" - - No module docstring - Needed: 1; Found: 0; Missing: 1; Coverage: 0.0% - -File: "binary_c-python/snippets/multiprocessing_test.py" - - No module docstring - - No docstring for `calculate` - - No docstring for `calculate.run` - - No docstring for `run.f` - Needed: 4; Found: 0; Missing: 4; Coverage: 0.0% - -File: "binary_c-python/docs/source/conf.py" - - No module docstring - Needed: 1; Found: 0; Missing: 
1; Coverage: 0.0% - -File: "binary_c-python/lib/__init__.py" - - File is empty - Needed: 0; Found: 0; Missing: 0; Coverage: 0.0% - -File: "binary_c-python/binarycpython/__init__.py" - - File is empty - Needed: 0; Found: 0; Missing: 0; Coverage: 0.0% - -File: "binary_c-python/binarycpython/core/__init__.py" - - File is empty - Needed: 0; Found: 0; Missing: 0; Coverage: 0.0% - -File: "binary_c-python/binarycpython/utils/grid_options_defaults.py" - - No module docstring - Needed: 1; Found: 0; Missing: 1; Coverage: 0.0% - -File: "binary_c-python/binarycpython/utils/distribution_functions.py" - - No module docstring - - No docstring for `calculate_constants_three_part_powerlaw` - - No docstring for `Arenou2010_binary_fraction` - Needed: 23; Found: 20; Missing: 3; Coverage: 87.0% - -File: "binary_c-python/binarycpython/utils/stellar_types.py" - - No module docstring - Needed: 1; Found: 0; Missing: 1; Coverage: 0.0% - -File: "binary_c-python/binarycpython/utils/spacing_functions.py" - - No module docstring - - No docstring for `const` - Needed: 2; Found: 0; Missing: 2; Coverage: 0.0% - -File: "binary_c-python/binarycpython/utils/useful_funcs.py" - - No module docstring - Needed: 7; Found: 6; Missing: 1; Coverage: 85.7% - -File: "binary_c-python/binarycpython/utils/__init__.py" - - File is empty - Needed: 0; Found: 0; Missing: 0; Coverage: 0.0% - -File: "binary_c-python/binarycpython/utils/custom_logging_functions.py" - - No module docstring - Needed: 9; Found: 8; Missing: 1; Coverage: 88.9% - -File: "binary_c-python/binarycpython/utils/functions.py" - - No module docstring - - No docstring for `parse_binary_c_version_info` - Needed: 13; Found: 11; Missing: 2; Coverage: 84.6% - -File: "binary_c-python/binarycpython/utils/grid.py" - - No module docstring - - No docstring for `Population` - - No docstring for `Population.set_bse_option` - - No docstring for `evolve_population_comparison.evolve_mp` - - No docstring for `evolve_population_comparison.g` - - No docstring for `evolve_population_mp.evolve_mp` - - No docstring for `evolve_population_mp.g` - Needed: 32; Found: 25; Missing: 7; Coverage: 78.1% - -File: "binary_c-python/binarycpython/utils/.ipynb_checkpoints/custom_logging_functions-checkpoint.py" - - No module docstring - Needed: 9; Found: 8; Missing: 1; Coverage: 88.9% - -File: "binary_c-python/binarycpython/utils/.ipynb_checkpoints/functions-checkpoint.py" - - No module docstring - - No docstring for `load_logfile` - Needed: 8; Found: 6; Missing: 2; Coverage: 75.0% - -File: "binary_c-python/tests/python_API_test.py" - - No module docstring - - No docstring for `test_run_binary` - - No docstring for `test_return_help` - - No docstring for `test_return_arglines` - - No docstring for `test_run_binary_with_log` - - No docstring for `test_run_binary_with_custom_logging` - - No docstring for `test_return_help_all` - - No docstring for `test_return_version_info` - - No docstring for `test_return_store` - - No docstring for `test_run_system` - Needed: 10; Found: 0; Missing: 10; Coverage: 0.0% - -File: "binary_c-python/tests/random_tests.py" - - No module docstring - Needed: 1; Found: 0; Missing: 1; Coverage: 0.0% - -File: "binary_c-python/tests/function_tests.py" - - No module docstring - Needed: 1; Found: 0; Missing: 1; Coverage: 0.0% - -File: "binary_c-python/tests/population/plot_scaling.py" - - No module docstring - - No docstring for `calc_mean_and_std` - Needed: 2; Found: 0; Missing: 2; Coverage: 0.0% - -File: "binary_c-python/tests/population/grid_tests_cmdline.py" - - No module docstring - 
Needed: 1; Found: 0; Missing: 1; Coverage: 0.0% - -File: "binary_c-python/tests/population/multiprocessing_via_population_comparison.py" - - No module docstring - - No docstring for `parse_function` - Needed: 3; Found: 1; Missing: 2; Coverage: 33.3% - -File: "binary_c-python/tests/population/grid_tests.py" - - No module docstring - Needed: 1; Found: 0; Missing: 1; Coverage: 0.0% - -File: "binary_c-python/tests/population/global_variable_for_distributions.py" - - No module docstring - - No docstring for `with_glob` - - No docstring for `without_glob` - Needed: 6; Found: 3; Missing: 3; Coverage: 50.0% - -File: "binary_c-python/.ipynb_checkpoints/python_API_test-checkpoint.py" - - No module docstring - - No docstring for `run_test_binary` - Needed: 2; Found: 0; Missing: 2; Coverage: 0.0% - - -Overall statistics for 41 files (5 files are empty): -Docstrings needed: 173; Docstrings found: 98; Docstrings missing: 75 -Total docstring coverage: 56.6%; Grade: Not bad diff --git a/tests/core/test_persistent_data.py b/tests/core/test_persistent_data.py index 6b27bbaa5d9578828854ef7e2a97fa9dcdbb9f81..5276f752dd15092382f69d9be3e6875ec63ef2b6 100644 --- a/tests/core/test_persistent_data.py +++ b/tests/core/test_persistent_data.py @@ -68,6 +68,7 @@ def test_return_persistent_data_memaddr(): assert isinstance(output, int), "memory adress has to be an integer" assert output != 0, "memory adress seems not to have a correct value" + def test_passing_persistent_data_to_run_system(): # Function to test the passing of the persistent data memoery adress, and having ensemble_defer = True # We should see that the results of multiple systems have been added to the one output json @@ -292,8 +293,6 @@ def test_adding_ensemble_output(): # print(json.dumps(test_1_merged_dict, indent=4)) # print(json.dumps(test_2_json, indent=4)) - - # assert inspect_dict(test_1_merged_dict, print_structure=False) == inspect_dict( test_2_json, print_structure=False @@ -359,6 +358,7 @@ def test_free_and_json_output(): # ensemble_jsons_1 = [line for line in output_1.splitlines() if line.startswith("ENSEMBLE_JSON")] # json_1 = json.loads(ensemble_jsons_1[0][len("ENSEMBLE_JSON "):], cls=binarycDecoder) + def test_all(): test_return_persistent_data_memaddr() test_passing_persistent_data_to_run_system() @@ -367,6 +367,7 @@ def test_all(): test_free_and_json_output() test_combine_with_empty_json() + #### if __name__ == "__main__": # test_return_persistent_data_memaddr() diff --git a/tests/core/test_return_store_memaddr.py b/tests/core/test_return_store_memaddr.py index 678077c0fb7bfbf75922f0ea757ce99651f10b86..25fdbe75cabfa57af92911344c4b60b2823e43a1 100644 --- a/tests/core/test_return_store_memaddr.py +++ b/tests/core/test_return_store_memaddr.py @@ -9,6 +9,7 @@ def test_return_store_memaddr(): print("store memory adress:") print(textwrap.indent(str(output), "\t")) + def test_unload_store_memaddr(): output = binary_c_python_api.return_store_memaddr() @@ -19,6 +20,7 @@ def test_unload_store_memaddr(): _ = binary_c_python_api.free_store_memaddr(output) print("freed store memaddr") + #### if __name__ == "__main__": test_return_store_memaddr()
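
The store lifecycle exercised by these tests, in short: a store memory address is requested once, reused while running systems, and freed again afterwards via the same free_store_memaddr() call that grid.py now makes when a population finishes. A minimal sketch:

    import binary_c_python_api

    # Request a store and keep the returned memory address around.
    store_memaddr = binary_c_python_api.return_store_memaddr()
    # ... run systems that reuse this store ...
    # Free it when the run is finished.
    binary_c_python_api.free_store_memaddr(store_memaddr)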