diff --git a/badges/test_coverage.svg b/badges/test_coverage.svg
index 012a8497e3c9a2a79933b6a7062337837907a8d7..f5af1dbeee2cacec20e9e6d0fbc93e757733fa2f 100644
--- a/badges/test_coverage.svg
+++ b/badges/test_coverage.svg
@@ -15,7 +15,7 @@
 <g fill="#fff" text-anchor="middle" font-family="DejaVu Sans,Verdana,Geneva,sans-serif" font-size="11">
 <text x="31.5" y="15" fill="#010101" fill-opacity=".3">coverage</text>
 <text x="31.5" y="14">coverage</text>
-<text x="80" y="15" fill="#010101" fill-opacity=".3">70%</text>
-<text x="80" y="14">70%</text>
+<text x="80" y="15" fill="#010101" fill-opacity=".3">72%</text>
+<text x="80" y="14">72%</text>
 </g>
 </svg>
diff --git a/binarycpython/tests/test_functions.py b/binarycpython/tests/test_functions.py
index dcc265abaf667d2c927df97e1d0b4f1280dbc8e2..364ce9b4724b0236cee9e07ef08dab462b185eeb 100644
--- a/binarycpython/tests/test_functions.py
+++ b/binarycpython/tests/test_functions.py
@@ -1,5 +1,26 @@
 """
 Unittests for the functions module
+
+TODO: format_number
+TODO: now
+TODO: check_if_in_shell
+TODO: timedelta
+TODO: get_ANSI_colours
+TODO: mem_use
+TODO: trem
+TODO: conv_time_units
+TODO: convert_bytes
+TODO: get_size
+TODO: imports
+TODO: isfloat
+TODO: isint
+TODO: convfloat
+TODO: datalinedict
+TODO: pad_output_distribution
+TODO: catchtime
+TODO: is_capsule
+TODO: Capturing
+TODO: call_binary_c_config
 """
 
 import os
diff --git a/binarycpython/tests/test_grid.py b/binarycpython/tests/test_grid.py
index 71c2ba13cbfb1c40693b8ff8737e1cfd6f40153f..09b429dd1ef55c978d265243f7e1c9803d53b499 100644
--- a/binarycpython/tests/test_grid.py
+++ b/binarycpython/tests/test_grid.py
@@ -27,6 +27,7 @@ import os
 import sys
 import json
 import gzip
+import shutil
 import unittest
 
 import numpy as np
@@ -38,10 +39,13 @@ from binarycpython.utils.dicts import (
 from binarycpython.utils.grid import Population
 
 TMP_DIR = temp_dir("tests", "test_grid")
+shutil.rmtree(TMP_DIR)
+os.makedirs(TMP_DIR, exist_ok=True)
+
 TEST_VERBOSITY = 1
 
 
-def parse_function_test_grid_evolve_2_threads_with_custom_logging(self, output):
+def parse_function_test_grid_evolve_2_threads_with_custom_logging(self, output): # pragma: no cover
     """
     Simple parse function that directly appends all the output to a file
     """
@@ -68,7 +72,7 @@ def parse_function_test_grid_evolve_2_threads_with_custom_logging(self, output)
             first_f.write(output + "\n")
 
 
-def parse_function_adding_results(self, output):
+def parse_function_adding_results(self, output): # pragma: no cover
     """
     Example parse function
     """
@@ -107,14 +111,14 @@ class test__setup(unittest.TestCase):
     """
 
     def test_setup(self):
-        with Capturing() as output:
+        with Capturing() as _:
             self._test_setup()
 
     def _test_setup(self):
         """
         Unittests for function _setup
         """
 
-        test_pop = Population()
+        test_pop = Population(tmp_dir=TMP_DIR)
         self.assertTrue("orbital_period" in test_pop.defaults)
         self.assertTrue("metallicity" in test_pop.defaults)
@@ -123,7 +127,7 @@
         self.assertEqual(test_pop.custom_options, {})
         self.assertEqual(test_pop.argline_dict, {})
         self.assertEqual(test_pop.persistent_data_memory_dict, {})
-        self.assertTrue(test_pop.grid_options["parse_function"] == None)
+        self.assertTrue(test_pop.grid_options["parse_function"] is None)
         self.assertTrue(isinstance(test_pop.grid_options["_main_pid"], int))
 
 
@@ -133,7 +137,7 @@ class test_set(unittest.TestCase):
     """
 
     def test_set(self):
-        with Capturing() as output:
+        with Capturing() as _:
             self._test_set()
 
     def _test_set(self):
@@ -141,7 +145,7 @@ class test_set(unittest.TestCase):
         Unittests for function set
         """
 
-        test_pop = Population()
+        test_pop = Population(tmp_dir=TMP_DIR)
         test_pop.set(num_cores=2, verbosity=TEST_VERBOSITY)
         test_pop.set(M_1=10)
         test_pop.set(data_dir="/tmp/binary_c_python")
@@ -164,7 +168,7 @@ class test_cmdline(unittest.TestCase):
     """
 
     def test_cmdline(self):
-        with Capturing() as output:
+        with Capturing() as _:
             self._test_cmdline()
 
     def _test_cmdline(self):
@@ -184,7 +188,7 @@ class test_cmdline(unittest.TestCase):
         ]
 
         # Set up population
-        test_pop = Population()
+        test_pop = Population(tmp_dir=TMP_DIR)
         test_pop.set(data_dir="/tmp", verbosity=TEST_VERBOSITY)
 
         # parse arguments
@@ -212,7 +216,7 @@ class test__return_argline(unittest.TestCase):
     """
 
     def test__return_argline(self):
-        with Capturing() as output:
+        with Capturing() as _:
             self._test__return_argline()
 
     def _test__return_argline(self):
@@ -221,7 +225,7 @@ class test__return_argline(unittest.TestCase):
         """
 
         # Set up population
-        test_pop = Population()
+        test_pop = Population(tmp_dir=TMP_DIR)
         test_pop.set(metallicity=0.02, verbosity=TEST_VERBOSITY)
         test_pop.set(M_1=10)
 
@@ -243,7 +247,7 @@ class test_return_population_settings(unittest.TestCase):
     """
 
     def test_return_population_settings(self):
-        with Capturing() as output:
+        with Capturing() as _:
             self._test_return_population_settings()
 
     def _test_return_population_settings(self):
@@ -251,7 +255,7 @@ class test_return_population_settings(unittest.TestCase):
         Unittests for the function return_population_settings
         """
 
-        test_pop = Population()
+        test_pop = Population(tmp_dir=TMP_DIR)
         test_pop.set(metallicity=0.02, verbosity=TEST_VERBOSITY)
         test_pop.set(M_1=10)
         test_pop.set(num_cores=2)
@@ -276,7 +280,7 @@ class test_return_binary_c_defaults(unittest.TestCase):
     """
 
     def test_return_binary_c_defaults(self):
-        with Capturing() as output:
+        with Capturing() as _:
             self._test_return_binary_c_defaults()
 
     def _test_return_binary_c_defaults(self):
@@ -284,7 +288,7 @@ class test_return_binary_c_defaults(unittest.TestCase):
         Unittests for the function return_binary_c_defaults
         """
 
-        test_pop = Population()
+        test_pop = Population(tmp_dir=TMP_DIR)
         binary_c_defaults = test_pop.return_binary_c_defaults()
         self.assertIn("probability", binary_c_defaults)
         self.assertIn("phasevol", binary_c_defaults)
@@ -297,7 +301,7 @@ class test_return_all_info(unittest.TestCase):
     """
 
    def test_return_all_info(self):
-        with Capturing() as output:
+        with Capturing() as _:
             self._test_return_all_info()
 
     def _test_return_all_info(self):
@@ -306,7 +310,7 @@ class test_return_all_info(unittest.TestCase):
         Not going to do too much tests here, just check if they are not empty
         """
 
-        test_pop = Population()
+        test_pop = Population(tmp_dir=TMP_DIR)
         all_info = test_pop.return_all_info()
 
         self.assertIn("population_settings", all_info)
@@ -326,7 +330,7 @@ class test_export_all_info(unittest.TestCase):
     """
 
     def test_export_all_info(self):
-        with Capturing() as output:
+        with Capturing() as _:
             self._test_export_all_info()
 
     def _test_export_all_info(self):
@@ -334,7 +338,7 @@ class test_export_all_info(unittest.TestCase):
         Unittests for the function export_all_info
         """
 
-        test_pop = Population()
+        test_pop = Population(tmp_dir=TMP_DIR)
         test_pop.set(metallicity=0.02, verbosity=TEST_VERBOSITY)
         test_pop.set(M_1=10)
 
@@ -398,7 +402,7 @@ class test__cleanup_defaults(unittest.TestCase):
     """
 
     def test__cleanup_defaults(self):
-        with Capturing() as output:
+        with Capturing() as _:
             self._test__cleanup_defaults()
 
     def _test__cleanup_defaults(self):
@@ -406,7 +410,7 @@ class test__cleanup_defaults(unittest.TestCase):
         Unittests for the function _cleanup_defaults
         """
 
-        test_pop = Population()
+        test_pop = Population(tmp_dir=TMP_DIR)
         cleaned_up_defaults = test_pop._cleanup_defaults()
 
         self.assertNotIn("help_all", cleaned_up_defaults)
@@ -417,7 +421,7 @@ class test__increment_probtot(unittest.TestCase):
     """
 
     def test__increment_probtot(self):
-        with Capturing() as output:
+        with Capturing() as _:
             self._test__increment_probtot()
 
     def _test__increment_probtot(self):
@@ -425,7 +429,7 @@ class test__increment_probtot(unittest.TestCase):
         Unittests for the function _increment_probtot
         """
 
-        test_pop = Population()
+        test_pop = Population(tmp_dir=TMP_DIR)
         test_pop._increment_probtot(0.5)
 
         self.assertEqual(test_pop.grid_options["_probtot"], 0.5)
@@ -436,7 +440,7 @@ class test__increment_count(unittest.TestCase):
     """
 
     def test__increment_count(self):
-        with Capturing() as output:
+        with Capturing() as _:
             self._test__increment_count()
 
     def _test__increment_count(self):
@@ -444,7 +448,7 @@ class test__increment_count(unittest.TestCase):
         Unittests for the function _increment_count
         """
 
-        test_pop = Population()
+        test_pop = Population(tmp_dir=TMP_DIR)
         test_pop._increment_count()
 
         self.assertEqual(test_pop.grid_options["_count"], 1)
@@ -455,7 +459,7 @@ class test__dict_from_line_source_file(unittest.TestCase):
     """
 
     def test__dict_from_line_source_file(self):
-        with Capturing() as output:
+        with Capturing() as _:
             self._test__dict_from_line_source_file()
 
     def _test__dict_from_line_source_file(self):
@@ -469,7 +473,7 @@ class test__dict_from_line_source_file(unittest.TestCase):
         with open(source_file, "w") as f:
             f.write("binary_c M_1 10 metallicity 0.02\n")
 
-        test_pop = Population()
+        test_pop = Population(tmp_dir=TMP_DIR)
 
         # readout
         with open(source_file, "r") as f:
@@ -486,7 +490,7 @@ class test_evolve_single(unittest.TestCase):
     """
 
     def test_evolve_single(self):
-        with Capturing() as output:
+        with Capturing() as _:
             self._test_evolve_single()
 
     def _test_evolve_single(self):
@@ -508,7 +512,7 @@ class test_evolve_single(unittest.TestCase):
         );
         """
 
-        test_pop = Population()
+        test_pop = Population(tmp_dir=TMP_DIR)
         test_pop.set(
             M_1=10,
             M_2=5,
@@ -528,7 +532,7 @@ class test_evolve_single(unittest.TestCase):
 
         # custom_logging_dict = {"TEST_CUSTOM_LOGGING_2": ["star[0].mass", "model.time"]}
 
-        test_pop_2 = Population()
+        test_pop_2 = Population(tmp_dir=TMP_DIR)
         test_pop_2.set(
             M_1=10,
             M_2=5,
@@ -555,6 +559,10 @@ class test_resultdict(unittest.TestCase):
     """
 
     def test_adding_results(self):
+        with Capturing() as _:
+            self._test_adding_results()
+
+    def _test_adding_results(self):
         """
         Function to test whether the results are properly added and combined
         """
@@ -576,7 +584,7 @@ class test_resultdict(unittest.TestCase):
             stardata->model.max_evolution_time = stardata->model.time - stardata->model.dtm;
         """
 
-        example_pop = Population()
+        example_pop = Population(tmp_dir=TMP_DIR)
         example_pop.set(verbosity=0)
         example_pop.set(
             max_evolution_time=15000, # bse_options
@@ -663,7 +671,7 @@ class test_grid_evolve(unittest.TestCase):
     """
 
     def test_grid_evolve_1_thread(self):
-        with Capturing() as output:
+        with Capturing() as _:
             self._test_grid_evolve_1_thread()
 
     def _test_grid_evolve_1_thread(self):
@@ -671,7 +679,7 @@ class test_grid_evolve(unittest.TestCase):
         Unittests to see if 1 thread does all the systems
         """
 
-        test_pop_evolve_1_thread = Population()
+        test_pop_evolve_1_thread = Population(tmp_dir=TMP_DIR)
         test_pop_evolve_1_thread.set(
             num_cores=1, M_2=1, orbital_period=100000, verbosity=TEST_VERBOSITY
         )
@@ -701,7 +709,7 @@ class test_grid_evolve(unittest.TestCase):
         self.assertTrue(analytics["total_count"] == 10)
 
     def test_grid_evolve_2_threads(self):
-        with Capturing() as output:
+        with Capturing() as _:
             self._test_grid_evolve_2_threads()
 
     def _test_grid_evolve_2_threads(self):
@@ -709,7 +717,7 @@ class test_grid_evolve(unittest.TestCase):
         Unittests to see if multiple threads handle the all the systems correctly
         """
 
-        test_pop = Population()
+        test_pop = Population(tmp_dir=TMP_DIR)
         test_pop.set(
             num_cores=2, M_2=1, orbital_period=100000, verbosity=TEST_VERBOSITY
         )
@@ -739,7 +747,7 @@ class test_grid_evolve(unittest.TestCase):
         self.assertTrue(analytics["total_count"] == 10)
 
     def test_grid_evolve_2_threads_with_custom_logging(self):
-        with Capturing() as output:
+        with Capturing() as _:
             self._test_grid_evolve_2_threads_with_custom_logging()
 
     def _test_grid_evolve_2_threads_with_custom_logging(self):
@@ -751,7 +759,7 @@ class test_grid_evolve(unittest.TestCase):
         num_cores_value = 2
         custom_logging_string = 'Printf("MY_STELLAR_DATA_TEST_EXAMPLE %g %g %g %g\\n",((double)stardata->model.time),((double)stardata->star[0].mass),((double)stardata->model.probability),((double)stardata->model.dt));'
 
-        test_pop = Population()
+        test_pop = Population(tmp_dir=TMP_DIR)
 
         test_pop.set(
             num_cores=num_cores_value,
@@ -801,7 +809,7 @@ class test_grid_evolve(unittest.TestCase):
         remove_file(output_name)
 
     def test_grid_evolve_with_condition_error(self):
-        with Capturing() as output:
+        with Capturing() as _:
             self._test_grid_evolve_with_condition_error()
 
     def _test_grid_evolve_with_condition_error(self):
@@ -809,7 +817,7 @@ class test_grid_evolve(unittest.TestCase):
         Unittests to see if the threads catch the errors correctly.
         """
 
-        test_pop = Population()
+        test_pop = Population(tmp_dir=TMP_DIR)
         test_pop.set(
             num_cores=2, M_2=1, orbital_period=100000, verbosity=TEST_VERBOSITY
         )
@@ -858,12 +866,12 @@ Printf("TEST_CUSTOM_LOGGING_1 %30.12e %g %g %g %g\\n",
         self.assertEqual(analytics["failed_systems_error_codes"], [0])
         self.assertTrue(analytics["total_count"] == 10)
         self.assertTrue(analytics["failed_count"] == 10)
-        self.assertTrue(analytics["errors_found"] == True)
-        self.assertTrue(analytics["errors_exceeded"] == True)
+        self.assertTrue(analytics["errors_found"] is True)
+        self.assertTrue(analytics["errors_exceeded"] is True)
 
         # test to see if 1 thread does all the systems
-        test_pop = Population()
+        test_pop = Population(tmp_dir=TMP_DIR)
         test_pop.set(
             num_cores=2, M_2=1, orbital_period=100000, verbosity=TEST_VERBOSITY
         )
 
@@ -907,7 +915,7 @@ Printf("TEST_CUSTOM_LOGGING_1 %30.12e %g %g %g %g\\n",
         # self.assertRaises(ValueError, test_pop.evolve)
 
     def test_grid_evolve_no_grid_variables(self):
-        with Capturing() as output:
+        with Capturing() as _:
             self._test_grid_evolve_no_grid_variables()
 
     def _test_grid_evolve_no_grid_variables(self):
@@ -915,7 +923,7 @@ Printf("TEST_CUSTOM_LOGGING_1 %30.12e %g %g %g %g\\n",
         Unittests to see if errors are raised if there are no grid variables
         """
 
-        test_pop = Population()
+        test_pop = Population(tmp_dir=TMP_DIR)
         test_pop.set(
             num_cores=1, M_2=1, orbital_period=100000, verbosity=TEST_VERBOSITY
         )
@@ -924,7 +932,7 @@ Printf("TEST_CUSTOM_LOGGING_1 %30.12e %g %g %g %g\\n",
         self.assertRaises(ValueError, test_pop.evolve)
 
     def test_grid_evolve_2_threads_with_ensemble_direct_output(self):
-        with Capturing() as output:
+        with Capturing() as _:
             self._test_grid_evolve_2_threads_with_ensemble_direct_output()
 
     def _test_grid_evolve_2_threads_with_ensemble_direct_output(self):
@@ -935,7 +943,7 @@ Printf("TEST_CUSTOM_LOGGING_1 %30.12e %g %g %g %g\\n",
         data_dir_value = TMP_DIR
         num_cores_value = 2
 
-        test_pop = Population()
+        test_pop = Population(tmp_dir=TMP_DIR)
         test_pop.set(
             num_cores=num_cores_value,
             verbosity=TEST_VERBOSITY,
@@ -994,7 +1002,7 @@ Printf("TEST_CUSTOM_LOGGING_1 %30.12e %g %g %g %g\\n",
         self.assertNotEqual(ensemble_json["number_counts"], {})
 
     def test_grid_evolve_2_threads_with_ensemble_combining(self):
-        with Capturing() as output:
+        with Capturing() as _:
             self._test_grid_evolve_2_threads_with_ensemble_combining()
 
     def _test_grid_evolve_2_threads_with_ensemble_combining(self):
@@ -1005,7 +1013,7 @@ Printf("TEST_CUSTOM_LOGGING_1 %30.12e %g %g %g %g\\n",
         data_dir_value = TMP_DIR
         num_cores_value = 2
 
-        test_pop = Population()
+        test_pop = Population(tmp_dir=TMP_DIR)
         test_pop.set(
             num_cores=num_cores_value,
             verbosity=TEST_VERBOSITY,
@@ -1049,7 +1057,7 @@ Printf("TEST_CUSTOM_LOGGING_1 %30.12e %g %g %g %g\\n",
         )
 
     def test_grid_evolve_2_threads_with_ensemble_comparing_two_methods(self):
-        with Capturing() as output:
+        with Capturing() as _:
             self._test_grid_evolve_2_threads_with_ensemble_comparing_two_methods()
 
     def _test_grid_evolve_2_threads_with_ensemble_comparing_two_methods(self):
@@ -1061,7 +1069,7 @@ Printf("TEST_CUSTOM_LOGGING_1 %30.12e %g %g %g %g\\n",
         num_cores_value = 2
 
         # First
-        test_pop_1 = Population()
+        test_pop_1 = Population(tmp_dir=TMP_DIR)
         test_pop_1.set(
             num_cores=num_cores_value,
             verbosity=TEST_VERBOSITY,
@@ -1098,7 +1106,7 @@ Printf("TEST_CUSTOM_LOGGING_1 %30.12e %g %g %g %g\\n",
         ensemble_output_1 = test_pop_1.grid_ensemble_results
 
         # second
-        test_pop_2 = Population()
+        test_pop_2 = Population(tmp_dir=TMP_DIR)
         test_pop_2.set(
             num_cores=num_cores_value,
             verbosity=TEST_VERBOSITY,
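A note on the "with Capturing() as _:" pattern used throughout the test changes above: Capturing is the stdout-capturing context manager from binarycpython.utils.functions, and rebinding its result to _ rather than output makes explicit that the captured lines are deliberately discarded. A minimal sketch of such a context manager, for reference only (the project's real implementation may differ in detail):

    import sys
    from io import StringIO

    class Capturing(list):
        """Context manager that collects sys.stdout line by line into this list."""

        def __enter__(self):
            self._stdout = sys.stdout              # remember the real stdout
            sys.stdout = self._stringio = StringIO()
            return self

        def __exit__(self, *args):
            # split the buffered text into lines, then restore stdout
            self.extend(self._stringio.getvalue().splitlines())
            del self._stringio
            sys.stdout = self._stdout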
Printf("TEST_CUSTOM_LOGGING_1 %30.12e %g %g %g %g\\n", data_dir_value = TMP_DIR num_cores_value = 2 - test_pop = Population() + test_pop = Population(tmp_dir=TMP_DIR) test_pop.set( num_cores=num_cores_value, verbosity=TEST_VERBOSITY, @@ -994,7 +1002,7 @@ Printf("TEST_CUSTOM_LOGGING_1 %30.12e %g %g %g %g\\n", self.assertNotEqual(ensemble_json["number_counts"], {}) def test_grid_evolve_2_threads_with_ensemble_combining(self): - with Capturing() as output: + with Capturing() as _: self._test_grid_evolve_2_threads_with_ensemble_combining() def _test_grid_evolve_2_threads_with_ensemble_combining(self): @@ -1005,7 +1013,7 @@ Printf("TEST_CUSTOM_LOGGING_1 %30.12e %g %g %g %g\\n", data_dir_value = TMP_DIR num_cores_value = 2 - test_pop = Population() + test_pop = Population(tmp_dir=TMP_DIR) test_pop.set( num_cores=num_cores_value, verbosity=TEST_VERBOSITY, @@ -1049,7 +1057,7 @@ Printf("TEST_CUSTOM_LOGGING_1 %30.12e %g %g %g %g\\n", ) def test_grid_evolve_2_threads_with_ensemble_comparing_two_methods(self): - with Capturing() as output: + with Capturing() as _: self._test_grid_evolve_2_threads_with_ensemble_comparing_two_methods() def _test_grid_evolve_2_threads_with_ensemble_comparing_two_methods(self): @@ -1061,7 +1069,7 @@ Printf("TEST_CUSTOM_LOGGING_1 %30.12e %g %g %g %g\\n", num_cores_value = 2 # First - test_pop_1 = Population() + test_pop_1 = Population(tmp_dir=TMP_DIR) test_pop_1.set( num_cores=num_cores_value, verbosity=TEST_VERBOSITY, @@ -1098,7 +1106,7 @@ Printf("TEST_CUSTOM_LOGGING_1 %30.12e %g %g %g %g\\n", ensemble_output_1 = test_pop_1.grid_ensemble_results # second - test_pop_2 = Population() + test_pop_2 = Population(tmp_dir=TMP_DIR) test_pop_2.set( num_cores=num_cores_value, verbosity=TEST_VERBOSITY, diff --git a/binarycpython/utils/functions.py b/binarycpython/utils/functions.py index aec64d77cafa88214171bcc88a2c9918214790d6..363180ef603779ebfc5cd5c06b336b815a502ac1 100644 --- a/binarycpython/utils/functions.py +++ b/binarycpython/utils/functions.py @@ -1161,7 +1161,7 @@ def write_binary_c_parameter_descriptions_to_rst_file(output_file: str) -> None: ######################################################## -def load_logfile(logfile: str) -> None: +def load_logfile(logfile: str) -> None: # pragma: no cover """ Experimental function that parses the generated log file of binary_c. diff --git a/binarycpython/utils/population_extensions/HPC.py b/binarycpython/utils/population_extensions/HPC.py index fd55d76fcc2462bd4f5e16374616ad19eb0b746e..18934e137969c4a4f6249a3e01058e0b82f444f3 100644 --- a/binarycpython/utils/population_extensions/HPC.py +++ b/binarycpython/utils/population_extensions/HPC.py @@ -384,13 +384,16 @@ class HPC(condor, slurm): dirs = [] return dirs - def HPC_grid(self, makejoiningfile=True): + def HPC_grid(self, makejoiningfile=True): # pragma: no cover """ Function to call the appropriate HPC grid function (e.g. Slurm or Condor) and return what it returns. Args: makejoiningfile : if True, and we're the first job with self.HPC_task() == 2, we build the joiningfile. (default=True) This option exists in case you don't want to overwrite an existing joiningfile, or want to build it in another way (e.g. in the HPC scripts). 
diff --git a/binarycpython/utils/population_extensions/HPC.py b/binarycpython/utils/population_extensions/HPC.py
index fd55d76fcc2462bd4f5e16374616ad19eb0b746e..18934e137969c4a4f6249a3e01058e0b82f444f3 100644
--- a/binarycpython/utils/population_extensions/HPC.py
+++ b/binarycpython/utils/population_extensions/HPC.py
@@ -384,13 +384,16 @@ class HPC(condor, slurm):
             dirs = []
 
         return dirs
 
-    def HPC_grid(self, makejoiningfile=True):
+    def HPC_grid(self, makejoiningfile=True): # pragma: no cover
         """
         Function to call the appropriate HPC grid function (e.g. Slurm or Condor) and return what it returns.
 
         Args:
             makejoiningfile : if True, and we're the first job with self.HPC_task() == 2, we build the joiningfile. (default=True) This option exists in case you don't want to overwrite an existing joiningfile, or want to build it in another way (e.g. in the HPC scripts).
+
+        TODO: Exclude this function from testing for now
+        TODO: Comment this function better
         """
 
         jobid = self.HPC_jobID_tuple()[0]
@@ -707,9 +710,10 @@ class HPC(condor, slurm):
         print(json.dumps(d, indent=4))
         print("############################################################")
 
-    def HPC_queue_stats(self):
+    def HPC_queue_stats(self): # pragma: no cover
         """
         Function that returns the queue stats for the HPC grid
 
+        TODO: the slurm_queue_stats doesn't actually return anything
         """
 
diff --git a/binarycpython/utils/population_extensions/condor.py b/binarycpython/utils/population_extensions/condor.py
index ab3996aa7a172956f0f2d9b4af389f48989b2e2b..3ea970fb551e5b9f587b9c5f05163ab5dd36835c 100644
--- a/binarycpython/utils/population_extensions/condor.py
+++ b/binarycpython/utils/population_extensions/condor.py
@@ -215,13 +215,16 @@ class condor:
             time.sleep(1)
             break
 
-    def condor_grid(self):
+    def condor_grid(self): # pragma: no cover
         """
         function to be called when running grids when grid_options['condor']>=1
 
         if grid_options['condor']==1, we set up the condor script and launch the jobs, then return True to exit.
         if grid_options['condor']==2, we run the stars, which means we return False to continue.
         if grid_options['condor']==3, we are being called from the jobs to run the grids, return False to continue.
+
+        TODO: split this function into some parts
+        TODO: Comment this function better
         """
 
         if self.grid_options["condor"] == 3:
@@ -518,7 +521,7 @@ queue {njobs}
         # return True so we exit immediately
         return True
 
-    def condor_queue_stats(self):
+    def condor_queue_stats(self): # pragma: no cover
         """
         Return condor queue statistics for this job
         """
diff --git a/binarycpython/utils/population_extensions/slurm.py b/binarycpython/utils/population_extensions/slurm.py
index 7e300ff9731f0b3fa10d9148403db914e4cb06f5..b19ed1c07a71a1eae8fae960c3976d364ca9591b 100644
--- a/binarycpython/utils/population_extensions/slurm.py
+++ b/binarycpython/utils/population_extensions/slurm.py
@@ -205,7 +205,7 @@ class slurm:
             time.sleep(1)
             break
 
-    def slurm_grid(self):
+    def slurm_grid(self): # pragma: no cover
         """
         function to be called when running grids when grid_options['slurm']>=1
 
@@ -213,6 +213,8 @@ class slurm:
         if grid_options['slurm']==2, we run the stars, which means we return False to continue.
         if grid_options['slurm']==3, we are being called from the jobs to run the grids, return False to continue.
 
+        TODO: split this function into some parts
+        TODO: Comment this function better
         """
 
         if self.grid_options["slurm"] == 2:
@@ -441,7 +443,7 @@ echo \"running\" > {slurm_dir}/status/$SLURM_ARRAY_JOB_ID.$SLURM_ARRAY_TASK_ID
         # return True so we exit immediately
         return True
 
-    def slurm_queue_stats(self):
+    def slurm_queue_stats(self): # pragma: no cover
         """
         Function to XXX
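The condor_grid and slurm_grid docstrings above describe a shared return-value protocol: the method returns True when the current process only set up and submitted jobs and should therefore exit, and False when the process should continue and actually evolve systems. A schematic of how a caller might consume that protocol (hypothetical driver code with an invented _evolve_population helper, not the project's actual evolve() implementation):

    def evolve(self):
        # Dispatch to the scheduler layer first; a True return means this
        # invocation's only job was to submit the array jobs.
        if self.grid_options.get("slurm", 0) >= 1:
            if self.slurm_grid():
                return  # jobs submitted; nothing to run in this process
        elif self.grid_options.get("condor", 0) >= 1:
            if self.condor_grid():
                return  # jobs submitted; nothing to run in this process

        self._evolve_population()  # hypothetical name for the actual work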