diff --git a/binarycpython/tests/main.py b/binarycpython/tests/main.py
index da398439bc840f2e30c0f56d76022b2632a54545..84866198d6c56f762ebbad458fe1f1fd03644692 100755
--- a/binarycpython/tests/main.py
+++ b/binarycpython/tests/main.py
@@ -10,7 +10,7 @@ import unittest
 from binarycpython.tests.test_c_bindings import (
     test_run_system,
     test_return_store_memaddr,
-    test_ensemble_functions
+    test_ensemble_functions,
 )
 from binarycpython.tests.test_custom_logging import (
     test_autogen_C_logging_code,
@@ -18,7 +18,7 @@ from binarycpython.tests.test_custom_logging import (
     test_binary_c_write_log_code,
     test_from_binary_c_config,
     test_return_compilation_dict,
-    test_create_and_load_logging_function
+    test_create_and_load_logging_function,
 )
 from binarycpython.tests.test_dicts import (
     test_merge_dicts,
@@ -38,7 +38,7 @@ from binarycpython.tests.test_dicts import (
     test_multiply_float_values,
     test_subtract_dicts,
     test_update_dicts,
-    test__nested_get
+    test__nested_get,
 )
 from binarycpython.tests.test_ensemble import (
     test_binaryc_json_serializer,
@@ -48,7 +48,7 @@ from binarycpython.tests.test_ensemble import (
     test_extract_ensemble_json_from_string,
     test_load_ensemble,
     test_ensemble_file_type,
-    test_open_ensemble
+    test_open_ensemble,
 )
 from binarycpython.tests.test_functions import (
     test_verbose_print,
@@ -65,7 +65,7 @@ from binarycpython.tests.test_functions import (
     test_get_help_super,
     test_make_build_text,
     test_write_binary_c_parameter_descriptions_to_rst_file,
-    test_bin_data
+    test_bin_data,
 )
 from binarycpython.tests.test_grid import (
     test__setup,
@@ -82,7 +82,7 @@ from binarycpython.tests.test_grid import (
     test__dict_from_line_source_file,
     test_evolve_single,
     test_grid_evolve,
-    test_resultdict
+    test_resultdict,
 )
 from binarycpython.tests.test_plot_functions import (
     test_color_by_index,
@@ -108,23 +108,48 @@ from binarycpython.tests.tests_population_extensions.test_distribution_functions
     test_raghavan2010_binary_fraction,
     test_Izzard2012_period_distribution,
     test_flatsections,
-    test_sana12
+    test_sana12,
 )
 from binarycpython.tests.tests_population_extensions.test_grid_options_defaults import (
-    test_grid_options_help,
+    test_grid_options_help,
     test_grid_options_description_checker,
     test_write_grid_options_to_rst_file,
 )
 from binarycpython.tests.tests_population_extensions.test_version_info import (
     test_return_binary_c_version_info,
-    test_parse_binary_c_version_info
+    test_parse_binary_c_version_info,
 )
 from binarycpython.tests.tests_population_extensions.test_gridcode import (
     test_add_grid_variable,
 )
-
+from binarycpython.tests.tests_population_extensions.test_HPC import (
+    test_HPC_njobs,
+    test_HPC_job,
+    test_HPC_job_type,
+    test_HPC_jobID,
+    test_HPC_jobID_tuple,
+    test_HPC_dirs,
+    test_HPC_id_filename,
+    test_HPC_check_requirements,
+    test_HPC_set_status,
+    test_HPC_get_status,
+)
+from binarycpython.tests.tests_population_extensions.test_condor import (
+    test_condorID,
+    test_condor_dirs,
+    test_condor_check_requirements,
+    test_set_condor_status,
+    test_get_condor_status,
+)
+from binarycpython.tests.tests_population_extensions.test_slurm import (
+    test_slurmID,
+    test_slurm_dirs,
+    test_slurm_check_requirements,
+    test_set_slurm_status,
+    test_get_slurm_status,
+)
 
 from binarycpython.tests.test_stellar_types import *
 
 from binarycpython.tests.test_useful_funcs import (
@@ -142,6 +167,5 @@
 
 # from binarycpython.tests.test_hpc_functions import *
 
-
 if __name__ == "__main__":
     unittest.main()
diff --git
a/binarycpython/tests/test_custom_logging.py b/binarycpython/tests/test_custom_logging.py index 49334004d82064816ee6383d4376f2ef0672865c..4b30ae48d0889c2ea371b4812f19ba9d9dc090d5 100644 --- a/binarycpython/tests/test_custom_logging.py +++ b/binarycpython/tests/test_custom_logging.py @@ -48,6 +48,7 @@ class test_autogen_C_logging_code(unittest.TestCase): output_3 = autogen_C_logging_code(input_dict_3, verbosity=1) self.assertEqual(output_3, None, msg="Output should be None") + class test_binary_c_log_code(unittest.TestCase): """ Unit test for binary_c_log_code @@ -77,6 +78,7 @@ class test_binary_c_log_code(unittest.TestCase): msg="Output does not match what it should be: {}".format(test_value_2), ) + class test_binary_c_write_log_code(unittest.TestCase): """ Unit test for binary_c_write_log_code @@ -108,6 +110,7 @@ class test_binary_c_write_log_code(unittest.TestCase): content_file = repr(f.read()) self.assertEqual(repr(input_1), content_file, msg="Contents are not similar") + class test_from_binary_c_config(unittest.TestCase): """ Unit test for from_binary_c_config @@ -146,6 +149,7 @@ class test_from_binary_c_config(unittest.TestCase): msg="binary_c version doesnt match", ) + class test_return_compilation_dict(unittest.TestCase): """ Unit test for return_compilation_dict @@ -172,6 +176,7 @@ class test_return_compilation_dict(unittest.TestCase): self.assertTrue("libs" in output) self.assertTrue("inc" in output) + class test_create_and_load_logging_function(unittest.TestCase): """ Unit test for return_compilation_dict @@ -198,5 +203,6 @@ class test_create_and_load_logging_function(unittest.TestCase): msg="Name of the libcustom_logging not correct", ) + if __name__ == "__main__": unittest.main() diff --git a/binarycpython/tests/test_dicts.py b/binarycpython/tests/test_dicts.py index 831b8bbadc204c3c8b636c23883a3260905a75fa..8d12b7caa0f00e6173033ba032ce58e9b9fa9fd8 100644 --- a/binarycpython/tests/test_dicts.py +++ b/binarycpython/tests/test_dicts.py @@ -31,7 +31,7 @@ from binarycpython.utils.dicts import ( subtract_dicts, update_dicts, _nested_get, - _nested_set + _nested_set, ) TMP_DIR = temp_dir("tests", "test_dicts") @@ -301,20 +301,19 @@ class test_custom_sort_dict(unittest.TestCase): Test custom_sort_dict """ - input_dict = {'2': 1, '1': {2: 1, 1: 10}, -1: 20, 4: -1} + input_dict = {"2": 1, "1": {2: 1, 1: 10}, -1: 20, 4: -1} # output_1 = custom_sort_dict(input_dict) - desired_output_1 = OrderedDict([(-1, 20), - (4, -1), - ('1', OrderedDict([(1, 10), (2, 1)])), - ('2', 1)] + desired_output_1 = OrderedDict( + [(-1, 20), (4, -1), ("1", OrderedDict([(1, 10), (2, 1)])), ("2", 1)] ) # self.assertEqual(output_1, desired_output_1) + class test_filter_dict(unittest.TestCase): """ Unittests for function filter_dict @@ -329,8 +328,8 @@ class test_filter_dict(unittest.TestCase): Test filter_dict """ - dict_1 = {'a': 10} - input_1 = ['a'] + dict_1 = {"a": 10} + input_1 = ["a"] res_1 = filter_dict(dict_1, input_1) @@ -352,7 +351,7 @@ class test_filter_dict_through_values(unittest.TestCase): Test filter_dict_through_values """ - dict_1 = {'a': 10} + dict_1 = {"a": 10} input_1 = [10] res_1 = filter_dict_through_values(dict_1, input_1) @@ -360,6 +359,7 @@ class test_filter_dict_through_values(unittest.TestCase): self.assertIsInstance(res_1, dict) self.assertFalse(res_1) + class test_prepare_dict(unittest.TestCase): """ Unittests for function prepare_dict @@ -377,14 +377,15 @@ class test_prepare_dict(unittest.TestCase): global_dict = {} # Call function to make sure the nested key contains an empty dict to 
store stuff in - input_1 = ['a', 'b'] + input_1 = ["a", "b"] prepare_dict(global_dict, input_1) # - self.assertIsNotNone(global_dict.get('a', None)) - self.assertIsNotNone(global_dict['a'].get('b', None)) - self.assertIsInstance(global_dict['a']['b'], dict) - self.assertFalse(global_dict['a']['b']) + self.assertIsNotNone(global_dict.get("a", None)) + self.assertIsNotNone(global_dict["a"].get("b", None)) + self.assertIsInstance(global_dict["a"]["b"], dict) + self.assertFalse(global_dict["a"]["b"]) + class test_normalize_dict(unittest.TestCase): """ @@ -400,12 +401,13 @@ class test_normalize_dict(unittest.TestCase): Test normalize_dict """ - input_1 = {'a': 10, 'b': 20, 'c': 4} + input_1 = {"a": 10, "b": 20, "c": 4} res_1 = normalize_dict(input_1) self.assertEqual(sum(list(res_1.values())), 1.0) + class test_multiply_values_dict(unittest.TestCase): """ Unittests for function multiply_values_dict @@ -420,8 +422,8 @@ class test_multiply_values_dict(unittest.TestCase): Test multiply_values_dict """ - input_1 = {'a': 1, 'b': {'c': 10}} - desired_output_1 = {'a': 2, 'b': {'c': 20}} + input_1 = {"a": 1, "b": {"c": 10}} + desired_output_1 = {"a": 2, "b": {"c": 20}} output_1 = multiply_values_dict(input_1, 2) @@ -444,12 +446,13 @@ class test_count_keys_recursive(unittest.TestCase): """ # - input_1 = {'a': 2, 'b': {'c': 20, 'd': {'aa': 1, 'bb': 2}}} + input_1 = {"a": 2, "b": {"c": 20, "d": {"aa": 1, "bb": 2}}} output_1 = count_keys_recursive(input_1) # self.assertEqual(output_1, 6) + class test_keys_to_floats(unittest.TestCase): """ Unittests for function keys_to_floats @@ -464,13 +467,14 @@ class test_keys_to_floats(unittest.TestCase): Test keys_to_floats """ - input_1 = {'a': 1, '1': 2, '1.0': 3, 'b': {4: 10, '5': 1}} + input_1 = {"a": 1, "1": 2, "1.0": 3, "b": {4: 10, "5": 1}} output_1 = keys_to_floats(input_1) - desired_output_1 = {'a': 1, 1.0: 3, 'b': {4.0: 10, 5.0: 1}} + desired_output_1 = {"a": 1, 1.0: 3, "b": {4.0: 10, 5.0: 1}} self.assertEqual(output_1, desired_output_1) + class test_recursive_change_key_to_float(unittest.TestCase): """ Unittests for function recursive_change_key_to_float @@ -485,13 +489,16 @@ class test_recursive_change_key_to_float(unittest.TestCase): Test recursive_change_key_to_float """ - input_1 = {'a': 1, '1': 2, '1.0': 3, 'b': {4: 10, '5': 1}} + input_1 = {"a": 1, "1": 2, "1.0": 3, "b": {4: 10, "5": 1}} output_1 = recursive_change_key_to_float(input_1) - desired_output_1 = OrderedDict([('a', 1), (1.0, 3), ('b', OrderedDict([(4.0, 10), (5.0, 1)]))]) + desired_output_1 = OrderedDict( + [("a", 1), (1.0, 3), ("b", OrderedDict([(4.0, 10), (5.0, 1)]))] + ) self.assertEqual(output_1, desired_output_1) + class test_recursive_change_key_to_string(unittest.TestCase): """ Unittests for function recursive_change_key_to_string @@ -506,18 +513,23 @@ class test_recursive_change_key_to_string(unittest.TestCase): Test recursive_change_key_to_string """ - input_1 = {'a': 1, '1': 2, '1.0': 3, 'b': {4: 10, '5': 1, 6: 10}} + input_1 = {"a": 1, "1": 2, "1.0": 3, "b": {4: 10, "5": 1, 6: 10}} output_1 = recursive_change_key_to_string(input_1, "{:.2E}") - desired_output_1 = OrderedDict([('a', 1), - ('1.00E+00', 3), - ('b', - OrderedDict([('4.00E+00', 10), - ('5.00E+00', 1), - ('6.00E+00', 10)]))]) + desired_output_1 = OrderedDict( + [ + ("a", 1), + ("1.00E+00", 3), + ( + "b", + OrderedDict([("4.00E+00", 10), ("5.00E+00", 1), ("6.00E+00", 10)]), + ), + ] + ) self.assertEqual(output_1, desired_output_1) + class test_multiply_float_values(unittest.TestCase): """ Unittests for function 
multiply_float_values @@ -533,17 +545,18 @@ class test_multiply_float_values(unittest.TestCase): """ # Test with all valid input - input_1 = {1: 2.2, '2': {'a': 2, 'b': 10, 'c': 0.5}} + input_1 = {1: 2.2, "2": {"a": 2, "b": 10, "c": 0.5}} multiply_float_values(input_1, 2) - desired_output_1 = {1: 4.4, '2': {'a': 2, 'b': 10, 'c': 1.0}} + desired_output_1 = {1: 4.4, "2": {"a": 2, "b": 10, "c": 1.0}} # self.assertEqual(input_1, desired_output_1) # Test with unrecognised input: - input_2 = {1: 2.2, '2': {'a': 2, 'b': 10, 'c': 0.5, 'd': dummy('david')}} + input_2 = {1: 2.2, "2": {"a": 2, "b": 10, "c": 0.5, "d": dummy("david")}} _ = multiply_float_values(input_2, 2) + class test_subtract_dicts(unittest.TestCase): """ Unittests for function subtract_dicts @@ -622,8 +635,8 @@ class test_subtract_dicts(unittest.TestCase): Test subtract_dicts resulting in a 0 value. which should be removed """ - dict_1 = {"a": 4, 'b': 0, 'd': 1.0} - dict_2 = {"a": 4, 'c': 0, 'd': 1} + dict_1 = {"a": 4, "b": 0, "d": 1.0} + dict_2 = {"a": 4, "c": 0, "d": 1} output_dict = subtract_dicts(dict_1, dict_2) self.assertIsInstance(output_dict, dict) @@ -638,8 +651,8 @@ class test_subtract_dicts(unittest.TestCase): Test merging dict with lists """ - dict_1 = {"list": [1, 2], 'b': [1]} - dict_2 = {"list": [3, 4], 'c': [1]} + dict_1 = {"list": [1, 2], "b": [1]} + dict_2 = {"list": [3, 4], "c": [1]} self.assertRaises(ValueError, subtract_dicts, dict_1, dict_2) @@ -657,9 +670,8 @@ class test_subtract_dicts(unittest.TestCase): output_dict = subtract_dicts(dict_1, dict_2) self.assertTrue(isinstance(output_dict["dict"], dict)) - self.assertEqual( - output_dict["dict"], {"a": -1, "b": 1, "c": -2} - ) + self.assertEqual(output_dict["dict"], {"a": -1, "b": 1, "c": -2}) + class test_update_dicts(unittest.TestCase): """ @@ -680,9 +692,7 @@ class test_update_dicts(unittest.TestCase): output_dict = update_dicts(dict_1, dict_2) self.assertTrue(isinstance(output_dict["dict"], dict)) - self.assertEqual( - output_dict["dict"], {"a": 2, "b": 1, "c": 2} - ) + self.assertEqual(output_dict["dict"], {"a": 2, "b": 1, "c": 2}) def test_unsupported(self): with Capturing() as output: @@ -693,8 +703,8 @@ class test_update_dicts(unittest.TestCase): Test update_dicts with unsupported types """ - dict_1 = {"list": 2, 'b': [1]} - dict_2 = {"list": [3, 4], 'c': [1]} + dict_1 = {"list": 2, "b": [1]} + dict_2 = {"list": [3, 4], "c": [1]} self.assertRaises(ValueError, update_dicts, dict_1, dict_2) @@ -713,14 +723,15 @@ class test__nested_get(unittest.TestCase): Test _nested_get """ - input_1 = {'a': {'b': 2}} + input_1 = {"a": {"b": 2}} - output_1 = _nested_get(input_1, ['a']) - output_2 = _nested_get(input_1, ['a', 'b']) + output_1 = _nested_get(input_1, ["a"]) + output_2 = _nested_get(input_1, ["a", "b"]) - self.assertEqual(output_1, {'b': 2}) + self.assertEqual(output_1, {"b": 2}) self.assertEqual(output_2, 2) + class test__nested_set(unittest.TestCase): """ Unittests for function _nested_set @@ -736,22 +747,23 @@ class test__nested_set(unittest.TestCase): """ # - input_1 = {'a': 0} - desired_output_1 = {'a': 2} - _nested_set(input_1, ['a'], 2) + input_1 = {"a": 0} + desired_output_1 = {"a": 2} + _nested_set(input_1, ["a"], 2) self.assertEqual(input_1, desired_output_1) # - input_2 = {'a': {'b': 0}} - desired_output_2 = {'a': {'b': 2}} - _nested_set(input_2, ['a', 'b'], 2) + input_2 = {"a": {"b": 0}} + desired_output_2 = {"a": {"b": 2}} + _nested_set(input_2, ["a", "b"], 2) self.assertEqual(input_2, desired_output_2) # - input_3 = {'a': {'b': 0}} - 
desired_output_3 = {'a': {'b': 0, 'd': {'c': 10}}} - _nested_set(input_3, ['a', 'd', 'c'], 10) + input_3 = {"a": {"b": 0}} + desired_output_3 = {"a": {"b": 0, "d": {"c": 10}}} + _nested_set(input_3, ["a", "d", "c"], 10) self.assertEqual(input_3, desired_output_3) + if __name__ == "__main__": unittest.main() diff --git a/binarycpython/tests/test_ensemble.py b/binarycpython/tests/test_ensemble.py index 169729eb87b60aa9c75ef3069f4672d1de4cb454..3fe789a7cd98db85099b1d8cd827a8248b87ad36 100644 --- a/binarycpython/tests/test_ensemble.py +++ b/binarycpython/tests/test_ensemble.py @@ -22,14 +22,14 @@ from binarycpython.utils.ensemble import ( extract_ensemble_json_from_string, load_ensemble, ensemble_file_type, - open_ensemble - + open_ensemble, ) from binarycpython.utils.grid import Population TMP_DIR = temp_dir("tests", "test_ensemble") TEST_VERBOSITY = 1 + class test_binaryc_json_serializer(unittest.TestCase): """ Unittests for function binaryc_json_serializer @@ -99,12 +99,12 @@ class test_BinarycEncoder(unittest.TestCase): Test that the object is converted to strings """ - input_1 = {'a': BinarycEncoder} + input_1 = {"a": BinarycEncoder} output_1 = json.dumps(input_1, cls=BinarycEncoder) self.assertTrue(isinstance(output_1, str)) dict_output_1 = json.loads(output_1) - self.assertTrue(isinstance(dict_output_1['a'], str)) + self.assertTrue(isinstance(dict_output_1["a"], str)) class test_BinarycDecoder(unittest.TestCase): @@ -121,7 +121,6 @@ class test_BinarycDecoder(unittest.TestCase): Test that the object is converted to floats """ - input_1 = '{"a": "10.0"}' output_1 = json.loads(input_1) output_2 = json.loads(input_1, cls=BinarycDecoder) @@ -144,11 +143,11 @@ class test_extract_ensemble_json_from_string(unittest.TestCase): Simple test without errors """ - input_1 = "ENSEMBLE_JSON {\"a\": 10}" + input_1 = 'ENSEMBLE_JSON {"a": 10}' output_1 = extract_ensemble_json_from_string(input_1) self.assertTrue(isinstance(output_1, dict)) - self.assertEqual(output_1, {'a': 10}) + self.assertEqual(output_1, {"a": 10}) def test_2(self): with Capturing() as _: @@ -159,12 +158,12 @@ class test_extract_ensemble_json_from_string(unittest.TestCase): Simple test with 2 lines """ - input_1 = "ENSEMBLE_JSON {\"a\": 10}\nENSEMBLE_JSON {\"b\": 20}" + input_1 = 'ENSEMBLE_JSON {"a": 10}\nENSEMBLE_JSON {"b": 20}' - capturedOutput = StringIO() # Create StringIO object - sys.stdout = capturedOutput # and redirect stdout. + capturedOutput = StringIO() # Create StringIO object + sys.stdout = capturedOutput # and redirect stdout. _ = extract_ensemble_json_from_string(input_1) - sys.stdout = sys.__stdout__ # Reset redirect. + sys.stdout = sys.__stdout__ # Reset redirect. self.assertTrue(capturedOutput.getvalue().startswith("Warning:")) @@ -192,13 +191,13 @@ class test_extract_ensemble_json_from_string(unittest.TestCase): Simple test with missing starting string """ - input_1 = " {\"a\": 10}" + input_1 = ' {"a": 10}' # - capturedOutput = StringIO() # Create StringIO object - sys.stdout = capturedOutput # and redirect stdout. + capturedOutput = StringIO() # Create StringIO object + sys.stdout = capturedOutput # and redirect stdout. _ = extract_ensemble_json_from_string(input_1) - sys.stdout = sys.__stdout__ # Reset redirect. + sys.stdout = sys.__stdout__ # Reset redirect. 
self.assertTrue(capturedOutput.getvalue().startswith("Error:")) @@ -263,9 +262,15 @@ class test_load_ensemble(unittest.TestCase): _ = test_pop_1.evolve() ensemble_output_1 = test_pop_1.grid_ensemble_results - self.normal_ensemble_output_name = os.path.join(TMP_DIR, 'test_load_ensemble_ensemble_output.json') - self.bzip2_ensemble_output_name = os.path.join(TMP_DIR, 'test_load_ensemble_ensemble_output.json.bz2') - self.no_extension_ensemble_output_name = os.path.join(TMP_DIR, 'test_load_ensemble_ensemble_output') + self.normal_ensemble_output_name = os.path.join( + TMP_DIR, "test_load_ensemble_ensemble_output.json" + ) + self.bzip2_ensemble_output_name = os.path.join( + TMP_DIR, "test_load_ensemble_ensemble_output.json.bz2" + ) + self.no_extension_ensemble_output_name = os.path.join( + TMP_DIR, "test_load_ensemble_ensemble_output" + ) # Write ensemble to json with normal write test_pop_1.write_ensemble(self.normal_ensemble_output_name) @@ -274,7 +279,7 @@ class test_load_ensemble(unittest.TestCase): test_pop_1.write_ensemble(self.bzip2_ensemble_output_name) # Write ensemble without extension - with open(self.no_extension_ensemble_output_name, 'w') as f: + with open(self.no_extension_ensemble_output_name, "w") as f: f.write(json.dumps(ensemble_output_1)) def test_1(self): @@ -305,7 +310,6 @@ class test_load_ensemble(unittest.TestCase): self.assertTrue(isinstance(loaded_data_1, dict)) - def test_3(self): with Capturing() as _: self._test_3() @@ -315,12 +319,11 @@ class test_load_ensemble(unittest.TestCase): Simple test to load ensemble with timing output """ - # - capturedOutput = StringIO() # Create StringIO object - sys.stdout = capturedOutput # and redirect stdout. + capturedOutput = StringIO() # Create StringIO object + sys.stdout = capturedOutput # and redirect stdout. _ = load_ensemble(self.normal_ensemble_output_name, timing=True) - sys.stdout = sys.__stdout__ # Reset redirect. + sys.stdout = sys.__stdout__ # Reset redirect. self.assertTrue("Took" in capturedOutput.getvalue()) @@ -334,10 +337,12 @@ class test_load_ensemble(unittest.TestCase): """ # - capturedOutput = StringIO() # Create StringIO object - sys.stdout = capturedOutput # and redirect stdout. - loaded_data_1 = load_ensemble(self.normal_ensemble_output_name, timing=True, convert_float_keys=True) - sys.stdout = sys.__stdout__ # Reset redirect. + capturedOutput = StringIO() # Create StringIO object + sys.stdout = capturedOutput # and redirect stdout. + loaded_data_1 = load_ensemble( + self.normal_ensemble_output_name, timing=True, convert_float_keys=True + ) + sys.stdout = sys.__stdout__ # Reset redirect. 
self.assertTrue("Took" in capturedOutput.getvalue()) @@ -435,9 +440,15 @@ class test_open_ensemble(unittest.TestCase): _ = test_pop_1.evolve() ensemble_output_1 = test_pop_1.grid_ensemble_results - self.normal_ensemble_output_name = os.path.join(TMP_DIR, 'test_open_ensemble_ensemble_output.json') - self.bzip2_ensemble_output_name = os.path.join(TMP_DIR, 'test_open_ensemble_ensemble_output.json.bz2') - self.gzip_ensemble_output_name = os.path.join(TMP_DIR, 'test_open_ensemble_ensemble_output.json.gz') + self.normal_ensemble_output_name = os.path.join( + TMP_DIR, "test_open_ensemble_ensemble_output.json" + ) + self.bzip2_ensemble_output_name = os.path.join( + TMP_DIR, "test_open_ensemble_ensemble_output.json.bz2" + ) + self.gzip_ensemble_output_name = os.path.join( + TMP_DIR, "test_open_ensemble_ensemble_output.json.gz" + ) # self.msgpack_ensemble_output_name = os.path.join(TMP_DIR, 'test_open_ensemble_ensemble_output.msgpack') # Write ensemble to json with normal write @@ -461,7 +472,9 @@ class test_open_ensemble(unittest.TestCase): filetype tests """ - self.msgpack_ensemble_output_name = os.path.join(TMP_DIR, 'test_open_ensemble_ensemble_output.msgpack.gz') + self.msgpack_ensemble_output_name = os.path.join( + TMP_DIR, "test_open_ensemble_ensemble_output.msgpack.gz" + ) # handle_1 = open_ensemble(self.normal_ensemble_output_name) diff --git a/binarycpython/tests/test_functions.py b/binarycpython/tests/test_functions.py index 152df888399bd76b8d82f2abdbb88aedcf180b2a..dcc265abaf667d2c927df97e1d0b4f1280dbc8e2 100644 --- a/binarycpython/tests/test_functions.py +++ b/binarycpython/tests/test_functions.py @@ -27,7 +27,6 @@ from binarycpython.utils.functions import ( get_help_super, make_build_text, write_binary_c_parameter_descriptions_to_rst_file, - get_username, bin_data, ) @@ -519,7 +518,9 @@ class test_write_binary_c_parameter_descriptions_to_rst_file(unittest.TestCase): TMP_DIR, "test_write_binary_c_parameter_descriptions_to_rst_file_test_1.txt", ) - self.assertRaises(ValueError, write_binary_c_parameter_descriptions_to_rst_file, output_name) + self.assertRaises( + ValueError, write_binary_c_parameter_descriptions_to_rst_file, output_name + ) def test_checkfile(self): with Capturing() as output: diff --git a/binarycpython/tests/test_grid.py b/binarycpython/tests/test_grid.py index 37e303d112595bb035667a8c05b4153a80c9a8b8..71c2ba13cbfb1c40693b8ff8737e1cfd6f40153f 100644 --- a/binarycpython/tests/test_grid.py +++ b/binarycpython/tests/test_grid.py @@ -30,12 +30,7 @@ import gzip import unittest import numpy as np -from binarycpython.utils.functions import ( - temp_dir, - Capturing, - remove_file, - bin_data -) +from binarycpython.utils.functions import temp_dir, Capturing, remove_file, bin_data from binarycpython.utils.dicts import ( merge_dicts, ) @@ -45,6 +40,7 @@ from binarycpython.utils.grid import Population TMP_DIR = temp_dir("tests", "test_grid") TEST_VERBOSITY = 1 + def parse_function_test_grid_evolve_2_threads_with_custom_logging(self, output): """ Simple parse function that directly appends all the output to a file @@ -71,6 +67,7 @@ def parse_function_test_grid_evolve_2_threads_with_custom_logging(self, output): with open(output_filename, "a") as first_f: first_f.write(output + "\n") + def parse_function_adding_results(self, output): """ Example parse function @@ -106,7 +103,7 @@ def parse_function_adding_results(self, output): class test__setup(unittest.TestCase): """ - Unittests for _setup function + Unittests for _setup function """ def test_setup(self): @@ -132,7 +129,7 @@ class 
test__setup(unittest.TestCase): class test_set(unittest.TestCase): """ - Unittests for _setup function + Unittests for _setup function """ def test_set(self): @@ -163,7 +160,7 @@ class test_set(unittest.TestCase): class test_cmdline(unittest.TestCase): """ - Unittests for cmdline function + Unittests for cmdline function """ def test_cmdline(self): @@ -211,7 +208,7 @@ class test_cmdline(unittest.TestCase): class test__return_argline(unittest.TestCase): """ - Unittests for _return_argline function + Unittests for _return_argline function """ def test__return_argline(self): @@ -242,7 +239,7 @@ class test__return_argline(unittest.TestCase): class test_return_population_settings(unittest.TestCase): """ - Unittests for return_population_settings function + Unittests for return_population_settings function """ def test_return_population_settings(self): @@ -275,7 +272,7 @@ class test_return_population_settings(unittest.TestCase): class test_return_binary_c_defaults(unittest.TestCase): """ - Unittests for return_binary_c_defaults function + Unittests for return_binary_c_defaults function """ def test_return_binary_c_defaults(self): @@ -296,7 +293,7 @@ class test_return_binary_c_defaults(unittest.TestCase): class test_return_all_info(unittest.TestCase): """ - Unittests for return_all_info function + Unittests for return_all_info function """ def test_return_all_info(self): @@ -325,7 +322,7 @@ class test_return_all_info(unittest.TestCase): class test_export_all_info(unittest.TestCase): """ - Unittests for export_all_info function + Unittests for export_all_info function """ def test_export_all_info(self): @@ -397,7 +394,7 @@ class test_export_all_info(unittest.TestCase): class test__cleanup_defaults(unittest.TestCase): """ - Unittests for _cleanup_defaults function + Unittests for _cleanup_defaults function """ def test__cleanup_defaults(self): @@ -416,7 +413,7 @@ class test__cleanup_defaults(unittest.TestCase): class test__increment_probtot(unittest.TestCase): """ - Unittests for _increment_probtot function + Unittests for _increment_probtot function """ def test__increment_probtot(self): @@ -435,7 +432,7 @@ class test__increment_probtot(unittest.TestCase): class test__increment_count(unittest.TestCase): """ - Unittests for _increment_count function + Unittests for _increment_count function """ def test__increment_count(self): @@ -454,7 +451,7 @@ class test__increment_count(unittest.TestCase): class test__dict_from_line_source_file(unittest.TestCase): """ - Unittests for _dict_from_line_source_file function + Unittests for _dict_from_line_source_file function """ def test__dict_from_line_source_file(self): @@ -485,7 +482,7 @@ class test__dict_from_line_source_file(unittest.TestCase): class test_evolve_single(unittest.TestCase): """ - Unittests for evolve_single function + Unittests for evolve_single function """ def test_evolve_single(self): @@ -659,6 +656,7 @@ class test_resultdict(unittest.TestCase): test_case_dict, dict(example_pop.grid_results["example"]["mass"]) ) + class test_grid_evolve(unittest.TestCase): """ Unittests for function Population.evolve() @@ -1169,5 +1167,6 @@ Printf("TEST_CUSTOM_LOGGING_1 %30.12e %g %g %g %g\\n", 1e-8, ) + if __name__ == "__main__": unittest.main() diff --git a/binarycpython/tests/test_useful_funcs.py b/binarycpython/tests/test_useful_funcs.py index 4e5f24badcc86ecc170d6467f602aaed10347228..1c1ed570951cfca8872beeb18e8e36f881ac6569 100644 --- a/binarycpython/tests/test_useful_funcs.py +++ b/binarycpython/tests/test_useful_funcs.py @@ -18,6 +18,7 @@ from 
binarycpython.utils.useful_funcs import ( calc_period_from_sep, ) + class test_calc_period_from_sep(unittest.TestCase): """ Unittests for function calc_period_from_sep diff --git a/binarycpython/tests/tests_population_extensions/test_HPC.py b/binarycpython/tests/tests_population_extensions/test_HPC.py index 4794a1ac2b17213958a330deaa5d4a2bd4323601..ccfc1a7e2ee8d242a6d055b2af708975de8f2076 100644 --- a/binarycpython/tests/tests_population_extensions/test_HPC.py +++ b/binarycpython/tests/tests_population_extensions/test_HPC.py @@ -1,23 +1,13 @@ """ Unit tests for the HPC module -TODO: HPC_njobs TODO: HPC_make_joiningfile TODO: HPC_joinlist TODO: HPC_load_joinfiles_list TODO: HPC_join_from_files TODO: HPC_can_join -TODO: HPC_job TODO: HPC_job_task -TODO: HPC_job_type -TODO: HPC_jobID -TODO: HPC_jobID_tuple -TODO: HPC_set_status -TODO: HPC_get_status -TODO: HPC_dirs TODO: HPC_grid -TODO: HPC_check_requirements -TODO: HPC_id_filename TODO: HPC_id_from_dir TODO: HPC_restore TODO: HPC_join_previous @@ -30,7 +20,571 @@ TODO: HPC_dump_status TODO: HPC_queue_stats """ +import os +import shutil import unittest +from binarycpython.utils.functions import Capturing, temp_dir +from binarycpython.utils.grid import Population + +TMP_DIR = temp_dir("tests", "test_condor") +shutil.rmtree(TMP_DIR) +os.makedirs(TMP_DIR, exist_ok=True) + + +class test_HPC_njobs(unittest.TestCase): + """ + Unittests for function HPC_njobs + """ + + def test_condor(self): + with Capturing() as output: + self._test_condor() + + def _test_condor(self): + """ + Unit test for HPC_nJobs for condor + """ + + condor_pop = Population() + condor_pop.grid_options["condor"] = 1 + condor_pop.grid_options["condor_njobs"] = 10 + + result_condor = condor_pop.HPC_njobs() + + self.assertEqual(result_condor, 10) + + def test_slurm(self): + with Capturing() as output: + self._test_slurm() + + def _test_slurm(self): + """ + Unit test for HPC_nJobs for slurm + """ + + slurm_pop = Population() + slurm_pop.grid_options["slurm"] = 1 + slurm_pop.grid_options["slurm_njobs"] = 11 + + result_slurm = slurm_pop.HPC_njobs() + + self.assertEqual(result_slurm, 11) + + def test_none(self): + with Capturing() as output: + self._test_none() + + def _test_none(self): + """ + Unit test for HPC_nJobs when nothing is set + """ + + none_pop = Population() + self.assertRaises(TypeError, none_pop.HPC_njobs) + + +class test_HPC_job(unittest.TestCase): + """ + Unittests for function HPC_job + """ + + def test_HPC_job(self): + with Capturing() as output: + self._test_HPC_job() + + def _test_HPC_job(self): + """ + Test to see if its busy with a job + """ + + slurm_pop = Population() + slurm_pop.grid_options["slurm"] = 1 + + self.assertTrue(slurm_pop.HPC_job()) + + +class test_HPC_job_type(unittest.TestCase): + """ + Unittests for function HPC_job_type + """ + + def test_condor(self): + with Capturing() as output: + self._test_condor() + + def _test_condor(self): + """ + Unit test for HPC_nJobs for condor + """ + + condor_pop = Population() + condor_pop.grid_options["condor"] = 1 + result_condor = condor_pop.HPC_job_type() + + self.assertEqual(result_condor, "condor") + + def test_slurm(self): + with Capturing() as output: + self._test_slurm() + + def _test_slurm(self): + """ + Unit test for HPC_nJobs for slurm + """ + + slurm_pop = Population() + slurm_pop.grid_options["slurm"] = 1 + result_slurm = slurm_pop.HPC_job_type() + + self.assertEqual(result_slurm, "slurm") + + def test_none(self): + with Capturing() as output: + self._test_none() + + def _test_none(self): + """ + 
Unit test for HPC_nJobs when nothing is set + """ + + none_pop = Population() + result_none = none_pop.HPC_job_type() + self.assertEqual(result_none, "None") + + +class test_HPC_jobID(unittest.TestCase): + """ + Unittests for function HPC_jobID + """ + + def test_condor(self): + with Capturing() as output: + self._test_condor() + + def _test_condor(self): + """ + Unit test for HPC_jobID for condor + """ + + condor_pop = Population() + condor_pop.grid_options["condor"] = 1 + condor_pop.grid_options["condor_ClusterID"] = 2 + condor_pop.grid_options["condor_Process"] = 3 + + self.assertEqual( + condor_pop.HPC_jobID(), + "{ClusterID}.{Process}".format(ClusterID=2, Process=3), + ) + + def test_slurm(self): + with Capturing() as output: + self._test_slurm() + + def _test_slurm(self): + """ + Unit test for HPC_jobID for slurm + """ + + slurm_pop = Population() + slurm_pop.grid_options["slurm"] = 1 + slurm_pop.grid_options["slurm_jobid"] = 4 + slurm_pop.grid_options["slurm_jobarrayindex"] = 5 + + self.assertEqual( + slurm_pop.HPC_jobID(), + "{jobid}.{jobarrayindex}".format(jobid=4, jobarrayindex=5), + ) + + def test_none(self): + with Capturing() as output: + self._test_none() + + def _test_none(self): + """ + Unit test for HPC_nJobs when nothing is set + """ + + none_pop = Population() + result_none = none_pop.HPC_jobID() + self.assertEqual(result_none, None) + + +class test_HPC_jobID_tuple(unittest.TestCase): + """ + Unittests for function HPC_jobID_tuple + """ + + def test_condor(self): + with Capturing() as output: + self._test_condor() + + def _test_condor(self): + """ + Unit test for HPC_jobID_tuple for condor + """ + + condor_pop = Population() + condor_pop.grid_options["condor"] = 1 + condor_pop.grid_options["condor_ClusterID"] = 2 + condor_pop.grid_options["condor_Process"] = 3 + + self.assertEqual(condor_pop.HPC_jobID_tuple(), ("2", "3")) + + def test_slurm(self): + with Capturing() as output: + self._test_slurm() + + def _test_slurm(self): + """ + Unit test for HPC_jobID for slurm + """ + + slurm_pop = Population() + slurm_pop.grid_options["slurm"] = 1 + slurm_pop.grid_options["slurm_jobid"] = 4 + slurm_pop.grid_options["slurm_jobarrayindex"] = 5 + + self.assertEqual(slurm_pop.HPC_jobID_tuple(), ("4", "5")) + + def test_none(self): + with Capturing() as output: + self._test_none() + + def _test_none(self): + """ + Unit test for HPC_nJobs when nothing is set + """ + + none_pop = Population() + self.assertEqual(none_pop.HPC_jobID_tuple(), (None, None)) + + +class test_HPC_dirs(unittest.TestCase): + """ + Unittests for function HPC_jobID_tuple + """ + + def test_condor(self): + with Capturing() as output: + self._test_condor() + + def _test_condor(self): + """ + Unit test for HPC_dirs for condor + """ + + condor_pop = Population() + condor_pop.grid_options["condor"] = 1 + self.assertEqual(condor_pop.HPC_dirs(), ["condor_dir"]) + + def test_slurm(self): + with Capturing() as output: + self._test_slurm() + + def _test_slurm(self): + """ + Unit test for HPC_jobID for slurm + """ + + slurm_pop = Population() + slurm_pop.grid_options["slurm"] = 1 + self.assertEqual(slurm_pop.HPC_dirs(), ["slurm_dir"]) + + def test_none(self): + with Capturing() as output: + self._test_none() + + def _test_none(self): + """ + Unit test for HPC_nJobs when nothing is set + """ + + none_pop = Population() + self.assertEqual(none_pop.HPC_dirs(), []) + + +class test_HPC_id_filename(unittest.TestCase): + """ + Unittests for function HPC_id_filename + """ + + def test_condor(self): + with Capturing() as output: + 
self._test_condor() + + def _test_condor(self): + """ + Unit test for HPC_id_filename for condor + """ + + condor_pop = Population() + condor_pop.grid_options["condor"] = 1 + self.assertEqual(condor_pop.HPC_id_filename(), "ClusterID") + + def test_slurm(self): + with Capturing() as output: + self._test_slurm() + + def _test_slurm(self): + """ + Unit test for HPC_id_filename for slurm + """ + + slurm_pop = Population() + slurm_pop.grid_options["slurm"] = 1 + self.assertEqual(slurm_pop.HPC_id_filename(), "jobid") + + def test_none(self): + with Capturing() as output: + self._test_none() + + def _test_none(self): + """ + Unit test for HPC_id_filename when nothing is set + """ + + none_pop = Population() + self.assertEqual(none_pop.HPC_id_filename(), None) + + +class test_HPC_check_requirements(unittest.TestCase): + """ + Unittests for function HPC_check_requirements + """ + + def test_condor(self): + with Capturing() as output: + self._test_condor() + + def _test_condor(self): + """ + Unit test for HPC_check_requirements for condor + """ + + condor_pop = Population() + condor_pop.grid_options["condor"] = 1 + self.assertEqual(condor_pop.HPC_id_filename(), "ClusterID") + + condor_pop = Population(tmp_dir=TMP_DIR) + condor_pop.grid_options["condor"] = 1 + + # First the False test + result_1 = condor_pop.HPC_check_requirements() + self.assertFalse(result_1[0]) + self.assertTrue(len(result_1[1]) > 0) + + # First the True test + condor_pop.grid_options["condor_dir"] = TMP_DIR + result_2 = condor_pop.HPC_check_requirements() + self.assertTrue(result_2[0]) + self.assertTrue(len(result_2[1]) == 0) + + def test_slurm(self): + with Capturing() as output: + self._test_slurm() + + def _test_slurm(self): + """ + Unit test for HPC_check_requirements for slurm + """ + + slurm_pop = Population(tmp_dir=TMP_DIR) + slurm_pop.grid_options["slurm"] = 1 + + # First the False test + result_1 = slurm_pop.slurm_check_requirements() + self.assertFalse(result_1[0]) + self.assertTrue(len(result_1[1]) > 0) + + # First the True test + slurm_pop.grid_options["slurm_dir"] = TMP_DIR + result_2 = slurm_pop.slurm_check_requirements() + self.assertTrue(result_2[0]) + self.assertTrue(len(result_2[1]) == 0) + + def test_none(self): + with Capturing() as output: + self._test_none() + + def _test_none(self): + """ + Unit test for HPC_check_requirements when nothing is set + """ + + none_pop = Population(tmp_dir=TMP_DIR) + result_none = none_pop.slurm_check_requirements() + self.assertTrue(result_none[0]) + self.assertTrue(len(result_none[1]) == 0) + + +class test_HPC_set_status(unittest.TestCase): + """ + Unittests for function HPC_set_status + """ + + def test_condor(self): + with Capturing() as output: + self._test_condor() + + def _test_condor(self): + """ + Unit test for HPC_set_status for condor + """ + + condor_pop = Population(tmp_dir=TMP_DIR) + condor_pop.grid_options["condor"] = 1 + condor_pop.grid_options["condor_ClusterID"] = 2 + condor_pop.grid_options["condor_Process"] = 3 + condor_pop.grid_options["condor_dir"] = TMP_DIR + + # + os.makedirs( + os.path.dirname( + condor_pop.condor_status_file( + condor_dir=condor_pop.grid_options["condor_dir"] + ) + ), + exist_ok=True, + ) + condor_pop.HPC_set_status("test_set_condor_status") + + # Check if ID file exists + self.assertTrue( + os.path.isfile( + os.path.join(condor_pop.grid_options["condor_dir"], "ClusterID") + ) + ) + + # Check if status file exists + self.assertTrue( + os.path.isfile( + condor_pop.condor_status_file( + condor_dir=condor_pop.grid_options["condor_dir"] + 
) + ) + ) + + with open( + condor_pop.condor_status_file( + condor_dir=condor_pop.grid_options["condor_dir"] + ), + "r", + ) as f: + content_file = f.read() + self.assertTrue(content_file == "test_set_condor_status") + + def test_slurm(self): + with Capturing() as output: + self._test_slurm() + + def _test_slurm(self): + """ + Unit test for HPC_set_status for slurm + """ + + slurm_pop = Population(tmp_dir=TMP_DIR) + slurm_pop.grid_options["slurm"] = 1 + slurm_pop.grid_options["slurm_jobid"] = 4 + slurm_pop.grid_options["slurm_jobarrayindex"] = 5 + slurm_pop.grid_options["slurm_dir"] = TMP_DIR + + # + os.makedirs( + os.path.dirname( + slurm_pop.slurm_status_file( + slurm_dir=slurm_pop.grid_options["slurm_dir"] + ) + ), + exist_ok=True, + ) + slurm_pop.HPC_set_status("test_set_slurm_status") + + # Check if ID file exists + self.assertTrue( + os.path.isfile(os.path.join(slurm_pop.grid_options["slurm_dir"], "jobid")) + ) + + # Check if status file exists + self.assertTrue( + os.path.isfile( + slurm_pop.slurm_status_file( + slurm_dir=slurm_pop.grid_options["slurm_dir"] + ) + ) + ) + + with open( + slurm_pop.slurm_status_file(slurm_dir=slurm_pop.grid_options["slurm_dir"]), + "r", + ) as f: + content_file = f.read() + self.assertTrue(content_file == "test_set_slurm_status") + + +class test_HPC_get_status(unittest.TestCase): + """ + Unittests for function HPC_get_status + """ + + def test_condor(self): + with Capturing() as output: + self._test_condor() + + def _test_condor(self): + """ + Unit test for HPC_get_status for condor + """ + + condor_pop = Population(tmp_dir=TMP_DIR) + condor_pop.grid_options["condor"] = 1 + condor_pop.grid_options["condor_ClusterID"] = 2 + condor_pop.grid_options["condor_Process"] = 3 + condor_pop.grid_options["condor_dir"] = TMP_DIR + + # + os.makedirs( + os.path.dirname( + condor_pop.condor_status_file( + condor_dir=condor_pop.grid_options["condor_dir"] + ) + ), + exist_ok=True, + ) + condor_pop.HPC_set_status("test_get_condor_status") + + # + status = condor_pop.HPC_get_status() + self.assertEqual(status, "test_get_condor_status") + + def test_slurm(self): + with Capturing() as output: + self._test_slurm() + + def _test_slurm(self): + """ + Unit test for HPC_set_status for slurm + """ + + slurm_pop = Population(tmp_dir=TMP_DIR) + slurm_pop.grid_options["slurm"] = 1 + slurm_pop.grid_options["slurm_jobid"] = 4 + slurm_pop.grid_options["slurm_jobarrayindex"] = 5 + slurm_pop.grid_options["slurm_dir"] = TMP_DIR + + # + os.makedirs( + os.path.dirname( + slurm_pop.slurm_status_file( + slurm_dir=slurm_pop.grid_options["slurm_dir"] + ) + ), + exist_ok=True, + ) + slurm_pop.HPC_set_status("test_set_slurm_status") + + status = slurm_pop.HPC_get_status() + self.assertEqual(status, "test_set_slurm_status") + if __name__ == "__main__": - unittest.main() \ No newline at end of file + unittest.main() diff --git a/binarycpython/tests/tests_population_extensions/test_Moe_di_Stefano_2017.py b/binarycpython/tests/tests_population_extensions/test_Moe_di_Stefano_2017.py index 94ad7e8db9963cbf4a4fca2adba2bb83f541b679..69a9f26d91eb739a220658b86cf2bc2a6296c6e5 100644 --- a/binarycpython/tests/tests_population_extensions/test_Moe_di_Stefano_2017.py +++ b/binarycpython/tests/tests_population_extensions/test_Moe_di_Stefano_2017.py @@ -15,4 +15,4 @@ TODO: get_Moe_di_Stefano_2017_default_options_description import unittest if __name__ == "__main__": - unittest.main() \ No newline at end of file + unittest.main() diff --git a/binarycpython/tests/tests_population_extensions/test_analytics.py 
b/binarycpython/tests/tests_population_extensions/test_analytics.py index 152d27312d5c2408adf717e2d52330278c9efcd7..81ba30315aa45c169763df9f6997e9964c66eec4 100644 --- a/binarycpython/tests/tests_population_extensions/test_analytics.py +++ b/binarycpython/tests/tests_population_extensions/test_analytics.py @@ -10,4 +10,4 @@ TODO: CPU_time import unittest if __name__ == "__main__": - unittest.main() \ No newline at end of file + unittest.main() diff --git a/binarycpython/tests/tests_population_extensions/test_cache.py b/binarycpython/tests/tests_population_extensions/test_cache.py index 19e526381c97a1e844b9de8a2fdc48a0a7d1a8aa..4f3de7284d84d176484f733c57caa70b30e68494 100644 --- a/binarycpython/tests/tests_population_extensions/test_cache.py +++ b/binarycpython/tests/tests_population_extensions/test_cache.py @@ -10,4 +10,4 @@ TODO: test_caches import unittest if __name__ == "__main__": - unittest.main() \ No newline at end of file + unittest.main() diff --git a/binarycpython/tests/tests_population_extensions/test_condor.py b/binarycpython/tests/tests_population_extensions/test_condor.py index 4a449764fb0c17dee75edaf9427c7b8ba788a00b..7a07e38ed7626effa2034a59ccdfd6040095a8c9 100644 --- a/binarycpython/tests/tests_population_extensions/test_condor.py +++ b/binarycpython/tests/tests_population_extensions/test_condor.py @@ -1,20 +1,192 @@ """ Unit classes for the _condor module population extension -TODO: condorID TODO: condorpath TODO: condor_status_file -TODO: condor_check_requirements -TODO: condor_dirs -TODO: set_condor_status -TODO: get_condor_status -TODO: ondor_outfile +TODO: condor_outfile TODO: make_condor_dirs TODO: condor_grid TODO: condor_queue_stats """ +import os +import shutil import unittest +from binarycpython.utils.functions import Capturing, temp_dir +from binarycpython.utils.grid import Population + +TMP_DIR = temp_dir("tests", "test_condor") +shutil.rmtree(TMP_DIR) +os.makedirs(TMP_DIR, exist_ok=True) + + +class test_condorID(unittest.TestCase): + """ + Unittests for function HPC_jobID + """ + + def test_condorID(self): + with Capturing() as output: + self._test_condorID() + + def _test_condorID(self): + """ + Unit test for condorID for condor + """ + + condor_pop = Population(tmp_dir=TMP_DIR) + condor_pop.grid_options["condor"] = 1 + condor_pop.grid_options["condor_ClusterID"] = 2 + condor_pop.grid_options["condor_Process"] = 3 + + self.assertEqual( + condor_pop.condorID(), + "{ClusterID}.{Process}".format(ClusterID=2, Process=3), + ) + + +class test_condor_dirs(unittest.TestCase): + """ + Unittests for function condor_dirs + """ + + def test_condor_dirs(self): + with Capturing() as output: + self._test_condor_dirs() + + def _test_condor_dirs(self): + """ + Unit test for condor_dirs for condor + """ + + condor_pop = Population(tmp_dir=TMP_DIR) + condor_pop.grid_options["condor"] = 1 + self.assertEqual(condor_pop.condor_dirs(), ["condor_dir"]) + + +class test_condor_check_requirements(unittest.TestCase): + """ + Unittests for function condor_check_requirements + """ + + def test_condor_check_requirements(self): + with Capturing() as output: + self._test_condor_check_requirements() + + def _test_condor_check_requirements(self): + """ + Unit test for condor_check_requirements for condor + """ + + condor_pop = Population(tmp_dir=TMP_DIR) + condor_pop.grid_options["condor"] = 1 + + # First the False test + result_1 = condor_pop.condor_check_requirements() + self.assertFalse(result_1[0]) + self.assertTrue(len(result_1[1]) > 0) + + # First the True test + 
condor_pop.grid_options["condor_dir"] = TMP_DIR + result_2 = condor_pop.condor_check_requirements() + self.assertTrue(result_2[0]) + self.assertTrue(len(result_2[1]) == 0) + + +class test_set_condor_status(unittest.TestCase): + """ + Unittests for function HPC_jobID + """ + + def test_set_condor_status(self): + with Capturing() as output: + self._test_set_condor_status() + + def _test_set_condor_status(self): + """ + Unit test for set_condor_status for condor + """ + + condor_pop = Population(tmp_dir=TMP_DIR) + condor_pop.grid_options["condor"] = 1 + condor_pop.grid_options["condor_ClusterID"] = 2 + condor_pop.grid_options["condor_Process"] = 3 + condor_pop.grid_options["condor_dir"] = TMP_DIR + + id_filename = os.path.isfile( + os.path.join(condor_pop.grid_options["condor_dir"], "ClusterID") + ) + if os.path.isfile(id_filename): + os.remove(id_filename) + + # + os.makedirs( + os.path.dirname( + condor_pop.condor_status_file( + condor_dir=condor_pop.grid_options["condor_dir"] + ) + ), + exist_ok=True, + ) + condor_pop.set_condor_status("test_set_condor_status") + + # Check if ID file exists + self.assertTrue(os.path.exists(id_filename)) + + # Check if status file exists + self.assertTrue( + os.path.isfile( + condor_pop.condor_status_file( + condor_dir=condor_pop.grid_options["condor_dir"] + ) + ) + ) + + with open( + condor_pop.condor_status_file( + condor_dir=condor_pop.grid_options["condor_dir"] + ), + "r", + ) as f: + content_file = f.read() + self.assertTrue(content_file == "test_set_condor_status") + + +class test_get_condor_status(unittest.TestCase): + """ + Unittests for function get_condor_status + """ + + def test_get_condor_status(self): + with Capturing() as output: + self._test_get_condor_status() + + def _test_get_condor_status(self): + """ + Unit test for get_condor_status for condor + """ + + condor_pop = Population(tmp_dir=TMP_DIR) + condor_pop.grid_options["condor"] = 1 + condor_pop.grid_options["condor_ClusterID"] = 2 + condor_pop.grid_options["condor_Process"] = 3 + condor_pop.grid_options["condor_dir"] = TMP_DIR + + # + os.makedirs( + os.path.dirname( + condor_pop.condor_status_file( + condor_dir=condor_pop.grid_options["condor_dir"] + ) + ), + exist_ok=True, + ) + condor_pop.set_condor_status("test_get_condor_status") + + # + status = condor_pop.get_condor_status() + self.assertEqual(status, "test_get_condor_status") + if __name__ == "__main__": - unittest.main() \ No newline at end of file + unittest.main() diff --git a/binarycpython/tests/tests_population_extensions/test_dataIO.py b/binarycpython/tests/tests_population_extensions/test_dataIO.py index aa3a2ad3f19083c55c3a1ac4d2e04f7ff357662f..bfe30739e4f7a44ca539461e4bc7ea092fdfe6cc 100644 --- a/binarycpython/tests/tests_population_extensions/test_dataIO.py +++ b/binarycpython/tests/tests_population_extensions/test_dataIO.py @@ -24,4 +24,4 @@ TODO: NFSpath import unittest if __name__ == "__main__": - unittest.main() \ No newline at end of file + unittest.main() diff --git a/binarycpython/tests/tests_population_extensions/test_distribution_functions.py b/binarycpython/tests/tests_population_extensions/test_distribution_functions.py index a471f72acb6aa3676de0da6b9681906d67c24b11..fcf4699e8d10ec00d4307a7459889f1391bbca95 100644 --- a/binarycpython/tests/tests_population_extensions/test_distribution_functions.py +++ b/binarycpython/tests/tests_population_extensions/test_distribution_functions.py @@ -42,6 +42,7 @@ Q_LIST = [0.01, 0.2, 0.4, 0.652, 0.823, 1] PER_LIST = [10 ** logper for logper in LOGPER_LIST] TOLERANCE = 
1e-5 + class test_flat(unittest.TestCase): """ Class for unit test of flat @@ -102,12 +103,16 @@ class test_const_distribution(unittest.TestCase): distribution_functions_pop = Population() - output_1 = distribution_functions_pop.const_distribution(min_bound=0, max_bound=2) + output_1 = distribution_functions_pop.const_distribution( + min_bound=0, max_bound=2 + ) self.assertEqual( output_1, 0.5, msg="Value should be 0.5, but is {}".format(output_1) ) - output_2 = distribution_functions_pop.const_distribution(min_bound=0, max_bound=2, val=3) + output_2 = distribution_functions_pop.const_distribution( + min_bound=0, max_bound=2, val=3 + ) self.assertEqual( output_2, 0, msg="Value should be 0, but is {}".format(output_2) ) @@ -142,7 +147,9 @@ class test_powerlaw(unittest.TestCase): for mass in MASS_LIST: input_lists.append(mass) - python_results.append(distribution_functions_pop.powerlaw(1, 100, -2.3, mass)) + python_results.append( + distribution_functions_pop.powerlaw(1, 100, -2.3, mass) + ) # GO over the results and check whether they are equal (within tolerance) for i in range(len(python_results)): @@ -152,7 +159,9 @@ class test_powerlaw(unittest.TestCase): self.assertLess(np.abs(python_results[i] - perl_results[i]), TOLERANCE) # extra test for k = -1 - self.assertRaises(ValueError, distribution_functions_pop.powerlaw, 1, 100, -1, 10) + self.assertRaises( + ValueError, distribution_functions_pop.powerlaw, 1, 100, -1, 10 + ) class test_three_part_power_law(unittest.TestCase): @@ -185,7 +194,9 @@ class test_three_part_power_law(unittest.TestCase): for mass in MASS_LIST: input_lists.append(mass) python_results.append( - distribution_functions_pop.three_part_powerlaw(mass, 0.08, 0.1, 1, 300, -1.3, -2.3, -2.3) + distribution_functions_pop.three_part_powerlaw( + mass, 0.08, 0.1, 1, 300, -1.3, -2.3, -2.3 + ) ) # GO over the results and check whether they are equal (within tolerance) @@ -200,7 +211,10 @@ class test_three_part_power_law(unittest.TestCase): # Extra test: # M < M0 self.assertTrue( - distribution_functions_pop.three_part_powerlaw(0.05, 0.08, 0.1, 1, 300, -1.3, -2.3, -2.3) == 0, + distribution_functions_pop.three_part_powerlaw( + 0.05, 0.08, 0.1, 1, 300, -1.3, -2.3, -2.3 + ) + == 0, msg="Probability should be zero as M < M0", ) @@ -248,7 +262,9 @@ class test_Kroupa2001(unittest.TestCase): # Extra tests: self.assertEqual( distribution_functions_pop.Kroupa2001(10, newopts={"mmax": 300}), - distribution_functions_pop.three_part_powerlaw(10, 0.1, 0.5, 1, 300, -1.3, -2.3, -2.3), + distribution_functions_pop.three_part_powerlaw( + 10, 0.1, 0.5, 1, 300, -1.3, -2.3, -2.3 + ), ) @@ -295,7 +311,9 @@ class test_ktg93(unittest.TestCase): # extra test: self.assertEqual( distribution_functions_pop.ktg93(10, newopts={"mmax": 300}), - distribution_functions_pop.three_part_powerlaw(10, 0.1, 0.5, 1, 300, -1.3, -2.2, -2.7), + distribution_functions_pop.three_part_powerlaw( + 10, 0.1, 0.5, 1, 300, -1.3, -2.2, -2.7 + ), ) @@ -318,7 +336,9 @@ class test_imf_tinsley1980(unittest.TestCase): m = 1.2 self.assertEqual( distribution_functions_pop.imf_tinsley1980(m), - distribution_functions_pop.three_part_powerlaw(m, 0.1, 2.0, 10.0, 80.0, -2.0, -2.3, -3.3), + distribution_functions_pop.three_part_powerlaw( + m, 0.1, 2.0, 10.0, 80.0, -2.0, -2.3, -3.3 + ), ) @@ -341,7 +361,9 @@ class test_imf_scalo1986(unittest.TestCase): m = 1.2 self.assertEqual( distribution_functions_pop.imf_scalo1986(m), - distribution_functions_pop.three_part_powerlaw(m, 0.1, 1.0, 2.0, 80.0, -2.35, -2.35, -2.70), + 
distribution_functions_pop.three_part_powerlaw( + m, 0.1, 1.0, 2.0, 80.0, -2.35, -2.35, -2.70 + ), ) @@ -364,7 +386,9 @@ class test_imf_scalo1998(unittest.TestCase): m = 1.2 self.assertEqual( distribution_functions_pop.imf_scalo1998(m), - distribution_functions_pop.three_part_powerlaw(m, 0.1, 1.0, 10.0, 80.0, -1.2, -2.7, -2.3), + distribution_functions_pop.three_part_powerlaw( + m, 0.1, 1.0, 10.0, 80.0, -1.2, -2.7, -2.3 + ), ) @@ -385,7 +409,9 @@ class test_imf_chabrier2003(unittest.TestCase): distribution_functions_pop = Population() input_1 = 0 - self.assertRaises(ValueError, distribution_functions_pop.imf_chabrier2003, input_1) + self.assertRaises( + ValueError, distribution_functions_pop.imf_chabrier2003, input_1 + ) masses = [0.1, 0.2, 0.5, 1, 2, 10, 15, 50] perl_results = [ @@ -398,7 +424,9 @@ class test_imf_chabrier2003(unittest.TestCase): 0.000315578044662863, 1.97918170035704e-05, ] - python_results = [distribution_functions_pop.imf_chabrier2003(m) for m in masses] + python_results = [ + distribution_functions_pop.imf_chabrier2003(m) for m in masses + ] # GO over the results and check whether they are equal (within tolerance) for i in range(len(python_results)): @@ -426,7 +454,10 @@ class test_duquennoy1991(unittest.TestCase): distribution_functions_pop = Population() - self.assertEqual(distribution_functions_pop.duquennoy1991(4.2), distribution_functions_pop.gaussian(4.2, 4.8, 2.3, -2, 12)) + self.assertEqual( + distribution_functions_pop.duquennoy1991(4.2), + distribution_functions_pop.gaussian(4.2, 4.8, 2.3, -2, 12), + ) class test_gaussian(unittest.TestCase): @@ -458,7 +489,9 @@ class test_gaussian(unittest.TestCase): for logper in LOGPER_LIST: input_lists.append(logper) - python_results.append(distribution_functions_pop.gaussian(logper, 4.8, 2.3, -2.0, 12.0)) + python_results.append( + distribution_functions_pop.gaussian(logper, 4.8, 2.3, -2.0, 12.0) + ) # GO over the results and check whether they are equal (within tolerance) for i in range(len(python_results)): @@ -505,7 +538,9 @@ class test_Arenou2010_binary_fraction(unittest.TestCase): for mass in MASS_LIST: input_lists.append(mass) - python_results.append(distribution_functions_pop.Arenou2010_binary_fraction(mass)) + python_results.append( + distribution_functions_pop.Arenou2010_binary_fraction(mass) + ) # GO over the results and check whether they are equal (within tolerance) for i in range(len(python_results)): @@ -539,7 +574,9 @@ class test_raghavan2010_binary_fraction(unittest.TestCase): for mass in MASS_LIST: input_lists.append(mass) - python_results.append(distribution_functions_pop.raghavan2010_binary_fraction(mass)) + python_results.append( + distribution_functions_pop.raghavan2010_binary_fraction(mass) + ) # GO over the results and check whether they are equal (within tolerance) for i in range(len(python_results)): @@ -612,7 +649,9 @@ class test_Izzard2012_period_distribution(unittest.TestCase): for per in PER_LIST: input_lists.append([mass, per]) - python_results.append(distribution_functions_pop.Izzard2012_period_distribution(per, mass)) + python_results.append( + distribution_functions_pop.Izzard2012_period_distribution(per, mass) + ) # GO over the results and check whether they are equal (within tolerance) for i in range(len(python_results)): @@ -654,7 +693,9 @@ class test_flatsections(unittest.TestCase): for q in Q_LIST: input_lists.append(q) python_results.append( - distribution_functions_pop.flatsections(q, [{"min": 0.01, "max": 1.0, "height": 1.0}]) + distribution_functions_pop.flatsections( + q, 
[{"min": 0.01, "max": 1.0, "height": 1.0}] + ) ) # GO over the results and check whether they are equal (within tolerance) @@ -920,7 +961,7 @@ class test_sana12(unittest.TestCase): mass, mass_2, sep, per, sep_min, sep_max, 0.15, 5.5, -0.55 ) ) - + # GO over the results and check whether they are equal (within tolerance) for i in range(len(python_results)): msg = "Error: Value perl: {} Value python: {} for mass, mass2, per: {}".format( diff --git a/binarycpython/tests/tests_population_extensions/test_grid_logging.py b/binarycpython/tests/tests_population_extensions/test_grid_logging.py index 584edf446f7b3c3c6c266b36a85cbe29eb3d219d..601487c49835998b3660fd06c78136070454cb40 100644 --- a/binarycpython/tests/tests_population_extensions/test_grid_logging.py +++ b/binarycpython/tests/tests_population_extensions/test_grid_logging.py @@ -15,4 +15,4 @@ TODO: _clean_up_custom_logging import unittest if __name__ == "__main__": - unittest.main() \ No newline at end of file + unittest.main() diff --git a/binarycpython/tests/tests_population_extensions/test_grid_options_defaults.py b/binarycpython/tests/tests_population_extensions/test_grid_options_defaults.py index c6ba68fe37e0ecd3d47525f98dda8ea9a57f4864..7739582943a2759c5a0c7ad8415ce35c971172c3 100644 --- a/binarycpython/tests/tests_population_extensions/test_grid_options_defaults.py +++ b/binarycpython/tests/tests_population_extensions/test_grid_options_defaults.py @@ -17,6 +17,7 @@ from binarycpython.utils.grid import Population TMP_DIR = temp_dir("tests", "test_grid_options_defaults") + class test_grid_options_help(unittest.TestCase): """ Unit tests for the grid_options_help function @@ -74,7 +75,9 @@ class test_grid_options_description_checker(unittest.TestCase): grid_options_defaults_pop = Population() - output_1 = grid_options_defaults_pop.grid_options_description_checker(print_info=True) + output_1 = grid_options_defaults_pop.grid_options_description_checker( + print_info=True + ) self.assertTrue(isinstance(output_1, int)) self.assertTrue(output_1 > 0) @@ -97,12 +100,17 @@ class test_write_grid_options_to_rst_file(unittest.TestCase): grid_options_defaults_pop = Population() input_1 = os.path.join(TMP_DIR, "test_write_grid_options_to_rst_file_1.txt") - self.assertRaises(ValueError, grid_options_defaults_pop.write_grid_options_to_rst_file, input_1) + self.assertRaises( + ValueError, + grid_options_defaults_pop.write_grid_options_to_rst_file, + input_1, + ) input_2 = os.path.join(TMP_DIR, "test_write_grid_options_to_rst_file_2.rst") _ = grid_options_defaults_pop.write_grid_options_to_rst_file(input_2) self.assertTrue(os.path.isfile(input_2)) + if __name__ == "__main__": unittest.main() diff --git a/binarycpython/tests/tests_population_extensions/test_gridcode.py b/binarycpython/tests/tests_population_extensions/test_gridcode.py index 0be209d358883ade4ab1c81a8ae2a939b5112c69..7fdce8e6fd1e794d4858ddcc4562e727c17c4037 100644 --- a/binarycpython/tests/tests_population_extensions/test_gridcode.py +++ b/binarycpython/tests/tests_population_extensions/test_gridcode.py @@ -23,9 +23,10 @@ from binarycpython.utils.functions import ( from binarycpython.utils.grid import Population + class test_add_grid_variable(unittest.TestCase): """ - Unittests for add_grid_variable function + Unittests for add_grid_variable function """ def test_add_grid_variable(self): @@ -73,5 +74,6 @@ class test_add_grid_variable(unittest.TestCase): self.assertIn("lnm1", test_pop.grid_options["_grid_variables"]) self.assertEqual(len(test_pop.grid_options["_grid_variables"]), 2) + if 
__name__ == "__main__": unittest.main() diff --git a/binarycpython/tests/tests_population_extensions/test_metadata.py b/binarycpython/tests/tests_population_extensions/test_metadata.py index 95f6e81f197c8555e413f75c356ba08541d198cd..3c98fe4c31886ecd7b6bf88baf36ee82b39b583f 100644 --- a/binarycpython/tests/tests_population_extensions/test_metadata.py +++ b/binarycpython/tests/tests_population_extensions/test_metadata.py @@ -9,4 +9,4 @@ TODO: _metadata_keylist import unittest if __name__ == "__main__": - unittest.main() \ No newline at end of file + unittest.main() diff --git a/binarycpython/tests/tests_population_extensions/test_slurm.py b/binarycpython/tests/tests_population_extensions/test_slurm.py index d2734f9ce0ae58368ce9d365a45efe89471f091a..1a78d33a7486aa40e2b8480b0266f06d50c1a038 100644 --- a/binarycpython/tests/tests_population_extensions/test_slurm.py +++ b/binarycpython/tests/tests_population_extensions/test_slurm.py @@ -1,20 +1,189 @@ """ Unittests for slurm module -TODO: slurmID TODO: slurmpath TODO: slurm_status_file -TODO: slurm_check_requirements -TODO: slurm_dirs -TODO: set_slurm_status -TODO: get_slurm_status TODO: slurm_outfile TODO: make_slurm_dirs TODO: slurm_grid TODO: slurm_queue_stats """ +import os +import shutil import unittest +from binarycpython.utils.functions import Capturing, temp_dir +from binarycpython.utils.grid import Population + +TMP_DIR = temp_dir("tests", "test_slurm") +shutil.rmtree(TMP_DIR) +os.makedirs(TMP_DIR, exist_ok=True) + + +class test_slurmID(unittest.TestCase): + """ + Unittests for function slurmID + """ + + def test_slurmID(self): + with Capturing() as output: + self._test_slurmID() + + def _test_slurmID(self): + """ + Unit test for slurmID for slurm + """ + + slurm_pop = Population(tmp_dir=TMP_DIR) + slurm_pop.grid_options["slurm"] = 1 + slurm_pop.grid_options["slurm_jobid"] = 4 + slurm_pop.grid_options["slurm_jobarrayindex"] = 5 + + self.assertEqual( + slurm_pop.HPC_jobID(), + "{jobid}.{jobarrayindex}".format(jobid=4, jobarrayindex=5), + ) + + +class test_slurm_dirs(unittest.TestCase): + """ + Unittests for function slurm_dirs + """ + + def test_slurm_dirs(self): + with Capturing() as output: + self._test_slurm_dirs() + + def _test_slurm_dirs(self): + """ + Unit test for slurm_dirs for slurm + """ + + slurm_pop = Population(tmp_dir=TMP_DIR) + slurm_pop.grid_options["slurm"] = 1 + self.assertEqual(slurm_pop.slurm_dirs(), ["slurm_dir"]) + + +class test_slurm_check_requirements(unittest.TestCase): + """ + Unittests for function slurm_check_requirements + """ + + def test_slurm_check_requirements(self): + with Capturing() as output: + self._test_slurm_check_requirements() + + def _test_slurm_check_requirements(self): + """ + Unit test for slurm_check_requirements for slurm + """ + + slurm_pop = Population(tmp_dir=TMP_DIR) + slurm_pop.grid_options["slurm"] = 1 + + # First the False test + result_1 = slurm_pop.slurm_check_requirements() + self.assertFalse(result_1[0]) + self.assertTrue(len(result_1[1]) > 0) + + # First the True test + slurm_pop.grid_options["slurm_dir"] = TMP_DIR + result_2 = slurm_pop.slurm_check_requirements() + self.assertTrue(result_2[0]) + self.assertTrue(len(result_2[1]) == 0) + + +class test_set_slurm_status(unittest.TestCase): + """ + Unittests for function set_slurm_status + """ + + def test_set_slurm_status(self): + with Capturing() as output: + self._test_set_slurm_status() + + def _test_set_slurm_status(self): + """ + Unit test for set_slurm_status for slurm + """ + + slurm_pop = Population(tmp_dir=TMP_DIR) + 
slurm_pop.grid_options["slurm"] = 1 + slurm_pop.grid_options["slurm_jobid"] = 4 + slurm_pop.grid_options["slurm_jobarrayindex"] = 5 + slurm_pop.grid_options["slurm_dir"] = TMP_DIR + + id_filename = os.path.isfile( + os.path.join(slurm_pop.grid_options["slurm_dir"], "jobid") + ) + if os.path.isfile(id_filename): + os.remove(id_filename) + + # + os.makedirs( + os.path.dirname( + slurm_pop.slurm_status_file( + slurm_dir=slurm_pop.grid_options["slurm_dir"] + ) + ), + exist_ok=True, + ) + slurm_pop.set_slurm_status("test_set_slurm_status") + + # Check if ID file exists + self.assertTrue(os.path.exists(id_filename)) + + # Check if status file exists + self.assertTrue( + os.path.isfile( + slurm_pop.slurm_status_file( + slurm_dir=slurm_pop.grid_options["slurm_dir"] + ) + ) + ) + + with open( + slurm_pop.slurm_status_file(slurm_dir=slurm_pop.grid_options["slurm_dir"]), + "r", + ) as f: + content_file = f.read() + self.assertTrue(content_file == "test_set_slurm_status") + + +class test_get_slurm_status(unittest.TestCase): + """ + Unittests for function get_slurm_status + """ + + def test_get_slurm_status(self): + with Capturing() as output: + self._test_get_slurm_status() + + def _test_get_slurm_status(self): + """ + Unit test for get_slurm_status for slurm + """ + + slurm_pop = Population(tmp_dir=TMP_DIR) + slurm_pop.grid_options["slurm"] = 1 + slurm_pop.grid_options["slurm_jobid"] = 4 + slurm_pop.grid_options["slurm_jobarrayindex"] = 5 + slurm_pop.grid_options["slurm_dir"] = TMP_DIR + + # + os.makedirs( + os.path.dirname( + slurm_pop.slurm_status_file( + slurm_dir=slurm_pop.grid_options["slurm_dir"] + ) + ), + exist_ok=True, + ) + slurm_pop.set_slurm_status("test_set_slurm_status") + + status = slurm_pop.get_slurm_status() + self.assertEqual(status, "test_set_slurm_status") + if __name__ == "__main__": - unittest.main() \ No newline at end of file + unittest.main() diff --git a/binarycpython/tests/tests_population_extensions/test_spacing_functions.py b/binarycpython/tests/tests_population_extensions/test_spacing_functions.py index 663f3b91b307061f87cd84d1233ff32eb37a60cd..563081068a1fc0b63fae60bf34eb1410ff0e301c 100644 --- a/binarycpython/tests/tests_population_extensions/test_spacing_functions.py +++ b/binarycpython/tests/tests_population_extensions/test_spacing_functions.py @@ -37,5 +37,6 @@ class test_spacing_functions(unittest.TestCase): msg="Output didn't contain SINGLE_STAR_LIFETIME", ) + if __name__ == "__main__": unittest.main() diff --git a/binarycpython/tests/tests_population_extensions/test_version_info.py b/binarycpython/tests/tests_population_extensions/test_version_info.py index 195fdfceb3fde14f3b2f0bf8984af76b849e1136..1f88f49febc68e2042d5b972e7b067987f9d3aec 100644 --- a/binarycpython/tests/tests_population_extensions/test_version_info.py +++ b/binarycpython/tests/tests_population_extensions/test_version_info.py @@ -87,7 +87,9 @@ class test_return_binary_c_version_info(unittest.TestCase): """ # also tests the parse_version_info indirectly - version_info_parsed = self._version_info_pop.return_binary_c_version_info(parsed=True) + version_info_parsed = self._version_info_pop.return_binary_c_version_info( + parsed=True + ) self.assertTrue(isinstance(version_info_parsed, dict)) self.assertIn("isotopes", version_info_parsed.keys()) @@ -108,10 +110,12 @@ class test_return_binary_c_version_info(unittest.TestCase): Test for the parsed version info with a value already present """ - os.environ["BINARY_C_MACRO_HEADER"] = 'macroxyz' + os.environ["BINARY_C_MACRO_HEADER"] = "macroxyz" # also 
         # also tests the parse_version_info indirectly
-        version_info_parsed = self._version_info_pop.return_binary_c_version_info(parsed=True)
+        version_info_parsed = self._version_info_pop.return_binary_c_version_info(
+            parsed=True
+        )
 
         self.assertTrue(isinstance(version_info_parsed, dict))
         self.assertIn("isotopes", version_info_parsed.keys())
@@ -165,5 +169,6 @@ class test_parse_binary_c_version_info(unittest.TestCase):
         if parsed_info["macros"]["NUCSYN_ID_SOURCES"] == "on":
             self.assertIsNotNone(parsed_info["nucleosynthesis_sources"])
 
+
 if __name__ == "__main__":
     unittest.main()