diff --git a/binarycpython/tests/main.py b/binarycpython/tests/main.py
index a30a9e42d14aa41f94f234281c10ef317f6ec980..9725fd0f2a7681ba9b031a6eab582e20b6b2d453 100755
--- a/binarycpython/tests/main.py
+++ b/binarycpython/tests/main.py
@@ -1,4 +1,4 @@
-#/usr/bin/env python
+# /usr/bin/env python
 """
 Main file for the tests. This file imports all the combined_test functions from all files.
 """
diff --git a/binarycpython/tests/main_with_notebooks.py b/binarycpython/tests/main_with_notebooks.py
index abafd95b221ca038e06521ed47a79c4f7198e336..7337c6896d798a53bdfb5a67914ad27e494077c6 100755
--- a/binarycpython/tests/main_with_notebooks.py
+++ b/binarycpython/tests/main_with_notebooks.py
@@ -1,4 +1,4 @@
-#/usr/bin/env python
+# /usr/bin/env python
 """
 Main file for the tests. This file imports all the combined_test functions from all files.
 """
diff --git a/binarycpython/tests/test_custom_logging.py b/binarycpython/tests/test_custom_logging.py
index 17f54545f0941c2a5dbb8db823d6e12bafb958d0..d5068984929c1666560afaf9592900ad9d75b770 100644
--- a/binarycpython/tests/test_custom_logging.py
+++ b/binarycpython/tests/test_custom_logging.py
@@ -9,6 +9,7 @@ from binarycpython.utils.functions import Capturing
 
 TMP_DIR = temp_dir("tests", "test_custom_logging")
 
+
 class test_custom_logging(unittest.TestCase):
     """
     Unit test for the custom_logging module
@@ -90,14 +91,10 @@ class test_custom_logging(unittest.TestCase):
         )
 
         self.assertTrue(
-            os.path.isfile(
-                os.path.join(TMP_DIR, "test_binary_c_write_log_code.txt")
-            ),
+            os.path.isfile(os.path.join(TMP_DIR, "test_binary_c_write_log_code.txt")),
             msg="File not created",
         )
-        with open(
-            os.path.join(TMP_DIR, "test_binary_c_write_log_code.txt")
-        ) as f:
+        with open(os.path.join(TMP_DIR, "test_binary_c_write_log_code.txt")) as f:
             content_file = repr(f.read())
 
         self.assertEqual(repr(input_1), content_file, msg="Contents are not similar")
@@ -128,7 +125,11 @@ class test_custom_logging(unittest.TestCase):
 
         input_2 = "version"
         output_2 = from_binary_c_config(BINARY_C_CONFIG, input_2)
-        self.assertIn(output_2, ["2.1.7", "2.2pre1", "2.2.0", "2.2.1"], msg="binary_c version doesnt match")
+        self.assertIn(
+            output_2,
+            ["2.1.7", "2.2pre1", "2.2.0", "2.2.1"],
+            msg="binary_c version doesnt match",
+        )
 
     def test_return_compilation_dict(self):
         with Capturing() as output:
diff --git a/binarycpython/tests/test_distributions.py b/binarycpython/tests/test_distributions.py
index ba90e5d50fb9b2b4226f0588e990bea80fb3820b..85d08e3465294da39b18baf22ed6605f08e87afd 100644
--- a/binarycpython/tests/test_distributions.py
+++ b/binarycpython/tests/test_distributions.py
@@ -6,13 +6,11 @@ import unittest
 
 from binarycpython.utils.distribution_functions import *
 from binarycpython.utils.useful_funcs import calc_sep_from_period
-from binarycpython.utils.functions import (
-    Capturing,
-    temp_dir
-)
+from binarycpython.utils.functions import Capturing, temp_dir
 
 TMP_DIR = temp_dir("tests", "test_distributions")
 
+
 class TestDistributions(unittest.TestCase):
     """
     Unittest class
diff --git a/binarycpython/tests/test_functions.py b/binarycpython/tests/test_functions.py
index eb02619d559fa990d44a29fe557fc81f5c798975..46bfbbbfce4cc5fdf53b511f3ddf3999c4630736 100644
--- a/binarycpython/tests/test_functions.py
+++ b/binarycpython/tests/test_functions.py
@@ -13,6 +13,7 @@ from binarycpython.utils.functions import *
 
 TMP_DIR = temp_dir("tests", "test_functions")
 
+
 class dummy:
     """
     Dummy class to be used in the merge_dicts
@@ -72,9 +73,7 @@ class test_remove_file(unittest.TestCase):
         Test to remove a file
         """
- with open( - os.path.join(TMP_DIR, "test_remove_file_file.txt"), "w" - ) as f: + with open(os.path.join(TMP_DIR, "test_remove_file_file.txt"), "w") as f: f.write("test") remove_file(os.path.join(TMP_DIR, "test_remove_file_file.txt")) @@ -855,6 +854,7 @@ class test_handle_ensemble_string_to_json(unittest.TestCase): self.assertTrue(output_dict["ding"] == 10) self.assertTrue(output_dict["list_example"] == [1, 2, 3]) + class test_AutoVivicationDict(unittest.TestCase): """ Unittests for AutoVivicationDict @@ -867,11 +867,12 @@ class test_AutoVivicationDict(unittest.TestCase): result_dict = AutoVivificationDict() - result_dict['a']['b']['c'] += 10 + result_dict["a"]["b"]["c"] += 10 + + self.assertEqual(result_dict["a"]["b"]["c"], 10) + result_dict["a"]["b"]["c"] += 10 + self.assertEqual(result_dict["a"]["b"]["c"], 20) - self.assertEqual(result_dict['a']['b']['c'], 10) - result_dict['a']['b']['c'] += 10 - self.assertEqual(result_dict['a']['b']['c'], 20) class test_bin_data(unittest.TestCase): """ @@ -916,6 +917,5 @@ class test_bin_data(unittest.TestCase): self.assertEqual(binned_value, -0.5) - if __name__ == "__main__": unittest.main() diff --git a/binarycpython/tests/test_grid.py b/binarycpython/tests/test_grid.py index cb3129423ba4409999a0635ca64cd7e4b51a2a14..2d1d9b0feb323ad6b9ac52db6ade2ded48a05cf1 100644 --- a/binarycpython/tests/test_grid.py +++ b/binarycpython/tests/test_grid.py @@ -18,13 +18,14 @@ from binarycpython.utils.functions import ( merge_dicts, remove_file, Capturing, - bin_data + bin_data, ) from binarycpython.utils.custom_logging_functions import binary_c_log_code TMP_DIR = temp_dir("tests", "test_grid") TEST_VERBOSITY = 1 + def parse_function_test_grid_evolve_2_threads_with_custom_logging(self, output): """ Simple parse function that directly appends all the output to a file @@ -484,7 +485,8 @@ class test_Population(unittest.TestCase): M_2=5, orbital_period=100000, metallicty=0.02, - max_evolution_time=15000, verbosity=TEST_VERBOSITY + max_evolution_time=15000, + verbosity=TEST_VERBOSITY, ) test_pop.set(C_logging_code=CUSTOM_LOGGING_STRING_MASSES) @@ -503,7 +505,8 @@ class test_Population(unittest.TestCase): M_2=5, orbital_period=100000, metallicty=0.02, - max_evolution_time=15000, verbosity=TEST_VERBOSITY + max_evolution_time=15000, + verbosity=TEST_VERBOSITY, ) test_pop_2.set(C_auto_logging=custom_logging_dict) @@ -553,7 +556,9 @@ class test_grid_evolve(unittest.TestCase): analytics = test_pop_evolve_1_thread.evolve() self.assertLess( - np.abs(analytics["total_probability"] - 0.10820655287892997), 1e-10, msg=analytics["total_probability"] + np.abs(analytics["total_probability"] - 0.10820655287892997), + 1e-10, + msg=analytics["total_probability"], ) self.assertTrue(analytics["total_count"] == 10) @@ -567,7 +572,9 @@ class test_grid_evolve(unittest.TestCase): """ test_pop = Population() - test_pop.set(amt_cores=2, M_2=1, orbital_period=100000, verbosity=TEST_VERBOSITY) + test_pop.set( + amt_cores=2, M_2=1, orbital_period=100000, verbosity=TEST_VERBOSITY + ) resolution = {"M_1": 10} @@ -588,7 +595,9 @@ class test_grid_evolve(unittest.TestCase): analytics = test_pop.evolve() self.assertLess( - np.abs(analytics["total_probability"] - 0.10820655287892997), 1e-10, msg=analytics["total_probability"] + np.abs(analytics["total_probability"] - 0.10820655287892997), + 1e-10, + msg=analytics["total_probability"], ) # self.assertTrue(analytics["total_count"] == 10) @@ -615,7 +624,6 @@ class test_grid_evolve(unittest.TestCase): data_dir=data_dir_value, 
C_logging_code=custom_logging_string, # input it like this. parse_function=parse_function_test_grid_evolve_2_threads_with_custom_logging, - ) test_pop.set(ensemble=0) resolution = {"M_1": 2} @@ -666,7 +674,9 @@ class test_grid_evolve(unittest.TestCase): """ test_pop = Population() - test_pop.set(amt_cores=2, M_2=1, orbital_period=100000, verbosity=TEST_VERBOSITY) + test_pop.set( + amt_cores=2, M_2=1, orbital_period=100000, verbosity=TEST_VERBOSITY + ) # Set the amt of failed systems that each thread will log test_pop.set(failed_systems_threshold=4) @@ -706,7 +716,9 @@ Printf("TEST_CUSTOM_LOGGING_1 %30.12e %g %g %g %g\\n", analytics = test_pop.evolve() self.assertLess( - np.abs(analytics["total_probability"] - 0.10820655287892997), 1e-10, msg=analytics["total_probability"] + np.abs(analytics["total_probability"] - 0.10820655287892997), + 1e-10, + msg=analytics["total_probability"], ) # self.assertEqual(analytics["failed_systems_error_codes"], [0]) self.assertTrue(analytics["total_count"] == 10) @@ -717,7 +729,9 @@ Printf("TEST_CUSTOM_LOGGING_1 %30.12e %g %g %g %g\\n", # test to see if 1 thread does all the systems test_pop = Population() - test_pop.set(amt_cores=2, M_2=1, orbital_period=100000, verbosity=TEST_VERBOSITY) + test_pop.set( + amt_cores=2, M_2=1, orbital_period=100000, verbosity=TEST_VERBOSITY + ) test_pop.set(failed_systems_threshold=4) test_pop.set(C_logging_code=CUSTOM_LOGGING_STRING_WITH_EXIT) @@ -769,7 +783,9 @@ Printf("TEST_CUSTOM_LOGGING_1 %30.12e %g %g %g %g\\n", """ test_pop = Population() - test_pop.set(amt_cores=1, M_2=1, orbital_period=100000, verbosity=TEST_VERBOSITY) + test_pop.set( + amt_cores=1, M_2=1, orbital_period=100000, verbosity=TEST_VERBOSITY + ) resolution = {"M_1": 10} self.assertRaises(ValueError, test_pop.evolve) @@ -1030,6 +1046,7 @@ Printf("TEST_CUSTOM_LOGGING_1 %30.12e %g %g %g %g\\n", 1e-8, ) + def parse_function_adding_results(self, output): """ Example parse function @@ -1039,7 +1056,7 @@ def parse_function_adding_results(self, output): parameters = ["time", "mass", "zams_mass", "probability", "stellar_type"] - self.grid_results['example']['count'] += 1 + self.grid_results["example"]["count"] += 1 # Go over the output. for line in output.splitlines(): @@ -1050,7 +1067,9 @@ def parse_function_adding_results(self, output): values = line.split()[1:] # Bin the mass probability - self.grid_results['example']['mass'][bin_data(float(values[2]), binwidth=0.5)] += float(values[3]) + self.grid_results["example"]["mass"][ + bin_data(float(values[2]), binwidth=0.5) + ] += float(values[3]) # if not len(parameters) == len(values): @@ -1058,7 +1077,8 @@ def parse_function_adding_results(self, output): raise ValueError # record the probability of this line (Beware, this is meant to only be run once for each system. 
its a controls quantity) - self.grid_results['example']['probability'] += float(values[3]) + self.grid_results["example"]["probability"] += float(values[3]) + class test_resultdict(unittest.TestCase): """ @@ -1094,11 +1114,8 @@ class test_resultdict(unittest.TestCase): # grid_options amt_cores=3, tmp_dir=TMP_DIR, - # Custom options - data_dir=os.path.join( - TMP_DIR, "test_resultdict" - ), # custom_options + data_dir=os.path.join(TMP_DIR, "test_resultdict"), # custom_options C_logging_code=custom_logging_statement, parse_function=parse_function_adding_results, ) @@ -1112,7 +1129,9 @@ class test_resultdict(unittest.TestCase): longname="Primary mass", valuerange=[2, 150], resolution="{}".format(resolution["M_1"]), - spacingfunc="const(math.log(2), math.log(150), {})".format(resolution["M_1"]), + spacingfunc="const(math.log(2), math.log(150), {})".format( + resolution["M_1"] + ), precode="M_1=math.exp(lnm1)", probdist="three_part_powerlaw(M_1, 0.1, 0.5, 1.0, 150, -1.3, -2.3, -2.3)*M_1", dphasevol="dlnm1", @@ -1125,35 +1144,49 @@ class test_resultdict(unittest.TestCase): analytics = example_pop.evolve() # - grid_prob = analytics['total_probability'] - result_dict_prob = example_pop.grid_results['example']['probability'] + grid_prob = analytics["total_probability"] + result_dict_prob = example_pop.grid_results["example"]["probability"] # amt systems - grid_count = analytics['total_count'] - result_dict_count = example_pop.grid_results['example']['count'] + grid_count = analytics["total_count"] + result_dict_count = example_pop.grid_results["example"]["count"] # Check if the total probability matches self.assertAlmostEqual( grid_prob, result_dict_prob, places=12, - msg="Total probability from grid {} and from result dict {} are not equal".format(grid_prob, result_dict_prob) + msg="Total probability from grid {} and from result dict {} are not equal".format( + grid_prob, result_dict_prob + ), ) # Check if the total count matches self.assertEqual( grid_count, result_dict_count, - msg="Total count from grid {} and from result dict {} are not equal".format(grid_count, result_dict_count) + msg="Total count from grid {} and from result dict {} are not equal".format( + grid_count, result_dict_count + ), ) # Check if the structure is what we expect. Note: this depends on the probability calculation. 
if that changes we need to recalibrate this - test_case_dict = {2.25: 0.01895481306515, 3.75: 0.01081338190204, 5.75: 0.006168841009268, 9.25: 0.003519213484031, 13.75: 0.002007648361756, 21.25: 0.001145327489437, 33.25: 0.0006533888518775, 50.75: 0.0003727466560393, 78.25: 0.000212645301782, 120.75: 0.0001213103421247} - - self.assertEqual(test_case_dict, dict(example_pop.grid_results['example']['mass'])) - - + test_case_dict = { + 2.25: 0.01895481306515, + 3.75: 0.01081338190204, + 5.75: 0.006168841009268, + 9.25: 0.003519213484031, + 13.75: 0.002007648361756, + 21.25: 0.001145327489437, + 33.25: 0.0006533888518775, + 50.75: 0.0003727466560393, + 78.25: 0.000212645301782, + 120.75: 0.0001213103421247, + } + self.assertEqual( + test_case_dict, dict(example_pop.grid_results["example"]["mass"]) + ) if __name__ == "__main__": diff --git a/binarycpython/tests/test_grid_options_defaults.py b/binarycpython/tests/test_grid_options_defaults.py index ae1ccb9124db5a771800078b0328d93920dc2b29..ce9ad4adf5fe31e9282fd4608182eb063bd26412 100644 --- a/binarycpython/tests/test_grid_options_defaults.py +++ b/binarycpython/tests/test_grid_options_defaults.py @@ -16,6 +16,7 @@ from binarycpython.utils.grid_options_defaults import ( TMP_DIR = temp_dir("tests", "test_grid_options_defaults") + class test_grid_options_defaults(unittest.TestCase): """ Unit tests for the grid_options_defaults module @@ -77,15 +78,11 @@ class test_grid_options_defaults(unittest.TestCase): Unit tests for the grid_options_description_checker function """ - input_1 = os.path.join( - TMP_DIR, "test_write_grid_options_to_rst_file_1.txt" - ) + input_1 = os.path.join(TMP_DIR, "test_write_grid_options_to_rst_file_1.txt") output_1 = write_grid_options_to_rst_file(input_1) self.assertIsNone(output_1) - input_2 = os.path.join( - TMP_DIR, "test_write_grid_options_to_rst_file_2.rst" - ) + input_2 = os.path.join(TMP_DIR, "test_write_grid_options_to_rst_file_2.rst") output_2 = write_grid_options_to_rst_file(input_2) self.assertTrue(os.path.isfile(input_2)) diff --git a/binarycpython/tests/test_notebooks.py b/binarycpython/tests/test_notebooks.py index 8d955ee851b4ae57e8754ce63acd96703d2bbd1d..135e8df72b245d7d2177171ac01036a4dbc054aa 100644 --- a/binarycpython/tests/test_notebooks.py +++ b/binarycpython/tests/test_notebooks.py @@ -8,8 +8,11 @@ from nbconvert.preprocessors import ExecutePreprocessor from binarycpython.utils.functions import temp_dir -TMP_DIR = temp_dir('testing', 'test_notebooks') -NOTEBOOKS_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '../../../examples')) +TMP_DIR = temp_dir("testing", "test_notebooks") +NOTEBOOKS_DIR = os.path.abspath( + os.path.join(os.path.abspath(__file__), "../../../examples") +) + def run_notebook(notebook_path): """ @@ -23,70 +26,114 @@ def run_notebook(notebook_path): with open(notebook_path) as f: nb = nbformat.read(f, as_version=4) - proc = ExecutePreprocessor(timeout=600, kernel_name='python3') + proc = ExecutePreprocessor(timeout=600, kernel_name="python3") proc.allow_errors = True - proc.preprocess(nb, {'metadata': {'path': '/'}}) + proc.preprocess(nb, {"metadata": {"path": "/"}}) - output_path = os.path.join(TMP_DIR, '{}_all_output.ipynb'.format(nb_name)) + output_path = os.path.join(TMP_DIR, "{}_all_output.ipynb".format(nb_name)) - with open(output_path, mode='wt') as f: + with open(output_path, mode="wt") as f: nbformat.write(nb, f) errors = [] for cell in nb.cells: - if 'outputs' in cell: - for output in cell['outputs']: - if output.output_type == 'error': + if "outputs" in cell: 
+ for output in cell["outputs"]: + if output.output_type == "error": errors.append(output) return nb, errors + class TestNotebook(unittest.TestCase): """ Class that contains the notebook test calls """ def test_notebook_api_functionality(self): - notebook_name = 'notebook_api_functionality.ipynb' + notebook_name = "notebook_api_functionality.ipynb" full_notebook_path = os.path.join(NOTEBOOKS_DIR, notebook_name) nb, errors = run_notebook(full_notebook_path) - msg = "\nNotebook: {}\n\n".format(notebook_name) + "\n".join(["{}: {}\n{}".format(el['ename'], el['evalue'], '\n'.join(el['traceback'])) for el in errors]) + msg = "\nNotebook: {}\n\n".format(notebook_name) + "\n".join( + [ + "{}: {}\n{}".format( + el["ename"], el["evalue"], "\n".join(el["traceback"]) + ) + for el in errors + ] + ) self.assertEqual(errors, [], msg=msg) def test_notebook_population(self): - notebook_name = 'notebook_population.ipynb' + notebook_name = "notebook_population.ipynb" full_notebook_path = os.path.join(NOTEBOOKS_DIR, notebook_name) nb, errors = run_notebook(full_notebook_path) - msg = "\nNotebook: {}\n\n".format(notebook_name) + "\n".join(["{}: {}\n{}".format(el['ename'], el['evalue'], '\n'.join(el['traceback'])) for el in errors]) + msg = "\nNotebook: {}\n\n".format(notebook_name) + "\n".join( + [ + "{}: {}\n{}".format( + el["ename"], el["evalue"], "\n".join(el["traceback"]) + ) + for el in errors + ] + ) self.assertEqual(errors, [], msg=msg) def test_notebook_individual_systems(self): - notebook_name = 'notebook_individual_systems.ipynb' + notebook_name = "notebook_individual_systems.ipynb" full_notebook_path = os.path.join(NOTEBOOKS_DIR, notebook_name) nb, errors = run_notebook(full_notebook_path) - msg = "\nNotebook: {}\n\n".format(notebook_name) + "\n".join(["{}: {}\n{}".format(el['ename'], el['evalue'], '\n'.join(el['traceback'])) for el in errors]) + msg = "\nNotebook: {}\n\n".format(notebook_name) + "\n".join( + [ + "{}: {}\n{}".format( + el["ename"], el["evalue"], "\n".join(el["traceback"]) + ) + for el in errors + ] + ) self.assertEqual(errors, [], msg=msg) def test_notebook_custom_logging(self): - notebook_name = 'notebook_custom_logging.ipynb' + notebook_name = "notebook_custom_logging.ipynb" full_notebook_path = os.path.join(NOTEBOOKS_DIR, notebook_name) nb, errors = run_notebook(full_notebook_path) - msg = "\nNotebook: {}\n\n".format(notebook_name) + "\n".join(["{}: {}\n{}".format(el['ename'], el['evalue'], '\n'.join(el['traceback'])) for el in errors]) + msg = "\nNotebook: {}\n\n".format(notebook_name) + "\n".join( + [ + "{}: {}\n{}".format( + el["ename"], el["evalue"], "\n".join(el["traceback"]) + ) + for el in errors + ] + ) self.assertEqual(errors, [], msg=msg) def test_notebook_extra_features(self): - notebook_name = 'notebook_extra_features.ipynb' + notebook_name = "notebook_extra_features.ipynb" full_notebook_path = os.path.join(NOTEBOOKS_DIR, notebook_name) nb, errors = run_notebook(full_notebook_path) - msg = "\nNotebook: {}\n\n".format(notebook_name) + "\n".join(["{}: {}\n{}".format(el['ename'], el['evalue'], '\n'.join(el['traceback'])) for el in errors]) + msg = "\nNotebook: {}\n\n".format(notebook_name) + "\n".join( + [ + "{}: {}\n{}".format( + el["ename"], el["evalue"], "\n".join(el["traceback"]) + ) + for el in errors + ] + ) self.assertEqual(errors, [], msg=msg) def test_notebook_luminosity_function_single(self): - notebook_name = 'notebook_luminosity_function_single.ipynb' + notebook_name = "notebook_luminosity_function_single.ipynb" full_notebook_path = 
os.path.join(NOTEBOOKS_DIR, notebook_name) nb, errors = run_notebook(full_notebook_path) - msg = "\nNotebook: {}\n\n".format(notebook_name) + "\n".join(["{}: {}\n{}".format(el['ename'], el['evalue'], '\n'.join(el['traceback'])) for el in errors]) + msg = "\nNotebook: {}\n\n".format(notebook_name) + "\n".join( + [ + "{}: {}\n{}".format( + el["ename"], el["evalue"], "\n".join(el["traceback"]) + ) + for el in errors + ] + ) self.assertEqual(errors, [], msg=msg) -if __name__ == '__main__': + +if __name__ == "__main__": unittest.main() diff --git a/binarycpython/tests/test_plot_functions.py b/binarycpython/tests/test_plot_functions.py index 3e103e6b732a9ee9460b072ff700a7beb47cddd6..34c730fe0c80072c3de99a649af01b5a7d04de82 100644 --- a/binarycpython/tests/test_plot_functions.py +++ b/binarycpython/tests/test_plot_functions.py @@ -10,8 +10,6 @@ from binarycpython.utils.plot_functions import * from binarycpython.utils.functions import Capturing - - class test_color_by_index(unittest.TestCase): """ Unittests for function color_by_index diff --git a/binarycpython/tests/test_spacing_functions.py b/binarycpython/tests/test_spacing_functions.py index 7920a63b18196afdce95f8db37a5860be54cbc99..51f6edc64b1309fe28e1c66b21bbf15683e0bbda 100644 --- a/binarycpython/tests/test_spacing_functions.py +++ b/binarycpython/tests/test_spacing_functions.py @@ -25,7 +25,7 @@ class test_spacing_functions(unittest.TestCase): const_return = const(1, 10, 10) self.assertTrue( - (const_return == np.linspace(1, 10, 10+1)).all(), + (const_return == np.linspace(1, 10, 10 + 1)).all(), msg="Output didn't contain SINGLE_STAR_LIFETIME", ) diff --git a/binarycpython/utils/custom_logging_functions.py b/binarycpython/utils/custom_logging_functions.py index 949ba3c2916292c0b026baee21d87c20168e6484..12d7632135c03d2081f0f486b73e391dee459c84 100644 --- a/binarycpython/utils/custom_logging_functions.py +++ b/binarycpython/utils/custom_logging_functions.py @@ -52,7 +52,7 @@ def autogen_C_logging_code(logging_dict: dict, verbosity: int = 0) -> Optional[s for key in logging_dict: verbose_print( "Generating Print statement for custom logging code with {} as a header".format( - key + key ), verbosity, 1, @@ -306,7 +306,7 @@ def return_compilation_dict(verbosity: int = 0) -> dict: # verbose_print( "Got options to compile:\n\tcc = {cc}\n\tccflags = {ccflags}\n\tld = {ld}\n\tlibs = {libs}\n\tinc = {inc}\n\n".format( - cc=cc, ccflags=ccflags, ld=ld, libs=libs, inc=inc + cc=cc, ccflags=ccflags, ld=ld, libs=libs, inc=inc ), verbosity, 1, @@ -358,7 +358,7 @@ def compile_shared_lib( # Execute compilation and create the library verbose_print( "Building shared library for custom logging with (binary_c.h) on {}\n".format( - socket.gethostname() + socket.gethostname() ), verbosity, 1, @@ -380,6 +380,7 @@ def compile_shared_lib( 1, ) + def create_and_load_logging_function( custom_logging_code: str, verbosity: int = 0, custom_tmp_dir=None ) -> Tuple[int, str]: diff --git a/binarycpython/utils/distribution_functions.py b/binarycpython/utils/distribution_functions.py index 334931a27b1728fb6741d77aebb4eb67e13a18f1..bc0eb313eeba67a94227cabd5ec025352a6b467d 100644 --- a/binarycpython/utils/distribution_functions.py +++ b/binarycpython/utils/distribution_functions.py @@ -944,8 +944,8 @@ def cosmic_SFH_madau_dickinson2014(z): # known value if we try to interpolate outside of the tables. # There are still some open tasks and improvements that can be made: # -# TODO: Solve the memory issues that are present. -# Are the interpolators not cleaned? 
+# TODO: Solve the memory issues that are present. +# Are the interpolators not cleaned? # TODO: Parallelize the setting up of the interpolators # TODO: Generalise the code such that we can input other/newer tables @@ -956,6 +956,7 @@ import py_rinterpolate # Global dictionary to store values in Moecache = {} + def poisson(lambda_val, n, nmax=None, verbosity=0): """ Function that calculates the Poisson value and normalises @@ -1010,6 +1011,7 @@ def _poisson(lambda_val, n): return (lambda_val ** n) * np.exp(-lambda_val) / (1.0 * math.factorial(n)) + def get_max_multiplicity(multiplicity_array): """ Function to get the maximum multiplicity @@ -1021,6 +1023,7 @@ def get_max_multiplicity(multiplicity_array): max_multiplicity = n + 1 return max_multiplicity + def merge_multiplicities(result_array, max_multiplicity, verbosity=0): """ Function to fold the multiplicities higher than the max_multiplicity onto the max_multiplicity @@ -1046,21 +1049,26 @@ def merge_multiplicities(result_array, max_multiplicity, verbosity=0): # Fold multiplicities: verbose_print( - "\tM&S: merge_multiplicities: Merging multiplicities with initial array {} and max multiplicity {}".format(result_array, max_multiplicity), + "\tM&S: merge_multiplicities: Merging multiplicities with initial array {} and max multiplicity {}".format( + result_array, max_multiplicity + ), verbosity, _MS_VERBOSITY_LEVEL, ) for i in range(max_multiplicity, len(result_array))[::-1]: - result_array[i-1] += result_array[i] + result_array[i - 1] += result_array[i] result_array[i] = 0 verbose_print( - "\tM&S: merge_multiplicities: Merging multiplicities to new array {}".format(result_array), + "\tM&S: merge_multiplicities: Merging multiplicities to new array {}".format( + result_array + ), verbosity, _MS_VERBOSITY_LEVEL, ) return result_array + def normalize_dict(result_dict, verbosity=0): """ Function to normalise a dictionary @@ -1071,6 +1079,7 @@ def normalize_dict(result_dict, verbosity=0): result_dict[key] = result_dict[key] / sum_result return result_dict + def Moe_di_Stefano_2017_multiplicity_fractions(options, verbosity=0): """ Function that creates a list of probability fractions and @@ -1088,11 +1097,15 @@ def Moe_di_Stefano_2017_multiplicity_fractions(options, verbosity=0): # Use the global Moecache global Moecache - multiplicity_modulator_array = np.array(options["multiplicity_modulator"]) # Modulator array + multiplicity_modulator_array = np.array( + options["multiplicity_modulator"] + ) # Modulator array # Check for length - if not len(multiplicity_modulator_array)==4: - msg = "Multiplicity modulator has to have 4 elements. Now it is {}, len: {}".format(multiplicity_modulator_array, len(multiplicity_modulator_array)) + if not len(multiplicity_modulator_array) == 4: + msg = "Multiplicity modulator has to have 4 elements. 
Now it is {}, len: {}".format( + multiplicity_modulator_array, len(multiplicity_modulator_array) + ) verbose_print( msg, verbosity, @@ -1101,8 +1114,10 @@ def Moe_di_Stefano_2017_multiplicity_fractions(options, verbosity=0): raise ValueError(msg) # Set up some arrays - full_fractions_array = np.zeros(4) # Meant to contain the real fractions - weighted_fractions_array = np.zeros(4) # Meant to contain the fractions multiplied by the multiplicity modulator + full_fractions_array = np.zeros(4) # Meant to contain the real fractions + weighted_fractions_array = np.zeros( + 4 + ) # Meant to contain the fractions multiplied by the multiplicity modulator # Get max multiplicity max_multiplicity = get_max_multiplicity(multiplicity_modulator_array) @@ -1130,10 +1145,12 @@ def Moe_di_Stefano_2017_multiplicity_fractions(options, verbosity=0): full_fractions_array[n] = poisson(multiplicity, n, 3, verbosity) # Normalize it so it fills to one when taking all the multiplicities: - full_fractions_array = full_fractions_array/np.sum(full_fractions_array) + full_fractions_array = full_fractions_array / np.sum(full_fractions_array) verbose_print( - "\tM&S: Moe_di_Stefano_2017_multiplicity_fractions: using model {}: full_fractions_array: {}".format("Poisson", full_fractions_array), + "\tM&S: Moe_di_Stefano_2017_multiplicity_fractions: using model {}: full_fractions_array: {}".format( + "Poisson", full_fractions_array + ), verbosity, _MS_VERBOSITY_LEVEL, ) @@ -1146,15 +1163,16 @@ def Moe_di_Stefano_2017_multiplicity_fractions(options, verbosity=0): # Fill with the raw values for n in range(3): - full_fractions_array[n] = ( - Moecache["rinterpolator_multiplicity"].interpolate( - [np.log10(options["M_1"])] - )[n + 1]) + full_fractions_array[n] = Moecache[ + "rinterpolator_multiplicity" + ].interpolate([np.log10(options["M_1"])])[n + 1] # Set last value full_fractions_array[3] = 0.0 # no quadruples verbose_print( - "\tM&S: Moe_di_Stefano_2017_multiplicity_fractions: using model {}: full_fractions_array: {}".format("data", full_fractions_array), + "\tM&S: Moe_di_Stefano_2017_multiplicity_fractions: using model {}: full_fractions_array: {}".format( + "data", full_fractions_array + ), verbosity, _MS_VERBOSITY_LEVEL, ) @@ -1163,7 +1181,9 @@ def Moe_di_Stefano_2017_multiplicity_fractions(options, verbosity=0): if options["normalize_multiplicities"] == "raw": # Don't multiply by the multiplicity_array, but do give a fractions array verbose_print( - "\tM&S: Moe_di_Stefano_2017_multiplicity_fractions: Not normalising (using raw results): results: {}".format(full_fractions_array), + "\tM&S: Moe_di_Stefano_2017_multiplicity_fractions: Not normalising (using raw results): results: {}".format( + full_fractions_array + ), verbosity, _MS_VERBOSITY_LEVEL, ) @@ -1174,10 +1194,12 @@ def Moe_di_Stefano_2017_multiplicity_fractions(options, verbosity=0): weighted_fractions_array = full_fractions_array * multiplicity_modulator_array # Normalise this so it is in total 1: - result = weighted_fractions_array/np.sum(weighted_fractions_array) + result = weighted_fractions_array / np.sum(weighted_fractions_array) verbose_print( - "\tM&S: Moe_di_Stefano_2017_multiplicity_fractions: Normalising with {}. result: {}".format("norm", result), + "\tM&S: Moe_di_Stefano_2017_multiplicity_fractions: Normalising with {}. 
result: {}".format( + "norm", result + ), verbosity, _MS_VERBOSITY_LEVEL, ) @@ -1185,13 +1207,17 @@ def Moe_di_Stefano_2017_multiplicity_fractions(options, verbosity=0): elif options["normalize_multiplicities"] == "merge": # We first take the full multiplicity array # (i.e. not multiplied by multiplier) and do the merging - result = merge_multiplicities(full_fractions_array, max_multiplicity, verbosity=verbosity) + result = merge_multiplicities( + full_fractions_array, max_multiplicity, verbosity=verbosity + ) # Then normalise to be sure - result = result/np.sum(result) + result = result / np.sum(result) verbose_print( - "\tM&S: Moe_di_Stefano_2017_multiplicity_fractions: Normalising with {}, max_multiplicity={} result={}".format("merge", max_multiplicity, result), + "\tM&S: Moe_di_Stefano_2017_multiplicity_fractions: Normalising with {}, max_multiplicity={} result={}".format( + "merge", max_multiplicity, result + ), verbosity, _MS_VERBOSITY_LEVEL, ) @@ -1205,6 +1231,7 @@ def Moe_di_Stefano_2017_multiplicity_fractions(options, verbosity=0): # return array reference return result + def build_q_table(options, m, p, verbosity=0): ############################################################ # @@ -1269,7 +1296,7 @@ def build_q_table(options, m, p, verbosity=0): # 0 # ] # TODO: this lower range must not be lower than Mmin. - qmin = options['Mmin']/options['M_1'] + qmin = options["Mmin"] / options["M_1"] verbose_print( "\tM&S: build_q_table qmin: {}".format( qmin, @@ -1445,13 +1472,15 @@ def build_q_table(options, m, p, verbosity=0): verbosity, _MS_VERBOSITY_LEVEL, ) - if pre=='low': - below_qlimit = qlimit-qeps + if pre == "low": + below_qlimit = qlimit - qeps if below_qlimit > 0: qdata[below_qlimit] = 0 qdata[0] = 0 verbose_print( - "\tM&S: build_q_table: using linear extrapolation and setting the points below the lower q bound ({}) to 0 ".format(qlimit), + "\tM&S: build_q_table: using linear extrapolation and setting the points below the lower q bound ({}) to 0 ".format( + qlimit + ), verbosity, _MS_VERBOSITY_LEVEL, ) @@ -1474,14 +1503,16 @@ def build_q_table(options, m, p, verbosity=0): verbosity, _MS_VERBOSITY_LEVEL, ) - elif method=="poly": + elif method == "poly": # TODO: consider implementing the poly method (see Perl version) - raise ValueError("M&S: build_q_table: Method 'poly' not implemented") + raise ValueError( + "M&S: build_q_table: Method 'poly' not implemented" + ) else: msg = "\tM&S: build_q_table: Error no other methods available. 
The chosen method ({}) does not exist!".format( - method - ) + method + ) verbose_print( msg, verbosity, @@ -1515,14 +1546,18 @@ def build_q_table(options, m, p, verbosity=0): ) # now we integrate and renormalise (if the table is not all zero) - I = get_integration_constant_q(q_interpolator, tmp_table, qdata, verbosity=verbosity) + I = get_integration_constant_q( + q_interpolator, tmp_table, qdata, verbosity=verbosity + ) if I > 0: # normalise to 1.0 by dividing the data by 1.0/$I q_interpolator.multiply_table_column(1, 1.0 / I) # test this - new_I = get_integration_constant_q(q_interpolator, tmp_table, qdata, verbosity=verbosity) + new_I = get_integration_constant_q( + q_interpolator, tmp_table, qdata, verbosity=verbosity + ) # fail if error in integral > 1e-6 (should be ~ machine precision) if abs(1.0 - new_I) > 1e-6: @@ -1615,8 +1650,8 @@ def get_integration_constant_q(q_interpolator, tmp_table, qdata, verbosity=0): x = q_interpolator.interpolate([q]) if len(x) == 0: msg = "\tM&S: build_q_table: Q interpolator table interpolation failed.\n\t\ttmp_table = {}\n\t\tq_data = {}".format( - str(tmp_table), str(qdata) - ) + str(tmp_table), str(qdata) + ) verbose_print( msg, verbosity, @@ -1660,7 +1695,12 @@ def fill_data(sample_values, data_dict): def calc_e_integral( - options, integrals_string, interpolator_name, mass_string, period_string, verbosity=0 + options, + integrals_string, + interpolator_name, + mass_string, + period_string, + verbosity=0, ): """ Function to calculate the P integral @@ -1695,7 +1735,15 @@ def calc_e_integral( Moecache[integrals_string][mass_period_string] = I verbose_print( "\tM&S: calc_ecc_integral: min_ecc: {} max ecc: {} integrals_string: {} interpolator_name: {} mass_string: {} period_string: {} mass: {} period: {} I: {}".format( - min_ecc, max_ecc, integrals_string, interpolator_name, mass_string, period_string, options[mass_string], options[period_string], I + min_ecc, + max_ecc, + integrals_string, + interpolator_name, + mass_string, + period_string, + options[mass_string], + options[period_string], + I, ), verbosity, _MS_VERBOSITY_LEVEL, @@ -1703,7 +1751,15 @@ def calc_e_integral( else: verbose_print( "\tM&S: calc_ecc_integral: Found cached value for min_ecc: {} max ecc: {} integrals_string: {} interpolator_name: {} mass_string: {} period_string: {} mass: {} period: {} I: {}".format( - min_ecc, max_ecc, integrals_string, interpolator_name, mass_string, period_string, options[mass_string], options[period_string], Moecache[integrals_string][mass_period_string] + min_ecc, + max_ecc, + integrals_string, + interpolator_name, + mass_string, + period_string, + options[mass_string], + options[period_string], + Moecache[integrals_string][mass_period_string], ), verbosity, _MS_VERBOSITY_LEVEL, @@ -1743,7 +1799,12 @@ def calc_P_integral( Moecache[integrals_string][options[mass_string]] = I verbose_print( "\tM&S: calc_P_integral: min_P: {} integrals_string: {} interpolator_name: {} mass_string: {} mass: {} I: {}".format( - min_P, integrals_string, interpolator_name, mass_string, options[mass_string], I + min_P, + integrals_string, + interpolator_name, + mass_string, + options[mass_string], + I, ), verbosity, _MS_VERBOSITY_LEVEL, @@ -1751,12 +1812,18 @@ def calc_P_integral( else: verbose_print( "\tM&S: calc_P_integral: Found cached value for min_P: {} integrals_string: {} interpolator_name: {} mass_string: {} mass: {} I: {}".format( - min_P, integrals_string, interpolator_name, mass_string, options[mass_string], Moecache[integrals_string][options[mass_string]] + min_P, 
+ integrals_string, + interpolator_name, + mass_string, + options[mass_string], + Moecache[integrals_string][options[mass_string]], ), verbosity, _MS_VERBOSITY_LEVEL, ) + def calc_total_probdens(prob_dict): """ Function to calculate the total probability density @@ -1765,10 +1832,11 @@ def calc_total_probdens(prob_dict): total_probdens = 1 for key in prob_dict: total_probdens *= prob_dict[key] - prob_dict['total_probdens'] = total_probdens + prob_dict["total_probdens"] = total_probdens return prob_dict + def Moe_di_Stefano_2017_pdf(options, verbosity=0): """ Moe & diStefano function to calculate the probability density. @@ -1791,7 +1859,9 @@ def Moe_di_Stefano_2017_pdf(options, verbosity=0): _MS_VERBOSITY_LEVEL, ) - prob_dict = {} # Dictionary containing all the pdf values for the different parameters + prob_dict = ( + {} + ) # Dictionary containing all the pdf values for the different parameters # Get the multiplicity from the options, and if its not there, calculate it based on the # TODO: the function below makes no sense. We NEED to pass the multiplicity in the @@ -1822,11 +1892,13 @@ def Moe_di_Stefano_2017_pdf(options, verbosity=0): # multiplicity fraction # Calculate the probability, or rather, fraction, of stars that belong to this mass - multiplicity_probability = Moe_di_Stefano_2017_multiplicity_fractions(options, verbosity)[multiplicity - 1] - prob_dict['multiplicity'] = multiplicity_probability + multiplicity_probability = Moe_di_Stefano_2017_multiplicity_fractions( + options, verbosity + )[multiplicity - 1] + prob_dict["multiplicity"] = multiplicity_probability verbose_print( "\tM&S: Moe_di_Stefano_2017_pdf: Appended multiplicity (mass1 = {}) probability ({}) to the prob dict ({})".format( - options["M_1"], prob_dict['multiplicity'], prob_dict + options["M_1"], prob_dict["multiplicity"], prob_dict ), verbosity, _MS_VERBOSITY_LEVEL, @@ -1841,10 +1913,10 @@ def Moe_di_Stefano_2017_pdf(options, verbosity=0): # TODO: Create an n-part-powerlaw method that can have breakpoints and slopes. I'm using a three-part power law now. # TODO: is this actually the correct way? putting the M1 in there? Do we sample in log space? 
M1_probability = Kroupa2001(options["M_1"]) * options["M_1"] - prob_dict['M_1'] = M1_probability + prob_dict["M_1"] = M1_probability verbose_print( "\tM&S: Moe_di_Stefano_2017_pdf: Appended Mass (m={}) probability ({}) to the prob dict ({})".format( - options["M_1"], prob_dict['M_1'], prob_dict + options["M_1"], prob_dict["M_1"], prob_dict ), verbosity, _MS_VERBOSITY_LEVEL, @@ -1879,7 +1951,11 @@ def Moe_di_Stefano_2017_pdf(options, verbosity=0): ) # Make a table storing Moe's data for q distributions - if options.get("M_2", None) or options.get("M_3", None) or options.get("M_4", None): + if ( + options.get("M_2", None) + or options.get("M_3", None) + or options.get("M_4", None) + ): if not Moecache.get("rinterpolator_q", None): Moecache["rinterpolator_q"] = py_rinterpolate.Rinterpolate( table=Moecache["q_distributions"], # Contains the table of data @@ -1897,10 +1973,12 @@ def Moe_di_Stefano_2017_pdf(options, verbosity=0): # Make a table storing Moe's data for q distributions, but only if the ecc is actually sampled if "ecc" in options: - if not options['ecc']==None: + if not options["ecc"] == None: if not Moecache.get("rinterpolator_e", None): Moecache["rinterpolator_e"] = py_rinterpolate.Rinterpolate( - table=Moecache["ecc_distributions"], # Contains the table of data + table=Moecache[ + "ecc_distributions" + ], # Contains the table of data nparams=3, # log10M, log10P, e ndata=1, # verbosity=verbosity - (_MS_VERBOSITY_INTERPOLATOR_LEVEL - 1), @@ -1936,10 +2014,10 @@ def Moe_di_Stefano_2017_pdf(options, verbosity=0): [np.log10(options["M_1"]), np.log10(options["P"])] )[0] p_val = p_val / Moecache["P_integrals"][options["M_1"]] - prob_dict['P'] = p_val + prob_dict["P"] = p_val verbose_print( "\tM&S: Moe_di_Stefano_2017_pdf: Appended period (m={}, P={}) probability ({}) to the prob list ({})".format( - options["M_1"], options["P"], prob_dict['P'], prob_dict + options["M_1"], options["P"], prob_dict["P"], prob_dict ), verbosity, _MS_VERBOSITY_LEVEL, @@ -1964,24 +2042,28 @@ def Moe_di_Stefano_2017_pdf(options, verbosity=0): # Construct the q table build_q_table(options, m_label, p_label, verbosity=verbosity) verbose_print( - "\tM&S: Moe_di_Stefano_2017_pdf: Created q_table ({}) for m={} p={}".format(Moecache[ - "rinterpolator_q_given_{}_log10{}".format(m_label, p_label) - ], options[m_label], options[p_label]), + "\tM&S: Moe_di_Stefano_2017_pdf: Created q_table ({}) for m={} p={}".format( + Moecache[ + "rinterpolator_q_given_{}_log10{}".format(m_label, p_label) + ], + options[m_label], + options[p_label], + ), verbosity, _MS_VERBOSITY_LEVEL, ) # Add probability for the mass ratio q_prob = Moecache[ - "rinterpolator_q_given_{}_log10{}".format(m_label, p_label) - ].interpolate([secondary_mass / primary_mass])[0] - prob_dict['q'] = q_prob + "rinterpolator_q_given_{}_log10{}".format(m_label, p_label) + ].interpolate([secondary_mass / primary_mass])[0] + prob_dict["q"] = q_prob verbose_print( "\tM&S: Moe_di_Stefano_2017_pdf: appended mass ratio (M={} P={} q={}) probability ({}) to the prob list ({}) ".format( options["M_1"], options["P"], options["M_2"] / options["M_1"], - prob_dict['q'], + prob_dict["q"], prob_dict, ), verbosity, @@ -1997,7 +2079,7 @@ def Moe_di_Stefano_2017_pdf(options, verbosity=0): # Only do this if the eccentricity is sampled if "ecc" in options: - if not options['ecc']==None: + if not options["ecc"] == None: # Calculate ecc integral or use cached value calc_e_integral( options, "ecc_integrals", "rinterpolator_e", "M_1", "P", verbosity @@ -2009,10 +2091,14 @@ def 
Moe_di_Stefano_2017_pdf(options, verbosity=0): [np.log10(options["M_1"]), np.log10(options["P"]), options["ecc"]] )[0] ecc_val = ecc_val / Moecache["ecc_integrals"][mass_period_string] - prob_dict['ecc'] = ecc_val + prob_dict["ecc"] = ecc_val verbose_print( "\tM&S: Moe_di_Stefano_2017_pdf: Appended eccentricity (m={}, P={}, ecc={}) probability ({}) to the prob list ({})".format( - options["M_1"], options["P"], options["ecc"], prob_dict['ecc'], prob_dict + options["M_1"], + options["P"], + options["ecc"], + prob_dict["ecc"], + prob_dict, ), verbosity, _MS_VERBOSITY_LEVEL, @@ -2042,7 +2128,7 @@ def Moe_di_Stefano_2017_pdf(options, verbosity=0): if options["P2"] < min_P2: # period is too short : system is not hierarchical - prob_dict['P2'] = 0 + prob_dict["P2"] = 0 verbose_print( "\tM&S: Moe_di_Stefano_2017_pdf: period2 is too short: {} < {}, system is not hierarchical. Added 0 to probability list".format( options["P1"], min_P2 @@ -2077,10 +2163,14 @@ def Moe_di_Stefano_2017_pdf(options, verbosity=0): [np.log10(options["M_1+M_2"]), np.log10(options["P2"])] )[0] p_val = p_val / Moecache["P2_integrals"][options["M_1+M_2"]] - prob_dict['P2'] = p_val + prob_dict["P2"] = p_val verbose_print( "\tM&S: Moe_di_Stefano_2017_pdf: Appended period2 (m1={} m2={}, P2={}) probability ({}) to the prob list ({})".format( - options["M_1"], options["M_2"], options["P2"], prob_dict['P2'], prob_dict + options["M_1"], + options["M_2"], + options["P2"], + prob_dict["P2"], + prob_dict, ), verbosity, _MS_VERBOSITY_LEVEL, @@ -2113,14 +2203,14 @@ def Moe_di_Stefano_2017_pdf(options, verbosity=0): q2_val = Moecache[ "rinterpolator_q_given_{}_log10{}".format(m_label, p_label) ].interpolate([secondary_mass / primary_mass])[0] - prob_dict['q2'] = q2_val + prob_dict["q2"] = q2_val verbose_print( "\tM&S: Moe_di_Stefano_2017_pdf: appended mass ratio (M_1+M_2={} M_3={} P={} q={}) probability ({}) to the prob list ({}) ".format( options["M_1+M_2"], options["M_3"], options["P"], secondary_mass / primary_mass, - prob_dict['q2'], + prob_dict["q2"], prob_dict, ), verbosity, @@ -2164,10 +2254,14 @@ def Moe_di_Stefano_2017_pdf(options, verbosity=0): [np.log10(options["M_1+M_2"]), np.log10(options["P2"])] )[0] p_val = p_val / Moecache["P2_integrals"][options["M_1+M_2"]] - prob_dict['P3'] = p_val + prob_dict["P3"] = p_val verbose_print( "\tM&S: Moe_di_Stefano_2017_pdf: Appended period2 (M=4) (M_1={} M_2={}, P2={}) probability ({}) to the prob list ({})".format( - options["M_1"], options["M_2"], options["P2"], prob_dict['P3'], prob_dict + options["M_1"], + options["M_2"], + options["P2"], + prob_dict["P3"], + prob_dict, ), verbosity, _MS_VERBOSITY_LEVEL, @@ -2199,16 +2293,16 @@ def Moe_di_Stefano_2017_pdf(options, verbosity=0): # Add the probability q3_prob = Moecache[ - "rinterpolator_q_given_{}_log10{}".format(m_label, p_label) + "rinterpolator_q_given_{}_log10{}".format(m_label, p_label) ].interpolate([secondary_mass / primary_mass])[0] - prob_dict['q3'] = q3_prob + prob_dict["q3"] = q3_prob verbose_print( "\tM&S: Moe_di_Stefano_2017_pdf: appended mass ratio (M_1+M_2={} M_3={} P={} q={}) probability ({}) to the prob list ({}) ".format( options["M_1+M_2"], options["M_3"], options["P"], secondary_mass / primary_mass, - prob_dict['q3'], + prob_dict["q3"], prob_dict, ), verbosity, @@ -2223,8 +2317,8 @@ def Moe_di_Stefano_2017_pdf(options, verbosity=0): # check for input of multiplicity elif multiplicity not in range(1, 5): msg = "\tM&S: Moe_di_Stefano_2017_pdf: Unknown multiplicity {}".format( - multiplicity - ) + multiplicity + ) 
verbose_print( msg, verbosity, @@ -2242,7 +2336,7 @@ def Moe_di_Stefano_2017_pdf(options, verbosity=0): options["M_1"], len(prob_dict), str(prob_dict), - prob_dict['total_probdens'] + prob_dict["total_probdens"], ), verbosity, _MS_VERBOSITY_LEVEL, @@ -2253,10 +2347,10 @@ def Moe_di_Stefano_2017_pdf(options, verbosity=0): options["M_1"], options["M_2"] / options["M_1"] if options.get("M_2", None) else "N/A", np.log10(options["P"]), - options['ecc'] if options.get("ecc", None) else "N/A", + options["ecc"] if options.get("ecc", None) else "N/A", len(prob_dict), str(prob_dict), - prob_dict['total_probdens'] + prob_dict["total_probdens"], ), verbosity, _MS_VERBOSITY_LEVEL, @@ -2273,7 +2367,7 @@ def Moe_di_Stefano_2017_pdf(options, verbosity=0): options["ecc2"] if options.get("ecc2", None) else "N/A", len(prob_dict), str(prob_dict), - prob_dict['total_probdens'] + prob_dict["total_probdens"], ), verbosity, _MS_VERBOSITY_LEVEL, @@ -2284,14 +2378,16 @@ def Moe_di_Stefano_2017_pdf(options, verbosity=0): options["M_1"], options["M_2"] / options["M_1"] if options.get("M_2", None) else "N/A", np.log10(options["P"]), - options['ecc'] if options.get("ecc", None) else "N/A", + options["ecc"] if options.get("ecc", None) else "N/A", options["M_3"], np.log10(options["P2"]), options["ecc2"] if options.get("ecc2", None) else "N/A", options["M_4"], np.log10(options["P3"]), options["ecc3"] if options.get("ecc3", None) else "N/A", - len(prob_dict), str(prob_dict), prob_dict['total_probdens'] + len(prob_dict), + str(prob_dict), + prob_dict["total_probdens"], ), verbosity, _MS_VERBOSITY_LEVEL, diff --git a/binarycpython/utils/functions.py b/binarycpython/utils/functions.py index dfa36e9376383fb3850ba8fb71a6a8be45368653..9c304b5577e0d3ae850314cc51bb15d45810f6de 100644 --- a/binarycpython/utils/functions.py +++ b/binarycpython/utils/functions.py @@ -43,6 +43,7 @@ import py_rinterpolate # Unsorted ######################################################## + def format_number(number): # compact number formatter @@ -52,6 +53,7 @@ def format_number(number): return string + def check_if_in_shell(): """ Function to check whether the script is running from a shell @@ -64,28 +66,29 @@ def check_if_in_shell(): return in_shell + def ANSI_colours(self): # ANSI colours dictionary foreground_colours = { - 'red':Fore.RED, - 'yellow':Fore.YELLOW, - 'blue':Fore.BLUE, - 'cyan':Fore.CYAN, - 'green':Fore.GREEN, - 'magenta':Fore.MAGENTA, - 'white':Fore.WHITE, - 'black':Fore.BLACK, + "red": Fore.RED, + "yellow": Fore.YELLOW, + "blue": Fore.BLUE, + "cyan": Fore.CYAN, + "green": Fore.GREEN, + "magenta": Fore.MAGENTA, + "white": Fore.WHITE, + "black": Fore.BLACK, } background_colours = { - 'red':Back.RED, - 'yellow':Back.YELLOW, - 'blue':Back.BLUE, - 'cyan':Back.CYAN, - 'green':Back.GREEN, - 'magenta':Back.MAGENTA, - 'white':Back.WHITE, - 'black':Back.BLACK, + "red": Back.RED, + "yellow": Back.YELLOW, + "blue": Back.BLUE, + "cyan": Back.CYAN, + "green": Back.GREEN, + "magenta": Back.MAGENTA, + "white": Back.WHITE, + "black": Back.BLACK, } default_style = Style.BRIGHT @@ -94,11 +97,12 @@ def ANSI_colours(self): for c in foreground_colours: colours[c] = default_style + foreground_colours[c] for d in background_colours: - colours[c + ' on ' + d] = foreground_colours[c] + background_colours[d] - colours['reset'] = Style.RESET_ALL + colours[c + " on " + d] = foreground_colours[c] + background_colours[d] + colours["reset"] = Style.RESET_ALL return colours + def mem_use(): """ Return current process memory use in MB. 
(Takes no arguments) Note: this is per-thread only. @@ -106,28 +110,32 @@ def mem_use(): return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024.0 + def trem(dt, count, dn, n): """ Estimate time remaining (seconds) given a differential time and count (i.e. progress = $count/$n). $dt is the time since the last call, $count is the current progress count, $dn is the number run since the last call, and $n is the total number required. """ - tpr = dt / max(1,dn) + tpr = dt / max(1, dn) etasecs = tpr * (n - count) (eta, units) = conv_time_units(etasecs) return (eta, units, tpr, etasecs) + def conv_time_units(t): """ Converts time (t, in seconds, passing in as the only argument) to seconds, minutes or hours depending on its magnitude. Returns a tuple (t,units). """ - units='s'; # default to seconds + units = "s" + # default to seconds if t > 60: - t /= 60; - units='m'; + t /= 60 + units = "m" if t > 60: - t /=60; - units='h'; + t /= 60 + units = "h" return (t, units) + class AutoVivificationDict(dict): """ Implementation of perl's autovivification feature, by overriding the @@ -157,6 +165,7 @@ class AutoVivificationDict(dict): self = other return self + def bin_data(value, binwidth): """ Function that bins the data @@ -164,20 +173,22 @@ def bin_data(value, binwidth): Uses the absolute value of binwidth """ - return ((0.5 if value > 0.0 else -0.5) + int(value/abs(binwidth))) * abs(binwidth) + return ((0.5 if value > 0.0 else -0.5) + int(value / abs(binwidth))) * abs(binwidth) + def convert_bytes(size): """ Function to return the size + a magnitude string """ - for name in ['bytes', 'KB', 'MB', 'GB', 'TB']: + for name in ["bytes", "KB", "MB", "GB", "TB"]: if size < 1024.0: return "%3.1f %s" % (size, name) size /= 1024.0 return size + def get_size(obj, seen=None): """ Recursively finds size of objects @@ -197,9 +208,9 @@ def get_size(obj, seen=None): if isinstance(obj, dict): size += sum([get_size(v, seen) for v in obj.values()]) size += sum([get_size(k, seen) for k in obj.keys()]) - elif hasattr(obj, '__dict__'): + elif hasattr(obj, "__dict__"): size += get_size(obj.__dict__, seen) - elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)): + elif hasattr(obj, "__iter__") and not isinstance(obj, (str, bytes, bytearray)): size += sum([get_size(i, seen) for i in obj]) return size @@ -214,7 +225,9 @@ def format_ensemble_results(ensemble_dictionary): original_ensemble_results = ensemble_dictionary - float_format_ensemble_results = recursive_change_key_to_float(original_ensemble_results) + float_format_ensemble_results = recursive_change_key_to_float( + original_ensemble_results + ) del original_ensemble_results gc.collect() @@ -224,7 +237,9 @@ def format_ensemble_results(ensemble_dictionary): gc.collect() # Then Change the keys back to a string but with a %g format. - reformatted_ensemble_results = recursive_change_key_to_string(sorted_ensemble_results) + reformatted_ensemble_results = recursive_change_key_to_string( + sorted_ensemble_results + ) del sorted_ensemble_results gc.collect() @@ -422,7 +437,6 @@ def imports(): yield val.__name__ - def convfloat(x): """ Convert scalar x to a float if we can, in which case return the float, otherwise just return x without changing it. Usually, x is a string, but could be anything that float() can handle without failure. 
@@ -433,8 +447,8 @@ def convfloat(x): except ValueError: return x -def datalinedict(line : str, - parameters : list): + +def datalinedict(line: str, parameters: list): """ Convert a line of data to a more convenient dictionary. Arguments: @@ -444,29 +458,30 @@ def datalinedict(line : str, Note: if the parameter is a floating point number, it will be converted to Python's float type. """ - return {param:convfloat(value) for param, value in zip(parameters, line.split())} + return {param: convfloat(value) for param, value in zip(parameters, line.split())} -def pad_output_distribution(dist : dict, - binwidth : float): + +def pad_output_distribution(dist: dict, binwidth: float): """ Given a distribution, dist (a dictionary), which should be binned every binwidth (float), fill the distribution with zeros when there is no data. Note: this changes the data in place. """ # sorted list of the keys - skeys = sorted(dist.keys(), key = lambda x: float(x)) + skeys = sorted(dist.keys(), key=lambda x: float(x)) # get min and max, offset by the binwidth - min_val = skeys[ 0] - binwidth + min_val = skeys[0] - binwidth max_val = skeys[-1] + binwidth # pad with zeros x = min_val while x <= max_val: - dist[x] = dist.setdefault(x,0.0) + dist[x] = dist.setdefault(x, 0.0) x += binwidth return dist + class catchtime(object): """ Context manager to calculate time spent @@ -511,6 +526,7 @@ class Capturing(list): del self._stringio # free up some memory sys.stdout = self._stdout + def call_binary_c_config(argument): """ Function to interface with the binary_c config file @@ -532,10 +548,9 @@ def call_binary_c_config(argument): msg = "binary_c-config file does not exist. Aborting" raise ValueError(msg) - output = ( - subprocess.run([BINARY_C_CONFIG, argument], stdout=subprocess.PIPE, check=True) - .stdout.decode("utf-8") - ) + output = subprocess.run( + [BINARY_C_CONFIG, argument], stdout=subprocess.PIPE, check=True + ).stdout.decode("utf-8") return output @@ -561,6 +576,7 @@ def verbose_print(message: str, verbosity: int, minimal_verbosity: int) -> None: if verbosity >= minimal_verbosity: print(message) + def remove_file(file: str, verbosity: int = 0) -> None: """ Function to remove files but with verbosity @@ -597,6 +613,7 @@ def remove_file(file: str, verbosity: int = 0) -> None: def get_username(): return psutil.Process().username() + def temp_dir(*args: str) -> str: """ Function to create directory within the TMP directory of the file system @@ -624,6 +641,7 @@ def temp_dir(*args: str) -> str: return path + def create_hdf5(data_dir: str, name: str) -> None: """ Function to create an hdf5 file from the contents of a directory: @@ -857,14 +875,16 @@ def parse_binary_c_version_info(version_info_string: str) -> dict: split_info = el.split("Ensemble ")[-1].split(" is ") if len(split_info) > 1: - if not split_info[0].startswith('filter'): + if not split_info[0].startswith("filter"): ensemble_dict[int(split_info[0])] = split_info[-1] else: - filter_no = int(split_info[0].replace('filter ', '')) + filter_no = int(split_info[0].replace("filter ", "")) ensemble_filter_dict[filter_no] = split_info[-1] version_info_dict["ensembles"] = ensemble_dict if ensemble_dict else None - version_info_dict["ensemble_filters"] = ensemble_filter_dict if ensemble_filter_dict else None + version_info_dict["ensemble_filters"] = ( + ensemble_filter_dict if ensemble_filter_dict else None + ) ########################## # macros: @@ -1093,7 +1113,9 @@ def example_parse_output(output: str, selected_header: str) -> dict: if len(value_dicts) == 0: 
print( - "Sorry, didn't find any line matching your header {}".format(selected_header) + "Sorry, didn't find any line matching your header {}".format( + selected_header + ) ) return None @@ -1675,7 +1697,7 @@ def inspect_dict( type(input_dict[key]) (except if the value is a dict) """ - structure_dict = OrderedDict() # TODO: check if this still works + structure_dict = OrderedDict() # TODO: check if this still works # for key, value in input_dict.items(): @@ -1730,7 +1752,7 @@ def merge_dicts(dict_1: dict, dict_2: dict) -> dict: """ # Set up new dict - new_dict = OrderedDict() # TODO: check if this still necessary + new_dict = OrderedDict() # TODO: check if this still necessary # keys_1 = dict_1.keys() @@ -1768,11 +1790,17 @@ def merge_dicts(dict_1: dict, dict_2: dict) -> dict: # If they keys are not the same, it depends on their type whether we still deal with them at all, or just raise an error if not type(dict_1[key]) is type(dict_2[key]): # Exceptions: numbers can be added - if isinstance(dict_1[key], (int, float, np.float64)) and isinstance(dict_2[key], (int, float, np.float64)): + if isinstance(dict_1[key], (int, float, np.float64)) and isinstance( + dict_2[key], (int, float, np.float64) + ): new_dict[key] = dict_1[key] + dict_2[key] # Exceptions: versions of dicts can be merged - elif isinstance(dict_1[key], (dict, OrderedDict, type(AutoVivificationDict))) and isinstance(dict_2[key], (dict, OrderedDict, type(AutoVivificationDict))): + elif isinstance( + dict_1[key], (dict, OrderedDict, type(AutoVivificationDict)) + ) and isinstance( + dict_2[key], (dict, OrderedDict, type(AutoVivificationDict)) + ): new_dict[key] = merge_dicts(dict_1[key], dict_2[key]) # If the above cases have not dealt with it, then we should raise an error @@ -1810,7 +1838,9 @@ def merge_dicts(dict_1: dict, dict_2: dict) -> dict: new_dict[key] = dict_1[key] + dict_2[key] # Astropy quantities (using a dummy type representing the numpy array) - elif isinstance(dict_1[key], type(np.array([1])*u.m)) and isinstance(dict_2[key], type(np.array([1])*u.m)): + elif isinstance(dict_1[key], type(np.array([1]) * u.m)) and isinstance( + dict_2[key], type(np.array([1]) * u.m) + ): new_dict[key] = dict_1[key] + dict_2[key] # dicts @@ -1855,7 +1885,7 @@ def update_dicts(dict_1: dict, dict_2: dict) -> dict: """ # Set up new dict - new_dict = OrderedDict() # TODO: check if this still works + new_dict = OrderedDict() # TODO: check if this still works # keys_1 = dict_1.keys() @@ -1933,7 +1963,7 @@ def multiply_values_dict(input_dict, factor): """ for key in input_dict: - if not key == 'general_info': + if not key == "general_info": if isinstance(input_dict[key], (dict, OrderedDict)): input_dict[key] = multiply_values_dict(input_dict[key], factor) else: @@ -1942,6 +1972,7 @@ def multiply_values_dict(input_dict, factor): return input_dict + def custom_sort_dict(input_dict): """ Returns a dictionary that is ordered, but can handle numbers better than normal OrderedDict @@ -1971,7 +2002,7 @@ def custom_sort_dict(input_dict): msg = "Different types in the same dictionary key set" # Create a string repr of the type name to sort them afterwards - str_types = {repr(el):el for el in all_types_keys} + str_types = {repr(el): el for el in all_types_keys} # Set up sorted keys list sorted_keys = [] @@ -2001,15 +2032,19 @@ def recursive_change_key_to_float(input_dict): Does not work with lists as values """ - new_dict = OrderedDict() # TODO: check if this still works + new_dict = OrderedDict() # TODO: check if this still works for key in input_dict: 
if isinstance(input_dict[key], (dict, OrderedDict)): try: num_key = float(key) - new_dict[num_key] = recursive_change_key_to_float(copy.deepcopy(input_dict[key])) + new_dict[num_key] = recursive_change_key_to_float( + copy.deepcopy(input_dict[key]) + ) except ValueError: - new_dict[key] = recursive_change_key_to_float(copy.deepcopy(input_dict[key])) + new_dict[key] = recursive_change_key_to_float( + copy.deepcopy(input_dict[key]) + ) else: try: num_key = float(key) @@ -2025,15 +2060,19 @@ def recursive_change_key_to_string(input_dict): Function to recursively change the key back to a string but this time in a format that we decide """ - new_dict = OrderedDict() # TODO: check if this still works + new_dict = OrderedDict() # TODO: check if this still works for key in input_dict: if isinstance(input_dict[key], (dict, OrderedDict)): if isinstance(key, (int, float)): string_key = "{:g}".format(key) - new_dict[string_key] = recursive_change_key_to_string(copy.deepcopy(input_dict[key])) + new_dict[string_key] = recursive_change_key_to_string( + copy.deepcopy(input_dict[key]) + ) else: - new_dict[key] = recursive_change_key_to_string(copy.deepcopy(input_dict[key])) + new_dict[key] = recursive_change_key_to_string( + copy.deepcopy(input_dict[key]) + ) else: if isinstance(key, (int, float)): string_key = "{:g}".format(key) diff --git a/binarycpython/utils/grid.py b/binarycpython/utils/grid.py index b3668ff1ead396e241a90296c4ecafea179ec98e..81ae2cae7534198a9f8cbeaf123b4f2a905b0b57 100644 --- a/binarycpython/utils/grid.py +++ b/binarycpython/utils/grid.py @@ -42,6 +42,7 @@ import setproctitle import py_rinterpolate from colorama import init as colorama_init + colorama_init() from binarycpython.utils.grid_options_defaults import ( @@ -81,7 +82,7 @@ from binarycpython.utils.functions import ( mem_use, ANSI_colours, check_if_in_shell, - format_number + format_number, ) # from binarycpython.utils.hpc_functions import ( @@ -103,7 +104,8 @@ from binarycpython.utils.distribution_functions import ( ) from binarycpython import _binary_c_bindings -secs_per_day = 86400 # probably needs to go somewhere more sensible +secs_per_day = 86400 # probably needs to go somewhere more sensible + class Population: """ @@ -145,12 +147,20 @@ class Population: self.set(**kwargs) # Load M&s options - self.grid_options['m&s_options'] = copy.deepcopy(moe_di_stefano_default_options) + self.grid_options["m&s_options"] = copy.deepcopy(moe_di_stefano_default_options) # Write M&S options to a file. NOTE: not sure why i put this here anymore - os.makedirs(os.path.join(self.grid_options["tmp_dir"], "moe_distefano"), exist_ok=True) - with open(os.path.join(os.path.join(self.grid_options["tmp_dir"], "moe_distefano"), "moeopts.dat"), "w") as f: - f.write(json.dumps(self.grid_options['m&s_options'], indent=4)) + os.makedirs( + os.path.join(self.grid_options["tmp_dir"], "moe_distefano"), exist_ok=True + ) + with open( + os.path.join( + os.path.join(self.grid_options["tmp_dir"], "moe_distefano"), + "moeopts.dat", + ), + "w", + ) as f: + f.write(json.dumps(self.grid_options["m&s_options"], indent=4)) # Argline dict self.argline_dict = {} @@ -265,7 +275,7 @@ class Population: key, kwargs[key] ), self.grid_options["verbosity"], - 0, # NOTE: setting this to be 0 prevents mistakes being overlooked. + 0, # NOTE: setting this to be 0 prevents mistakes being overlooked. 
) self.custom_options[key] = kwargs[key] @@ -298,7 +308,7 @@ class Population: ) # Grab the input and split them up, while accepting only non-empty entries - #cmdline_args = args + # cmdline_args = args self.grid_options["_commandline_input"] = cmdline_args # Make dict and fill it @@ -381,11 +391,7 @@ class Population: ): return grid_variable - def update_grid_variable( - self, - name: str, - **kwargs - ) -> None: + def update_grid_variable(self, name: str, **kwargs) -> None: """ Function to update the values of a grid variable. @@ -400,10 +406,12 @@ class Population: try: grid_variable = self.grid_options["_grid_variables"][name] except KeyError: - msg = "Unknown grid variable {} - please create it with the add_grid_variable() method.".format(name) + msg = "Unknown grid variable {} - please create it with the add_grid_variable() method.".format( + name + ) raise KeyError(msg) - for key,value in kwargs.items(): + for key, value in kwargs.items(): grid_variable[key] = value verbose_print( "Updated grid variable: {}".format(json.dumps(grid_variable, indent=4)), @@ -423,15 +431,12 @@ class Population: 1, ) except: - msg = "Failed to remove grid variable {} : please check it exists.".format(name) + msg = "Failed to remove grid variable {} : please check it exists.".format( + name + ) raise ValueError(msg) - - def rename_grid_variable( - self, - oldname: str, - newname: str - ) -> None: + def rename_grid_variable(self, oldname: str, newname: str) -> None: """ Function to rename a grid variable. @@ -449,15 +454,17 @@ class Population: """ try: - self.grid_options["_grid_variables"][newname] = self.grid_options["_grid_variables"].pop(oldname) + self.grid_options["_grid_variables"][newname] = self.grid_options[ + "_grid_variables" + ].pop(oldname) self.grid_options["_grid_variables"][newname]["name"] = newname verbose_print( - "Rename grid variable: {} to {}".format(oldname,newname), + "Rename grid variable: {} to {}".format(oldname, newname), self.grid_options["verbosity"], 1, ) except: - msg = "Failed to rename grid variable {} to {}.".format(oldname,newname) + msg = "Failed to rename grid variable {} to {}.".format(oldname, newname) raise ValueError(msg) def add_grid_variable( @@ -565,7 +572,7 @@ class Population: } # Check for gridtype input - if not gridtype in ['edge', 'centred']: + if not gridtype in ["edge", "centred"]: msg = "Unknown gridtype value. 
Please start another one" raise ValueError(msg) @@ -714,10 +721,14 @@ class Population: if self.custom_options.get("data_dir", None): if not self.custom_options.get("base_filename", None): base_name = "simulation_{}".format( - datetime.datetime.strftime(datetime.datetime.now(), "%Y%m%d_%H%M%S") + datetime.datetime.strftime( + datetime.datetime.now(), "%Y%m%d_%H%M%S" + ) ) else: - base_name = os.path.splitext(self.custom_options["base_filename"])[0] + base_name = os.path.splitext(self.custom_options["base_filename"])[ + 0 + ] settings_name = base_name + "_settings.json" @@ -778,7 +789,10 @@ class Population: """ # Only if the values are the 'default' unset values - if (self.grid_options["custom_logging_func_memaddr"] == -1 and self.grid_options["_custom_logging_shared_library_file"] is None): + if ( + self.grid_options["custom_logging_func_memaddr"] == -1 + and self.grid_options["_custom_logging_shared_library_file"] is None + ): verbose_print( "Creating and loading custom logging functionality", self.grid_options["verbosity"], @@ -789,7 +803,8 @@ class Population: # Generate entire shared lib code around logging lines custom_logging_code = binary_c_log_code( self.grid_options["C_logging_code"], - verbosity=self.grid_options["verbosity"]-(_CUSTOM_LOGGING_VERBOSITY_LEVEL-1), + verbosity=self.grid_options["verbosity"] + - (_CUSTOM_LOGGING_VERBOSITY_LEVEL - 1), ) # Load memory address @@ -798,7 +813,8 @@ class Population: self.grid_options["_custom_logging_shared_library_file"], ) = create_and_load_logging_function( custom_logging_code, - verbosity=self.grid_options["verbosity"]-(_CUSTOM_LOGGING_VERBOSITY_LEVEL-1), + verbosity=self.grid_options["verbosity"] + - (_CUSTOM_LOGGING_VERBOSITY_LEVEL - 1), custom_tmp_dir=self.grid_options["tmp_dir"], ) @@ -806,12 +822,15 @@ class Population: # Generate real logging code logging_line = autogen_C_logging_code( self.grid_options["C_auto_logging"], - verbosity=self.grid_options["verbosity"]-(_CUSTOM_LOGGING_VERBOSITY_LEVEL-1), + verbosity=self.grid_options["verbosity"] + - (_CUSTOM_LOGGING_VERBOSITY_LEVEL - 1), ) # Generate entire shared lib code around logging lines custom_logging_code = binary_c_log_code( - logging_line, verbosity=self.grid_options["verbosity"]-(_CUSTOM_LOGGING_VERBOSITY_LEVEL-1) + logging_line, + verbosity=self.grid_options["verbosity"] + - (_CUSTOM_LOGGING_VERBOSITY_LEVEL - 1), ) # Load memory address @@ -820,7 +839,8 @@ class Population: self.grid_options["_custom_logging_shared_library_file"], ) = create_and_load_logging_function( custom_logging_code, - verbosity=self.grid_options["verbosity"]-(_CUSTOM_LOGGING_VERBOSITY_LEVEL-1), + verbosity=self.grid_options["verbosity"] + - (_CUSTOM_LOGGING_VERBOSITY_LEVEL - 1), custom_tmp_dir=self.grid_options["tmp_dir"], ) else: @@ -858,23 +878,27 @@ class Population: self.grid_options["_population_id"] = uuid.uuid4().hex # save number of stored log stats - self.shared_memory["n_saved_log_stats"] = multiprocessing.Value('i', 0) + self.shared_memory["n_saved_log_stats"] = multiprocessing.Value("i", 0) # set previous logging time _t = time.time() - self.shared_memory["prev_log_time"] = multiprocessing.Array('d', [_t] * self.grid_options['n_logging_stats']) + self.shared_memory["prev_log_time"] = multiprocessing.Array( + "d", [_t] * self.grid_options["n_logging_stats"] + ) # set previous logging system number to 0 - self.shared_memory["prev_log_system_number"] = multiprocessing.Array('i', [0] * self.grid_options['n_logging_stats']) + self.shared_memory["prev_log_system_number"] = 
multiprocessing.Array( + "i", [0] * self.grid_options["n_logging_stats"] + ) # array to store memory use per-thread mem = 1.0 * mem_use() - self.shared_memory["memory_use_per_thread"] = multiprocessing.Array('d', [mem] * self.grid_options["amt_cores"]) + self.shared_memory["memory_use_per_thread"] = multiprocessing.Array( + "d", [mem] * self.grid_options["amt_cores"] + ) - def clean( - self - ) -> None: + def clean(self) -> None: """ Clean the contents of the population object so it can be reused. @@ -941,11 +965,11 @@ class Population: "total_probability_weighted_mass_run": self.grid_options[ "_total_probability_weighted_mass_run" ], - "zero_prob_stars_skipped": self.grid_options['_zero_prob_stars_skipped'] + "zero_prob_stars_skipped": self.grid_options["_zero_prob_stars_skipped"], } # Add analytics dict to the metadata too: - self.grid_ensemble_results['metadata'].update(analytics_dict) + self.grid_ensemble_results["metadata"].update(analytics_dict) ## # Clean up code: remove files, unset values, unload interpolators etc. This is placed in the general evolve function, @@ -1065,7 +1089,7 @@ class Population: """ stream_logger = self._get_stream_logger() - if self.grid_options['verbosity'] >= _LOGGER_VERBOSITY_LEVEL: + if self.grid_options["verbosity"] >= _LOGGER_VERBOSITY_LEVEL: stream_logger.debug(f"setting up the system_queue_filler now") # Setup of the generator @@ -1103,7 +1127,7 @@ class Population: # print("\tsystem_queue_filler: Size dir(self): {}".format(convert_bytes(bytes_size_self))) # Send closing signal to workers. When they receive this they will terminate - if self.grid_options['verbosity'] >= _LOGGER_VERBOSITY_LEVEL: + if self.grid_options["verbosity"] >= _LOGGER_VERBOSITY_LEVEL: stream_logger.debug(f"Signaling stop to processes") # DEBUG for _ in range(amt_cores): @@ -1130,7 +1154,7 @@ class Population: """ # Set process name - setproctitle.setproctitle('binarycpython parent process') + setproctitle.setproctitle("binarycpython parent process") # Set up the manager object that can share info between processes manager = multiprocessing.Manager() @@ -1169,11 +1193,15 @@ class Population: break # Extra ensemble result manipulation: - combined_output_dict['ensemble_results']['ensemble'] = format_ensemble_results(combined_output_dict['ensemble_results'].get('ensemble', {})) + combined_output_dict["ensemble_results"]["ensemble"] = format_ensemble_results( + combined_output_dict["ensemble_results"].get("ensemble", {}) + ) gc.collect() # Take into account that we run this on multiple cores - combined_output_dict["_total_probability_weighted_mass_run"] = combined_output_dict["_total_probability_weighted_mass_run"] + combined_output_dict[ + "_total_probability_weighted_mass_run" + ] = combined_output_dict["_total_probability_weighted_mass_run"] # Put the values back as object properties self.grid_results = combined_output_dict["results"] @@ -1192,10 +1220,20 @@ class Population: self.grid_ensemble_results["metadata"][ "total_probability_weighted_mass" ] = combined_output_dict["_total_probability_weighted_mass_run"] - self.grid_ensemble_results["metadata"]["factored_in_probability_weighted_mass"] = False - if self.grid_options['ensemble_factor_in_probability_weighted_mass']: - multiply_values_dict(self.grid_ensemble_results['ensemble'], 1/self.grid_ensemble_results["metadata"]['total_probability_weighted_mass']) - self.grid_ensemble_results["metadata"]["factored_in_probability_weighted_mass"] = True + self.grid_ensemble_results["metadata"][ + "factored_in_probability_weighted_mass" + 
] = False + if self.grid_options["ensemble_factor_in_probability_weighted_mass"]: + multiply_values_dict( + self.grid_ensemble_results["ensemble"], + 1 + / self.grid_ensemble_results["metadata"][ + "total_probability_weighted_mass" + ], + ) + self.grid_ensemble_results["metadata"][ + "factored_in_probability_weighted_mass" + ] = True # Add settings of the populations all_info = self.return_all_info( @@ -1204,7 +1242,9 @@ class Population: include_binary_c_version_info=True, include_binary_c_help_all=True, ) - self.grid_ensemble_results["metadata"]["settings"] = json.loads(json.dumps(all_info, default=binaryc_json_serializer)) + self.grid_ensemble_results["metadata"]["settings"] = json.loads( + json.dumps(all_info, default=binaryc_json_serializer) + ) ############################## # Update grid options @@ -1221,7 +1261,9 @@ class Population: self.grid_options[ "_total_probability_weighted_mass_run" ] = combined_output_dict["_total_probability_weighted_mass_run"] - self.grid_options['_zero_prob_stars_skipped'] = combined_output_dict['_zero_prob_stars_skipped'] + self.grid_options["_zero_prob_stars_skipped"] = combined_output_dict[ + "_zero_prob_stars_skipped" + ] def _evolve_system_mp(self, full_system_dict): """ @@ -1277,12 +1319,12 @@ class Population: ) stream_logger = self._get_stream_logger() - if self.grid_options['verbosity'] >= _LOGGER_VERBOSITY_LEVEL: + if self.grid_options["verbosity"] >= _LOGGER_VERBOSITY_LEVEL: stream_logger.debug(f"Setting up processor: process-{self.process_ID}") # Set the process names - name = 'binarycpython population thread {}'.format(ID) - name_proc = 'binarycpython population process {}'.format(ID) + name = "binarycpython population thread {}".format(ID) + name_proc = "binarycpython population process {}".format(ID) setproctitle.setproctitle(name_proc) # setproctitle.setthreadtitle(name) @@ -1346,7 +1388,9 @@ class Population: # variables for the statu bar prints start_grid_time = time.time() - next_log_time = self.shared_memory["prev_log_time"][0] + self.grid_options["log_dt"] + next_log_time = ( + self.shared_memory["prev_log_time"][0] + self.grid_options["log_dt"] + ) next_mem_update_time = start_grid_time + self.grid_options["log_dt"] ############################################################ @@ -1370,8 +1414,6 @@ class Population: full_system_dict = self.bse_options.copy() full_system_dict.update(system_dict) - - # In the first system, explicitly check all the keys that are passed to see if # they match the keys known to binary_c. # Won't do that every system cause that is a bit of a waste of computing time. 
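
Note on the shared-memory bookkeeping reformatted in the hunks above (n_saved_log_stats, prev_log_time, prev_log_system_number, memory_use_per_thread): these use multiprocessing.Value and multiprocessing.Array, which allocate ctypes-backed storage that the worker processes inherit and update in place, so the parent process can read per-worker statistics while the grid runs. The snippet below is a minimal, standalone sketch of that pattern only, not part of the patch; the names n_saved, prev_times and worker are illustrative and not binarycpython API.

# Minimal sketch (illustrative names, standard library only) of the
# multiprocessing.Value / multiprocessing.Array pattern used for the
# shared logging statistics above.
import multiprocessing
import time


def worker(worker_id, n_saved, prev_times):
    # Each worker bumps the shared counter and records its own timestamp
    with n_saved.get_lock():
        n_saved.value += 1
    prev_times[worker_id] = time.time()


if __name__ == "__main__":
    n_workers = 4
    n_saved = multiprocessing.Value("i", 0)  # shared integer counter
    prev_times = multiprocessing.Array("d", [0.0] * n_workers)  # shared doubles

    processes = [
        multiprocessing.Process(target=worker, args=(i, n_saved, prev_times))
        for i in range(n_workers)
    ]
    for p in processes:
        p.start()
    for p in processes:
        p.join()

    print(n_saved.value, list(prev_times))
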
@@ -1407,7 +1449,6 @@ class Population: # 1, # ) - ###################### # Print status of runs # save the current time (used often) @@ -1419,7 +1460,9 @@ class Population: next_mem_update_time = now + self.grid_options["log_dt"] # calculate the next logging time - next_log_time = self.shared_memory["prev_log_time"][0] + self.grid_options["log_dt"] + next_log_time = ( + self.shared_memory["prev_log_time"][0] + self.grid_options["log_dt"] + ) # Check if we need to log info again # TODO: Check if we can put this functionality elsewhere @@ -1434,22 +1477,33 @@ class Population: # Set some values for next time next_log_time = now + self.grid_options["log_dt"] - #print("PREV ",self.shared_memory["prev_log_time"]) - #print("N LOG STATS",self.shared_memory["n_saved_log_stats"].value) + # print("PREV ",self.shared_memory["prev_log_time"]) + # print("N LOG STATS",self.shared_memory["n_saved_log_stats"].value) # shift the arrays - self.shared_memory["prev_log_time"][-(self.grid_options['n_logging_stats'] - 1):] = self.shared_memory["prev_log_time"][:(self.grid_options['n_logging_stats'] - 1)] - self.shared_memory["prev_log_system_number"][-(self.grid_options['n_logging_stats'] - 1):] = self.shared_memory["prev_log_system_number"][:(self.grid_options['n_logging_stats'] - 1)] + self.shared_memory["prev_log_time"][ + -(self.grid_options["n_logging_stats"] - 1) : + ] = self.shared_memory["prev_log_time"][ + : (self.grid_options["n_logging_stats"] - 1) + ] + self.shared_memory["prev_log_system_number"][ + -(self.grid_options["n_logging_stats"] - 1) : + ] = self.shared_memory["prev_log_system_number"][ + : (self.grid_options["n_logging_stats"] - 1) + ] # set the current time and system number self.shared_memory["prev_log_time"][0] = now self.shared_memory["prev_log_system_number"][0] = system_number # increase the number of stats - self.shared_memory["n_saved_log_stats"].value = min(self.shared_memory["n_saved_log_stats"].value+1,self.grid_options['n_logging_stats']) + self.shared_memory["n_saved_log_stats"].value = min( + self.shared_memory["n_saved_log_stats"].value + 1, + self.grid_options["n_logging_stats"], + ) - #print("FIRST (0) ",self.shared_memory["prev_log_time"][0]) - #print("LAST (",self.shared_memory["n_saved_log_stats"].value-1,")",self.shared_memory["prev_log_time"][self.shared_memory["n_saved_log_stats"].value-1]) + # print("FIRST (0) ",self.shared_memory["prev_log_time"][0]) + # print("LAST (",self.shared_memory["n_saved_log_stats"].value-1,")",self.shared_memory["prev_log_time"][self.shared_memory["n_saved_log_stats"].value-1]) ############### # Log current system info @@ -1468,7 +1522,6 @@ class Population: binary_cmdline_string = self._return_argline(full_system_dict) f.write(binary_cmdline_string) - ############## # Running the system start_runtime_binary_c = time.time() @@ -1479,7 +1532,7 @@ class Population: # Check option to ignore 0 probability systems if not self.grid_options["run_zero_probability_system"]: - if full_system_dict['probability'] == 0: + if full_system_dict["probability"] == 0: run_system = False zero_prob_stars_skipped += 1 @@ -1546,7 +1599,7 @@ class Population: ) as f: f.write("FINISHING") - if self.grid_options['verbosity'] >= _LOGGER_VERBOSITY_LEVEL: + if self.grid_options["verbosity"] >= _LOGGER_VERBOSITY_LEVEL: stream_logger.debug(f"Process-{self.process_ID} is finishing.") #################### @@ -1646,7 +1699,6 @@ class Population: self.grid_options["_failed_count"], self.grid_options["_failed_prob"], zero_prob_stars_skipped, - ), 
self.grid_options["verbosity"], 1, @@ -1691,7 +1743,7 @@ class Population: result_queue.put(output_dict) - if self.grid_options['verbosity'] >= _LOGGER_VERBOSITY_LEVEL: + if self.grid_options["verbosity"] >= _LOGGER_VERBOSITY_LEVEL: stream_logger.debug(f"Process-{self.process_ID} is finished.") # Clean up the interpolators if they exist @@ -1718,14 +1770,15 @@ class Population: ### Custom logging code: self._set_custom_logging() - # Check if there are actually arguments passed: if self.bse_options: # Get argument line and argline = self._return_argline(self.bse_options) - verbose_print("Running {}".format(argline), self.grid_options["verbosity"], 1) + verbose_print( + "Running {}".format(argline), self.grid_options["verbosity"], 1 + ) # Run system out = _binary_c_bindings.run_system( @@ -1842,7 +1895,7 @@ class Population: raise ValueError # Set up the grid code with a dry run option to see total probability - if self.grid_options['do_dry_run']: + if self.grid_options["do_dry_run"]: print("Doing dry run to calculate total starcount and probability") self._generate_grid_code(dry_run=True) @@ -1854,11 +1907,12 @@ class Population: print( "Total starcount for this run will be: {} with a total probability of {}".format( - self.grid_options["_total_starcount"], self.grid_options['_probtot'] + self.grid_options["_total_starcount"], + self.grid_options["_probtot"], ) ) - if self.grid_options['exit_after_dry_run']: + if self.grid_options["exit_after_dry_run"]: exit() ####################### @@ -1889,7 +1943,7 @@ class Population: # TODO: fix this function # self._load_source_file_function() - if self.grid_options['do_dry_run']: + if self.grid_options["do_dry_run"]: # Do a dry run self._dry_run_source_file() @@ -1960,19 +2014,18 @@ class Population: # a variable grid ################################################### - def _add_code(self, *args, indent=0,mindent=-1): + def _add_code(self, *args, indent=0, mindent=-1): """ Function to add code to the grid code string add code to the code_string - + indent (=0) is added once at the beginning mindent (=0) is added for every line - + don't use both! 
""" - # if we have multiple arguments and have forgotten # to set mindent = 0, do it here if indent == 0 and mindent == -1 and len(args) > 0: @@ -2030,7 +2083,6 @@ class Population: total_grid_variables = len(self.grid_options["_grid_variables"]) - self._add_code( # Import packages "import math\n", @@ -2041,33 +2093,28 @@ class Population: "from binarycpython.utils.useful_funcs import *\n", "\n\n", # Make the function - "def grid_code(self, print_results=True):\n") + "def grid_code(self, print_results=True):\n", + ) # Increase indent_depth self._increment_indent_depth(+1) self._add_code( # Write some info in the function - "# Grid code generated on {}\n".format( - datetime.datetime.now().isoformat() - ), + "# Grid code generated on {}\n".format(datetime.datetime.now().isoformat()), "# This function generates the systems that will be evolved with binary_c\n\n" # Set some values in the generated code: "# Setting initial values\n", "_total_starcount = 0\n", - "starcounts = [0 for i in range({})]\n".format( - total_grid_variables - ), + "starcounts = [0 for i in range({})]\n".format(total_grid_variables), "probabilities = {}\n", "probabilities_list = [0 for i in range({})]\n".format( total_grid_variables ), - "probabilities_sum = [0 for i in range({})]\n".format( - total_grid_variables - ), + "probabilities_sum = [0 for i in range({})]\n".format(total_grid_variables), "parameter_dict = {}\n", "phasevol = 1\n", - "\n" + "\n", ) # Set up the system parameters @@ -2076,18 +2123,15 @@ class Population: "M_2 = None\n", "M_3 = None\n", "M_4 = None\n", - "orbital_period = None\n", "orbital_period_triple = None\n", "orbital_period_quadruple = None\n", - "eccentricity = None\n", "eccentricity2 = None\n", "eccentricity3 = None\n", "\n", - # Prepare the probability - "# setting probability lists\n" + "# setting probability lists\n", ) for grid_variable_el in sorted( @@ -2096,9 +2140,9 @@ class Population: ): # Make probabilities dict grid_variable = grid_variable_el[1] - self._add_code('probabilities["{}"] = 0\n'.format( - grid_variable["parameter_name"] - )) + self._add_code( + 'probabilities["{}"] = 0\n'.format(grid_variable["parameter_name"]) + ) ################################################################################# # Start of code generation @@ -2123,17 +2167,18 @@ class Population: # Setting up the for loop # Add comment for for loop self._add_code( - "# for loop for {}".format(grid_variable["parameter_name"]) - + "\n", + "# for loop for {}".format(grid_variable["parameter_name"]) + "\n", "sampled_values_{} = {}".format( grid_variable["name"], grid_variable["spacingfunc"] ) - + "\n" + + "\n", ) if vb: self._add_code( - "print('samples','{}',':',sampled_values_{})\n".format(grid_variable["name"],grid_variable["name"]) + "print('samples','{}',':',sampled_values_{})\n".format( + grid_variable["name"], grid_variable["name"] + ) ) # # Some print statement @@ -2154,7 +2199,9 @@ class Population: if vb: self._add_code( - "print('sample {} from',sampled_values_{})".format(grid_variable["name"],grid_variable["name"]) + "print('sample {} from',sampled_values_{})".format( + grid_variable["name"], grid_variable["name"] + ) + "\n" ) @@ -2170,8 +2217,7 @@ class Population: # use resolution passed in self._add_code( "for {}_sample_number in range({}):".format( - grid_variable["name"], - grid_variable["resolution"] + grid_variable["name"], grid_variable["resolution"] ) + "\n" ) @@ -2181,26 +2227,28 @@ class Population: # {}_this and {}_next are this grid point's index and the next # grid point's index, 
used in many calculations below self._add_code("if {}_sample_number == 0:\n".format(grid_variable["name"])) - self._add_code( - "{}_this = 0;\n".format(grid_variable["name"]), - indent=1 - ) + self._add_code("{}_this = 0;\n".format(grid_variable["name"]), indent=1) self._add_code("else:\n") self._add_code( - "{}_this = {}_sample_number; ".format(grid_variable["name"], grid_variable["name"]), - indent=1 + "{}_this = {}_sample_number; ".format( + grid_variable["name"], grid_variable["name"] + ), + indent=1, ) self._add_code( "\n", - "{}_next = {}_this + 1".format(grid_variable["name"], grid_variable["name"]), - "\n") + "{}_next = {}_this + 1".format( + grid_variable["name"], grid_variable["name"] + ), + "\n", + ) # TODO: Make clear that the phasevol only works good # TODO: add option to ignore this phasevol calculation and set it to 1 # if you sample linearly in that thing. # if phasevol is <= 0 then we SKIP that whole loop. Its not working then. if ( - not grid_variable["dphasevol"] == -1 + not grid_variable["dphasevol"] == -1 ): # A method to turn off this calculation and allow a phasevol = 1 self._add_code( "phasevol_{} = sampled_values_{}[{}_next]-sampled_values_{}[{}_this]".format( @@ -2208,34 +2256,27 @@ class Population: grid_variable["name"], grid_variable["name"], grid_variable["name"], - grid_variable["name"] - ) - + "\n") - else: - self._add_code( - "phasevol_{} = 1\n".format( - grid_variable["name"] + grid_variable["name"], ) + + "\n" ) + else: + self._add_code("phasevol_{} = 1\n".format(grid_variable["name"])) ############## # Add phasevol check: - self._add_code( - "if phasevol_{} <= 0:\n".format(grid_variable["name"]) - ) + self._add_code("if phasevol_{} <= 0:\n".format(grid_variable["name"])) # TODO: We might actually want to add the starcount and probability to the totals regardless. # n that case we need another local variable which will prevent it from being run but will track those parameters # Add phasevol check action: self._add_code( 'print("Grid generator: phasevol_{} <= 0! 
(this=",{}_this,", next=",{}_next,") Skipping current sample.")'.format( - grid_variable["name"], - grid_variable["name"], - grid_variable["name"] + grid_variable["name"], grid_variable["name"], grid_variable["name"] ) + "\n", "continue" + "\n", - mindent=1 + mindent=1, ) if vb: @@ -2244,43 +2285,49 @@ class Population: grid_variable["name"], grid_variable["name"], grid_variable["name"], - grid_variable["name"] - ) + grid_variable["name"], + ) + "\n" ) # select point location based on gridtype (left, centre or right) - if (grid_variable['gridtype'] == 'edge' or - grid_variable['gridtype'] == 'left' or - grid_variable['gridtype'] == 'left edge'): + if ( + grid_variable["gridtype"] == "edge" + or grid_variable["gridtype"] == "left" + or grid_variable["gridtype"] == "left edge" + ): self._add_code( "{} = sampled_values_{}[{}_this]".format( grid_variable["name"], grid_variable["name"], - grid_variable["name"] + grid_variable["name"], ) + "\n" ) - elif (grid_variable['gridtype'] == 'right' or - grid_variable['gridtype'] == 'right edge'): + elif ( + grid_variable["gridtype"] == "right" + or grid_variable["gridtype"] == "right edge" + ): self._add_code( - + "{} = sampled_values_{}[{}_next]".format( + +"{} = sampled_values_{}[{}_next]".format( + grid_variable["name"], grid_variable["name"], grid_variable["name"], - grid_variable["name"] ) + "\n" ) - elif (grid_variable['gridtype'] == 'centred' or - grid_variable['gridtype'] == 'centre' or - grid_variable['gridtype'] == 'center'): + elif ( + grid_variable["gridtype"] == "centred" + or grid_variable["gridtype"] == "centre" + or grid_variable["gridtype"] == "center" + ): self._add_code( "{} = 0.5 * (sampled_values_{}[{}_next] + sampled_values_{}[{}_this])".format( grid_variable["name"], grid_variable["name"], grid_variable["name"], grid_variable["name"], - grid_variable["name"] + grid_variable["name"], ) + "\n" ) @@ -2290,7 +2337,9 @@ class Population: if vb: self._add_code( - "print('hence {} = ',{})\n".format(grid_variable["name"], grid_variable["name"]) + "print('hence {} = ',{})\n".format( + grid_variable["name"], grid_variable["name"] + ) ) ################################################################################# @@ -2304,7 +2353,7 @@ class Population: "# Condition for {}\n".format(grid_variable["parameter_name"]), # Add condition check "if not {}:\n".format(grid_variable["condition"]), - mindent=0 + mindent=0, ) # Add condition failed action: @@ -2314,7 +2363,8 @@ class Population: ) + "\n", "continue" + "\n", - mindent=1) + mindent=1, + ) # Add some white line self._add_code("\n") @@ -2346,9 +2396,11 @@ class Population: ) # Set phasevol - self._add_code("phasevol *= phasevol_{}\n".format( - grid_variable["name"], - )) + self._add_code( + "phasevol *= phasevol_{}\n".format( + grid_variable["name"], + ) + ) ####################### # Probabilities @@ -2364,15 +2416,14 @@ class Population: + "\n", # Save probability sum "probabilities_sum[{}] += d{}".format( - grid_variable["grid_variable_number"], grid_variable["name"] + grid_variable["grid_variable_number"], grid_variable["name"] ) - + "\n" + + "\n", ) if grid_variable["grid_variable_number"] == 0: self._add_code( - "probabilities_list[0] = d{}".format(grid_variable["name"]) - + "\n" + "probabilities_list[0] = d{}".format(grid_variable["name"]) + "\n" ) else: self._add_code( @@ -2386,20 +2437,21 @@ class Population: ####################### # Increment starcount for this parameter - self._add_code("\n", - "# Increment starcount for {}\n".format(grid_variable["parameter_name"]), - - 
"starcounts[{}] += 1".format( - grid_variable["grid_variable_number"], - ) - + "\n", - - # Add value to dict - 'parameter_dict["{}"] = {}'.format( - grid_variable["parameter_name"], grid_variable["parameter_name"] - ) - + "\n", - "\n" + self._add_code( + "\n", + "# Increment starcount for {}\n".format( + grid_variable["parameter_name"] + ), + "starcounts[{}] += 1".format( + grid_variable["grid_variable_number"], + ) + + "\n", + # Add value to dict + 'parameter_dict["{}"] = {}'.format( + grid_variable["parameter_name"], grid_variable["parameter_name"] + ) + + "\n", + "\n", ) self._increment_indent_depth(-1) @@ -2436,14 +2488,14 @@ class Population: self._increment_indent_depth(+1) self._add_code( "#" * 40 + "\n", - "# Code below is for finalising the handling of this iteration of the parameter {}\n".format(grid_variable["name"]) + "# Code below is for finalising the handling of this iteration of the parameter {}\n".format( + grid_variable["name"] + ), ) # Set phasevol # TODO: fix. this isn't supposed to be the value that we give it here. discuss - self._add_code( - "phasevol /= phasevol_{}\n\n".format(grid_variable["name"]) - ) + self._add_code("phasevol /= phasevol_{}\n\n".format(grid_variable["name"])) self._increment_indent_depth(-2) @@ -2454,7 +2506,6 @@ class Population: self._increment_indent_depth(+1) - self._add_code( # Add comment "# Condition for branchpoint at {}".format( @@ -2464,10 +2515,8 @@ class Population: # # Add condition check # "if not {}:".format(grid_variable["condition"]) # + "\n" - # Add branchpoint - "if multiplicity=={}:".format(grid_variable["branchpoint"]) - + "\n" + "if multiplicity=={}:".format(grid_variable["branchpoint"]) + "\n", ) self._write_gridcode_system_call( @@ -2482,18 +2531,13 @@ class Population: # Finalising print statements # self._increment_indent_depth(+1) - self._add_code( - "\n", - "#" * 40 + "\n", - "if print_results:\n" - ) + self._add_code("\n", "#" * 40 + "\n", "if print_results:\n") self._add_code( "print('Grid has handled {} stars'.format(_total_starcount))\n", "print('with a total probability of {}'.format(self.grid_options['_probtot']))\n", - mindent=1 + mindent=1, ) - ################ # Finalising return statement for dry run. # @@ -2527,10 +2571,7 @@ class Population: with open(gridcode_filename, "w") as file: file.write(self.code_string) - - def _write_gridcode_system_call( - self, grid_variable, dry_run, branchpoint - ): + def _write_gridcode_system_call(self, grid_variable, dry_run, branchpoint): ################################################################################# # Here are the calls to the queuing or other solution. 
this part is for every system # Add comment @@ -2539,7 +2580,9 @@ class Population: if branchpoint: self._add_code( - "# Code below will get evaluated for every system at this level of multiplicity (last one of that being {})\n".format(grid_variable["name"]) + "# Code below will get evaluated for every system at this level of multiplicity (last one of that being {})\n".format( + grid_variable["name"] + ) ) else: self._add_code( @@ -2554,24 +2597,19 @@ class Population: grid_variable["grid_variable_number"] ) + "\n", - - - # Take into account the multiplicity fraction: + # Take into account the multiplicity fraction: "\n", "# Factor the multiplicity fraction into the probability\n", - 'probability = probability * self._calculate_multiplicity_fraction(parameter_dict)' + "probability = probability * self._calculate_multiplicity_fraction(parameter_dict)" + "\n", - - # Add division by amount of repeats "\n", "# Divide the probability by the amount of repeats\n", - 'probability = probability / self.grid_options["repeat"]' - + "\n", + 'probability = probability / self.grid_options["repeat"]' + "\n", # Now we yield the system self.grid_options["repeat"] times. "\n", "# Loop over the repeats\n", - 'for _ in range(self.grid_options["repeat"]):' + "\n" + 'for _ in range(self.grid_options["repeat"]):' + "\n", ) self._add_code( "_total_starcount += 1\n", @@ -2580,22 +2618,16 @@ class Population: 'parameter_dict["{}"] = {}'.format("phasevol", "phasevol") + "\n", # Increment total probability "self._increment_probtot(probability)\n", - mindent=1 + mindent=1, ) if not dry_run: # Handling of what is returned, or what is not. - self._add_code( - "yield(parameter_dict)\n", - indent=1 - ) + self._add_code("yield(parameter_dict)\n", indent=1) # If its a dry run, dont do anything with it else: - self._add_code( - "pass\n", - indent=1 - ) + self._add_code("pass\n", indent=1) self._add_code("#" * 40 + "\n") @@ -3549,13 +3581,22 @@ class Population: if options: # Take the option dictionary that was given and override. - options = update_dicts(self.grid_options['m&s_options'], options) - self.grid_options['m&s_options'] = copy.deepcopy(dict(options)) + options = update_dicts(self.grid_options["m&s_options"], options) + self.grid_options["m&s_options"] = copy.deepcopy(dict(options)) # Write options to a file - os.makedirs(os.path.join(self.grid_options["tmp_dir"], "moe_distefano"), exist_ok=True) - with open(os.path.join(os.path.join(self.grid_options["tmp_dir"], "moe_distefano"), "moeopts.dat"), "w") as f: - f.write(json.dumps(self.grid_options['m&s_options'], indent=4)) + os.makedirs( + os.path.join(self.grid_options["tmp_dir"], "moe_distefano"), + exist_ok=True, + ) + with open( + os.path.join( + os.path.join(self.grid_options["tmp_dir"], "moe_distefano"), + "moeopts.dat", + ), + "w", + ) as f: + f.write(json.dumps(self.grid_options["m&s_options"], indent=4)) def _load_moe_di_stefano_data(self): """ @@ -3563,9 +3604,12 @@ class Population: """ # Only if the grid is loaded and Moecache contains information - if (not self.grid_options['_loaded_ms_data']) and not Moecache: + if (not self.grid_options["_loaded_ms_data"]) and not Moecache: # Load the JSON data - json_data = get_moe_di_stefano_dataset(self.grid_options['m&s_options'], verbosity=self.grid_options['verbosity']) + json_data = get_moe_di_stefano_dataset( + self.grid_options["m&s_options"], + verbosity=self.grid_options["verbosity"], + ) # entry of log10M1 is a list containing 1 dict. 
We can take the dict out of the list
json_data["log10M1"] = json_data["log10M1"][0]
@@ -3582,8 +3626,17 @@ class Population:
raise ValueError(msg)
# Write to file
- os.makedirs(os.path.join(self.grid_options["tmp_dir"], "moe_distefano"), exist_ok=True)
- with open(os.path.join(os.path.join(self.grid_options["tmp_dir"], "moe_distefano"), "moe.log"), "w") as logfile:
+ os.makedirs(
+ os.path.join(self.grid_options["tmp_dir"], "moe_distefano"),
+ exist_ok=True,
+ )
+ with open(
+ os.path.join(
+ os.path.join(self.grid_options["tmp_dir"], "moe_distefano"),
+ "moe.log",
+ ),
+ "w",
+ ) as logfile:
logfile.write("log₁₀Masses(M☉) {}\n".format(logmasses))
# Get all the periods and see if they are all consistently present
@@ -3621,9 +3674,13 @@ class Population:
raise ValueError(msg)
# Write to file
- os.makedirs(os.path.join(self.grid_options["tmp_dir"], "moe_distefano"), exist_ok=True)
+ os.makedirs(
+ os.path.join(self.grid_options["tmp_dir"], "moe_distefano"),
+ exist_ok=True,
+ )
with open(
- os.path.join(self.grid_options["tmp_dir"], "moe_distefano", "moe.log"), "a"
+ os.path.join(self.grid_options["tmp_dir"], "moe_distefano", "moe.log"),
+ "a",
) as logfile:
logfile.write("log₁₀Periods(days) {}\n".format(logperiods))
@@ -3745,7 +3802,12 @@ class Population:
for ecc in eccs:
Moecache["ecc_distributions"].append(
- [float(logmass), float(logperiod), float(ecc), ecc_data[ecc]]
+ [
+ float(logmass),
+ float(logperiod),
+ float(ecc),
+ ecc_data[ecc],
+ ]
)
############################################################
@@ -3803,12 +3865,21 @@ class Population:
)
# Write to log file
- os.makedirs(os.path.join(self.grid_options["tmp_dir"], "moe_distefano"), exist_ok=True)
- with open(os.path.join(os.path.join(self.grid_options["tmp_dir"], "moe_distefano"), "moecache.json"), "w") as cache_filehandle:
+ os.makedirs(
+ os.path.join(self.grid_options["tmp_dir"], "moe_distefano"),
+ exist_ok=True,
+ )
+ with open(
+ os.path.join(
+ os.path.join(self.grid_options["tmp_dir"], "moe_distefano"),
+ "moecache.json",
+ ),
+ "w",
+ ) as cache_filehandle:
cache_filehandle.write(json.dumps(Moecache, indent=4))
# Signal that the data has been loaded
- self.grid_options['_loaded_ms_data'] = True
+ self.grid_options["_loaded_ms_data"] = True
def _set_moe_di_stefano_distributions(self):
"""
@@ -3819,7 +3890,9 @@ class Population:
# first, the multiplicity, this is 1,2,3,4, ...
# for singles, binaries, triples, quadruples, ...
- max_multiplicity = get_max_multiplicity(self.grid_options['m&s_options']["multiplicity_modulator"]) + max_multiplicity = get_max_multiplicity( + self.grid_options["m&s_options"]["multiplicity_modulator"] + ) verbose_print( "\tMoe_di_Stefano_2017: Max multiplicity = {}".format(max_multiplicity), self.grid_options["verbosity"], @@ -3837,10 +3910,10 @@ class Population: resolution=4, spacingfunc="range(1, 5)", precode='self.grid_options["multiplicity"] = multiplicity; self.bse_options["multiplicity"] = multiplicity; options={}'.format( - self.grid_options['m&s_options'] + self.grid_options["m&s_options"] ), condition="({}[multiplicity-1] > 0)".format( - str(self.grid_options['m&s_options']["multiplicity_modulator"]) + str(self.grid_options["m&s_options"]["multiplicity_modulator"]) ), gridtype="edge", dphasevol=-1, @@ -3860,19 +3933,21 @@ class Population: longname="Primary mass", resolution="options['resolutions']['M'][0]", spacingfunc="const(np.log({}), np.log({}), {})".format( - self.grid_options['m&s_options']["ranges"]["M"][0], - self.grid_options['m&s_options']["ranges"]["M"][1], - self.grid_options['m&s_options']["resolutions"]["M"][0], + self.grid_options["m&s_options"]["ranges"]["M"][0], + self.grid_options["m&s_options"]["ranges"]["M"][1], + self.grid_options["m&s_options"]["resolutions"]["M"][0], ), valuerange=[ - "np.log({})".format(self.grid_options['m&s_options']["ranges"]["M"][0]), - "np.log({})".format(self.grid_options['m&s_options']["ranges"]["M"][1]), + "np.log({})".format(self.grid_options["m&s_options"]["ranges"]["M"][0]), + "np.log({})".format(self.grid_options["m&s_options"]["ranges"]["M"][1]), ], gridtype="centred", dphasevol="dlnm1", precode='M_1 = np.exp(lnm1); options["M_1"]=M_1', probdist="Moe_di_Stefano_2017_pdf({{{}, {}, {}}}, verbosity=self.grid_options['verbosity'])['total_probdens'] if multiplicity == 1 else 1".format( - str(dict(self.grid_options['m&s_options']))[1:-1], "'multiplicity': multiplicity", "'M_1': M_1" + str(dict(self.grid_options["m&s_options"]))[1:-1], + "'multiplicity': multiplicity", + "'M_1': M_1", ), ) @@ -3883,25 +3958,30 @@ class Population: name="log10per", parameter_name="orbital_period", longname="log10(Orbital_Period)", - resolution=self.grid_options['m&s_options']["resolutions"]["logP"][0], + resolution=self.grid_options["m&s_options"]["resolutions"]["logP"][0], probdist=1.0, condition='(self.grid_options["multiplicity"] >= 2)', - branchpoint=1 if max_multiplicity > 1 else 0, # Signal here to put a branchpoint if we have a max multiplicity higher than 1. + branchpoint=1 + if max_multiplicity > 1 + else 0, # Signal here to put a branchpoint if we have a max multiplicity higher than 1. 
gridtype="centred", dphasevol="({} * dlog10per)".format(LOG_LN_CONVERTER), - valuerange=[self.grid_options['m&s_options']["ranges"]["logP"][0], self.grid_options['m&s_options']["ranges"]["logP"][1]], + valuerange=[ + self.grid_options["m&s_options"]["ranges"]["logP"][0], + self.grid_options["m&s_options"]["ranges"]["logP"][1], + ], spacingfunc="const({}, {}, {})".format( - self.grid_options['m&s_options']["ranges"]["logP"][0], - self.grid_options['m&s_options']["ranges"]["logP"][1], - self.grid_options['m&s_options']["resolutions"]["logP"][0], + self.grid_options["m&s_options"]["ranges"]["logP"][0], + self.grid_options["m&s_options"]["ranges"]["logP"][1], + self.grid_options["m&s_options"]["resolutions"]["logP"][0], ), precode="""orbital_period = 10.0**log10per qmin={}/M_1 qmax=maximum_mass_ratio_for_RLOF(M_1, orbital_period) """.format( - self.grid_options['m&s_options']["Mmin"] + self.grid_options["m&s_options"]["Mmin"] ), - ) # TODO: change the maximum_mass_ratio_for_RLOF + ) # TODO: change the maximum_mass_ratio_for_RLOF # binaries: mass ratio self.add_grid_variable( @@ -3909,14 +3989,14 @@ qmax=maximum_mass_ratio_for_RLOF(M_1, orbital_period) parameter_name="M_2", longname="Mass ratio", valuerange=[ - self.grid_options['m&s_options']["ranges"]["q"][0] - if self.grid_options['m&s_options'].get("ranges", {}).get("q", None) + self.grid_options["m&s_options"]["ranges"]["q"][0] + if self.grid_options["m&s_options"].get("ranges", {}).get("q", None) else "options['Mmin']/M_1", - self.grid_options['m&s_options']["ranges"]["q"][1] - if self.grid_options['m&s_options'].get("ranges", {}).get("q", None) + self.grid_options["m&s_options"]["ranges"]["q"][1] + if self.grid_options["m&s_options"].get("ranges", {}).get("q", None) else "qmax", ], - resolution=self.grid_options['m&s_options']["resolutions"]["M"][1], + resolution=self.grid_options["m&s_options"]["resolutions"]["M"][1], probdist=1, gridtype="centred", dphasevol="dq", @@ -3925,35 +4005,45 @@ M_2 = q * M_1 sep = calc_sep_from_period(M_1, M_2, orbital_period) """, spacingfunc="const({}, {}, {})".format( - self.grid_options['m&s_options']["ranges"]["q"][0] - if self.grid_options['m&s_options'].get("ranges", {}).get("q", [None, None])[0] - else "{}/M_1".format(self.grid_options['m&s_options']["Mmin"]), - self.grid_options['m&s_options']["ranges"]["q"][1] - if self.grid_options['m&s_options'].get("ranges", {}).get("q", [None, None])[1] + self.grid_options["m&s_options"]["ranges"]["q"][0] + if self.grid_options["m&s_options"] + .get("ranges", {}) + .get("q", [None, None])[0] + else "{}/M_1".format(self.grid_options["m&s_options"]["Mmin"]), + self.grid_options["m&s_options"]["ranges"]["q"][1] + if self.grid_options["m&s_options"] + .get("ranges", {}) + .get("q", [None, None])[1] else "qmax", - self.grid_options['m&s_options']["resolutions"]["M"][1], + self.grid_options["m&s_options"]["resolutions"]["M"][1], ), ) # (optional) binaries: eccentricity - if self.grid_options['m&s_options']["resolutions"]["ecc"][0] > 0: + if self.grid_options["m&s_options"]["resolutions"]["ecc"][0] > 0: self.add_grid_variable( name="ecc", parameter_name="eccentricity", longname="Eccentricity", - resolution=self.grid_options['m&s_options']["resolutions"]["ecc"][0], + resolution=self.grid_options["m&s_options"]["resolutions"]["ecc"][ + 0 + ], probdist=1, gridtype="centred", dphasevol="decc", precode="eccentricity=ecc", valuerange=[ - self.grid_options['m&s_options']["ranges"]["ecc"][0], # Just fail if not defined. 
- self.grid_options['m&s_options']["ranges"]["ecc"][1], + self.grid_options["m&s_options"]["ranges"]["ecc"][ + 0 + ], # Just fail if not defined. + self.grid_options["m&s_options"]["ranges"]["ecc"][1], ], spacingfunc="const({}, {}, {})".format( - self.grid_options['m&s_options']["ranges"]["ecc"][0], # Just fail if not defined. - self.grid_options['m&s_options']["ranges"]["ecc"][1], - self.grid_options['m&s_options']["resolutions"]["ecc"][0], + self.grid_options["m&s_options"]["ranges"]["ecc"][ + 0 + ], # Just fail if not defined. + self.grid_options["m&s_options"]["ranges"]["ecc"][1], + self.grid_options["m&s_options"]["resolutions"]["ecc"][0], ), ) @@ -3964,26 +4054,30 @@ sep = calc_sep_from_period(M_1, M_2, orbital_period) name="log10per2", parameter_name="orbital_period_triple", longname="log10(Orbital_Period2)", - resolution=self.grid_options['m&s_options']["resolutions"]["logP"][1], + resolution=self.grid_options["m&s_options"]["resolutions"]["logP"][ + 1 + ], probdist=1.0, condition='(self.grid_options["multiplicity"] >= 3)', - branchpoint=2 if max_multiplicity > 2 else 0, # Signal here to put a branchpoint if we have a max multiplicity higher than 1. + branchpoint=2 + if max_multiplicity > 2 + else 0, # Signal here to put a branchpoint if we have a max multiplicity higher than 1. gridtype="centred", dphasevol="({} * dlog10per2)".format(LOG_LN_CONVERTER), valuerange=[ - self.grid_options['m&s_options']["ranges"]["logP"][0], - self.grid_options['m&s_options']["ranges"]["logP"][1], + self.grid_options["m&s_options"]["ranges"]["logP"][0], + self.grid_options["m&s_options"]["ranges"]["logP"][1], ], spacingfunc="const({}, {}, {})".format( - self.grid_options['m&s_options']["ranges"]["logP"][0], - self.grid_options['m&s_options']["ranges"]["logP"][1], - self.grid_options['m&s_options']["resolutions"]["logP"][1], + self.grid_options["m&s_options"]["ranges"]["logP"][0], + self.grid_options["m&s_options"]["ranges"]["logP"][1], + self.grid_options["m&s_options"]["resolutions"]["logP"][1], ), precode="""orbital_period_triple = 10.0**log10per2 q2min={}/(M_1+M_2) q2max=maximum_mass_ratio_for_RLOF(M_1+M_2, orbital_period_triple) """.format( - self.grid_options['m&s_options']["Mmin"] + self.grid_options["m&s_options"]["Mmin"] ), ) @@ -3994,14 +4088,18 @@ q2max=maximum_mass_ratio_for_RLOF(M_1+M_2, orbital_period_triple) parameter_name="M_3", longname="Mass ratio outer/inner", valuerange=[ - self.grid_options['m&s_options']["ranges"]["q"][0] - if self.grid_options['m&s_options'].get("ranges", {}).get("q", None) + self.grid_options["m&s_options"]["ranges"]["q"][0] + if self.grid_options["m&s_options"] + .get("ranges", {}) + .get("q", None) else "options['Mmin']/(M_1+M_2)", - self.grid_options['m&s_options']["ranges"]["q"][1] - if self.grid_options['m&s_options'].get("ranges", {}).get("q", None) + self.grid_options["m&s_options"]["ranges"]["q"][1] + if self.grid_options["m&s_options"] + .get("ranges", {}) + .get("q", None) else "q2max", ], - resolution=self.grid_options['m&s_options']["resolutions"]["M"][2], + resolution=self.grid_options["m&s_options"]["resolutions"]["M"][2], probdist=1, gridtype="centred", dphasevol="dq2", @@ -4011,35 +4109,45 @@ sep2 = calc_sep_from_period((M_1+M_2), M_3, orbital_period_triple) eccentricity2=0 """, spacingfunc="const({}, {}, {})".format( - self.grid_options['m&s_options']["ranges"]["q"][0] - if self.grid_options['m&s_options'].get("ranges", {}).get("q", None) + self.grid_options["m&s_options"]["ranges"]["q"][0] + if self.grid_options["m&s_options"] + .get("ranges", 
{}) + .get("q", None) else "options['Mmin']/(M_1+M_2)", - self.grid_options['m&s_options']["ranges"]["q"][1] - if self.grid_options['m&s_options'].get("ranges", {}).get("q", None) + self.grid_options["m&s_options"]["ranges"]["q"][1] + if self.grid_options["m&s_options"] + .get("ranges", {}) + .get("q", None) else "q2max", - self.grid_options['m&s_options']["resolutions"]["M"][2], + self.grid_options["m&s_options"]["resolutions"]["M"][2], ), ) # (optional) triples: eccentricity - if self.grid_options['m&s_options']["resolutions"]["ecc"][1] > 0: + if self.grid_options["m&s_options"]["resolutions"]["ecc"][1] > 0: self.add_grid_variable( name="ecc2", parameter_name="eccentricity2", longname="Eccentricity of the triple", - resolution=self.grid_options['m&s_options']["resolutions"]["ecc"][1], + resolution=self.grid_options["m&s_options"]["resolutions"][ + "ecc" + ][1], probdist=1, gridtype="centred", dphasevol="decc2", precode="eccentricity2=ecc2", valuerange=[ - self.grid_options['m&s_options']["ranges"]["ecc"][0], # Just fail if not defined. - self.grid_options['m&s_options']["ranges"]["ecc"][1], + self.grid_options["m&s_options"]["ranges"]["ecc"][ + 0 + ], # Just fail if not defined. + self.grid_options["m&s_options"]["ranges"]["ecc"][1], ], spacingfunc="const({}, {}, {})".format( - self.grid_options['m&s_options']["ranges"]["ecc"][0], # Just fail if not defined. - self.grid_options['m&s_options']["ranges"]["ecc"][1], - self.grid_options['m&s_options']["resolutions"]["ecc"][1], + self.grid_options["m&s_options"]["ranges"]["ecc"][ + 0 + ], # Just fail if not defined. + self.grid_options["m&s_options"]["ranges"]["ecc"][1], + self.grid_options["m&s_options"]["resolutions"]["ecc"][1], ), ) @@ -4049,26 +4157,30 @@ eccentricity2=0 name="log10per3", parameter_name="orbital_period_quadruple", longname="log10(Orbital_Period3)", - resolution=self.grid_options['m&s_options']["resolutions"]["logP"][2], + resolution=self.grid_options["m&s_options"]["resolutions"][ + "logP" + ][2], probdist=1.0, condition='(self.grid_options["multiplicity"] >= 4)', - branchpoint=3 if max_multiplicity > 3 else 0, # Signal here to put a branchpoint if we have a max multiplicity higher than 1. + branchpoint=3 + if max_multiplicity > 3 + else 0, # Signal here to put a branchpoint if we have a max multiplicity higher than 1. 
gridtype="centred", dphasevol="({} * dlog10per3)".format(LOG_LN_CONVERTER), valuerange=[ - self.grid_options['m&s_options']["ranges"]["logP"][0], - self.grid_options['m&s_options']["ranges"]["logP"][1], + self.grid_options["m&s_options"]["ranges"]["logP"][0], + self.grid_options["m&s_options"]["ranges"]["logP"][1], ], spacingfunc="const({}, {}, {})".format( - self.grid_options['m&s_options']["ranges"]["logP"][0], - self.grid_options['m&s_options']["ranges"]["logP"][1], - self.grid_options['m&s_options']["resolutions"]["logP"][2], + self.grid_options["m&s_options"]["ranges"]["logP"][0], + self.grid_options["m&s_options"]["ranges"]["logP"][1], + self.grid_options["m&s_options"]["resolutions"]["logP"][2], ), precode="""orbital_period_quadruple = 10.0**log10per3 q3min={}/(M_3) q3max=maximum_mass_ratio_for_RLOF(M_3, orbital_period_quadruple) """.format( - self.grid_options['m&s_options']["Mmin"] + self.grid_options["m&s_options"]["Mmin"] ), ) @@ -4078,14 +4190,20 @@ q3max=maximum_mass_ratio_for_RLOF(M_3, orbital_period_quadruple) parameter_name="M_4", longname="Mass ratio outer low/outer high", valuerange=[ - self.grid_options['m&s_options']["ranges"]["q"][0] - if self.grid_options['m&s_options'].get("ranges", {}).get("q", None) + self.grid_options["m&s_options"]["ranges"]["q"][0] + if self.grid_options["m&s_options"] + .get("ranges", {}) + .get("q", None) else "options['Mmin']/(M_3)", - self.grid_options['m&s_options']["ranges"]["q"][1] - if self.grid_options['m&s_options'].get("ranges", {}).get("q", None) + self.grid_options["m&s_options"]["ranges"]["q"][1] + if self.grid_options["m&s_options"] + .get("ranges", {}) + .get("q", None) else "q3max", ], - resolution=self.grid_options['m&s_options']["resolutions"]["M"][3], + resolution=self.grid_options["m&s_options"]["resolutions"]["M"][ + 3 + ], probdist=1, gridtype="centred", dphasevol="dq3", @@ -4095,39 +4213,47 @@ sep3 = calc_sep_from_period((M_3), M_4, orbital_period_quadruple) eccentricity3=0 """, spacingfunc="const({}, {}, {})".format( - self.grid_options['m&s_options']["ranges"]["q"][0] - if self.grid_options['m&s_options'].get("ranges", {}).get("q", None) + self.grid_options["m&s_options"]["ranges"]["q"][0] + if self.grid_options["m&s_options"] + .get("ranges", {}) + .get("q", None) else "options['Mmin']/(M_3)", - self.grid_options['m&s_options']["ranges"]["q"][1] - if self.grid_options['m&s_options'].get("ranges", {}).get("q", None) + self.grid_options["m&s_options"]["ranges"]["q"][1] + if self.grid_options["m&s_options"] + .get("ranges", {}) + .get("q", None) else "q3max", - self.grid_options['m&s_options']["resolutions"]["M"][2], + self.grid_options["m&s_options"]["resolutions"]["M"][2], ), ) # (optional) triples: eccentricity - if self.grid_options['m&s_options']["resolutions"]["ecc"][2] > 0: + if self.grid_options["m&s_options"]["resolutions"]["ecc"][2] > 0: self.add_grid_variable( name="ecc3", parameter_name="eccentricity3", longname="Eccentricity of the triple+quadruple/outer binary", - resolution=self.grid_options['m&s_options']["resolutions"]["ecc"][2], + resolution=self.grid_options["m&s_options"]["resolutions"][ + "ecc" + ][2], probdist=1, gridtype="centred", dphasevol="decc3", precode="eccentricity3=ecc3", valuerange=[ - self.grid_options['m&s_options']["ranges"]["ecc"][ + self.grid_options["m&s_options"]["ranges"]["ecc"][ 0 ], # Just fail if not defined. 
- self.grid_options['m&s_options']["ranges"]["ecc"][1], + self.grid_options["m&s_options"]["ranges"]["ecc"][1], ], spacingfunc="const({}, {}, {})".format( - self.grid_options['m&s_options']["ranges"]["ecc"][ + self.grid_options["m&s_options"]["ranges"]["ecc"][ 0 ], # Just fail if not defined. - self.grid_options['m&s_options']["ranges"]["ecc"][1], - self.grid_options['m&s_options']["resolutions"]["ecc"][2], + self.grid_options["m&s_options"]["ranges"]["ecc"][1], + self.grid_options["m&s_options"]["resolutions"]["ecc"][ + 2 + ], ), ) @@ -4138,7 +4264,7 @@ eccentricity3=0 # and add some keys to it: updated_options = "{{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}}}".format( - str(dict(self.grid_options['m&s_options']))[1:-1], + str(dict(self.grid_options["m&s_options"]))[1:-1], '"multiplicity": multiplicity', '"M_1": M_1', '"M_2": M_2', @@ -4168,7 +4294,7 @@ eccentricity3=0 ) # Signal that the M&S grid has been set - self.grid_options['_set_ms_grid'] = True + self.grid_options["_set_ms_grid"] = True ################################################################################################ def Moe_di_Stefano_2017(self, options=None): @@ -4218,7 +4344,7 @@ eccentricity3=0 """ # Just return 1 if no option has been chosen - if self.grid_options['multiplicity_fraction_function'] == 0: + if self.grid_options["multiplicity_fraction_function"] == 0: verbose_print( "_calculate_multiplicity_fraction: Chosen not to use any multiplicity fraction.", self.grid_options["verbosity"], @@ -4228,12 +4354,12 @@ eccentricity3=0 return 1 # Raise an error if the multiplicity is not set - if not system_dict.get('multiplicity', None): + if not system_dict.get("multiplicity", None): msg = "Multiplicity value has not been set. When using a specific multiplicity fraction function please set the multiplicity" raise ValueError(msg) # Go over the chosen options - if self.grid_options['multiplicity_fraction_function'] == 1: + if self.grid_options["multiplicity_fraction_function"] == 1: # Arenou 2010 will be used verbose_print( "_calculate_multiplicity_fraction: Using Arenou 2010 to calculate multiplicity fractions", @@ -4241,10 +4367,15 @@ eccentricity3=0 2, ) - binary_fraction = Arenou2010_binary_fraction(system_dict['M_1']) - multiplicity_fraction_dict = {1: 1-binary_fraction, 2: binary_fraction, 3: 0, 4: 0} + binary_fraction = Arenou2010_binary_fraction(system_dict["M_1"]) + multiplicity_fraction_dict = { + 1: 1 - binary_fraction, + 2: binary_fraction, + 3: 0, + 4: 0, + } - elif self.grid_options['multiplicity_fraction_function'] == 2: + elif self.grid_options["multiplicity_fraction_function"] == 2: # Raghavan 2010 will be used verbose_print( "_calculate_multiplicity_fraction: Using Rhagavan 2010 to calculate multiplicity fractions", @@ -4252,15 +4383,22 @@ eccentricity3=0 2, ) - binary_fraction = raghavan2010_binary_fraction(system_dict['M_1']) - multiplicity_fraction_dict = {1: 1-binary_fraction, 2: binary_fraction, 3: 0, 4: 0} + binary_fraction = raghavan2010_binary_fraction(system_dict["M_1"]) + multiplicity_fraction_dict = { + 1: 1 - binary_fraction, + 2: binary_fraction, + 3: 0, + 4: 0, + } - elif self.grid_options['multiplicity_fraction_function'] == 3: + elif self.grid_options["multiplicity_fraction_function"] == 3: # We need to check several things now here: # First, are the options for the M&S grid set? On start it is filled with the default settings - if not self.grid_options['m&s_options']: - msg = "The M&S options do not seem to be set properly. 
The value is {}".format(self.grid_options['m&s_options']) + if not self.grid_options["m&s_options"]: + msg = "The M&S options do not seem to be set properly. The value is {}".format( + self.grid_options["m&s_options"] + ) raise ValueError(msg) # Second: is the Moecache filled. @@ -4275,19 +4413,24 @@ eccentricity3=0 self._load_moe_di_stefano_data() # record the prev value - prev_M1_value_ms = self.grid_options['m&s_options'].get('M_1', None) + prev_M1_value_ms = self.grid_options["m&s_options"].get("M_1", None) # Set value of M1 of the current system - self.grid_options['m&s_options']['M_1'] = system_dict['M_1'] + self.grid_options["m&s_options"]["M_1"] = system_dict["M_1"] # Calculate the multiplicity fraction - multiplicity_fraction_list = Moe_di_Stefano_2017_multiplicity_fractions(self.grid_options['m&s_options'], self.grid_options["verbosity"]) + multiplicity_fraction_list = Moe_di_Stefano_2017_multiplicity_fractions( + self.grid_options["m&s_options"], self.grid_options["verbosity"] + ) # Turn into dict - multiplicity_fraction_dict = {el+1:multiplicity_fraction_list[el] for el in range(len(multiplicity_fraction_list))} + multiplicity_fraction_dict = { + el + 1: multiplicity_fraction_list[el] + for el in range(len(multiplicity_fraction_list)) + } # Set the prev value back - self.grid_options['m&s_options']['M_1'] = prev_M1_value_ms + self.grid_options["m&s_options"]["M_1"] = prev_M1_value_ms # we don't know what to do next else: @@ -4295,12 +4438,15 @@ eccentricity3=0 raise ValueError(msg) verbose_print( - "Multiplicity: {} multiplicity_fraction: {}".format(system_dict['multiplicity'], multiplicity_fraction_dict[system_dict['multiplicity']]), + "Multiplicity: {} multiplicity_fraction: {}".format( + system_dict["multiplicity"], + multiplicity_fraction_dict[system_dict["multiplicity"]], + ), self.grid_options["verbosity"], 2, ) - return multiplicity_fraction_dict[system_dict['multiplicity']] + return multiplicity_fraction_dict[system_dict["multiplicity"]] ###################### # Status logging @@ -4328,73 +4474,82 @@ eccentricity3=0 dn = system_number - self.shared_memory["prev_log_system_number"][0] else: # average over n_saved_log_stats - dt = self.shared_memory["prev_log_time"][0] - self.shared_memory["prev_log_time"][n-1] - dn = self.shared_memory["prev_log_system_number"][0] - self.shared_memory["prev_log_system_number"][n-1] + dt = ( + self.shared_memory["prev_log_time"][0] + - self.shared_memory["prev_log_time"][n - 1] + ) + dn = ( + self.shared_memory["prev_log_system_number"][0] + - self.shared_memory["prev_log_system_number"][n - 1] + ) eta, units, tpr, eta_secs = trem( - dt, - system_number, - dn, - self.grid_options["_total_starcount"] + dt, system_number, dn, self.grid_options["_total_starcount"] ) if eta_secs < secs_per_day: fintime = time.localtime(now + eta_secs) - etf = "{hours:02d}:{minutes:02d}:{seconds:02d}".format(hours = fintime.tm_hour, - minutes = fintime.tm_min, - seconds = fintime.tm_sec) + etf = "{hours:02d}:{minutes:02d}:{seconds:02d}".format( + hours=fintime.tm_hour, minutes=fintime.tm_min, seconds=fintime.tm_sec + ) else: - d = int(eta_secs/secs_per_day) + d = int(eta_secs / secs_per_day) if d == 1: etf = "Tomorrow" else: etf = "In {} days".format(d) # modulo information - if self.grid_options['modulo'] == 1: - modulo = '' # usual case + if self.grid_options["modulo"] == 1: + modulo = "" # usual case else: - modulo = '%' + str(self.grid_options['modulo']) + modulo = "%" + str(self.grid_options["modulo"]) # add up memory use from each thread total_mem_use = 

         # make a string to describe the system e.g. M1, M2, etc.
         system_string = ""
-        if 'multiplicity' in system_dict:
-            for i in range(system_dict['multiplicity']):
-                i1 = str(i+1)
-                system_string += "M{}=".format(i1) + format_number(system_dict['M_'+i1]) + " "
-        if 'separation' in system_dict:
-            system_string += "a=" + format_number(system_dict['separation'])
-        if 'orbital_period' in system_dict:
-            system_string += "P=" + format_number(system_dict['orbital_period'])
+        if "multiplicity" in system_dict:
+            for i in range(system_dict["multiplicity"]):
+                i1 = str(i + 1)
+                system_string += (
+                    "M{}=".format(i1) + format_number(system_dict["M_" + i1]) + " "
+                )
+        if "separation" in system_dict:
+            system_string += "a=" + format_number(system_dict["separation"])
+        if "orbital_period" in system_dict:
+            system_string += "P=" + format_number(system_dict["orbital_period"])

         # do the print
         verbose_print(
             "{opening_colour}{system_number}/{total_starcount}{modulo} {pc_colour}{pc_complete:5.1f}% complete {time_colour}{hours:02d}:{minutes:02d}:{seconds:02d} {ETA_colour}ETA={ETA:7.1f}{units} tpr={tpr:2.2e} {ETF_colour}ETF={ETF} {mem_use_colour}mem:{mem_use:.1f}MB {system_string_colour}{system_string}{closing_colour}".format(
-                opening_colour = self.ANSI_colours['reset'] + self.ANSI_colours['yellow on black'],
-                system_number = system_number,
-                total_starcount = self.grid_options["_total_starcount"],
-                modulo = modulo,
-                pc_colour = self.ANSI_colours['blue on black'],
-                pc_complete=(100.0*system_number)/(1.0*self.grid_options["_total_starcount"]) if self.grid_options["_total_starcount"] else -1,
-                time_colour = self.ANSI_colours['green on black'],
-                hours = localtime.tm_hour,
-                minutes = localtime.tm_min,
-                seconds = localtime.tm_sec,
-                ETA_colour = self.ANSI_colours['red on black'],
-                ETA = eta,
-                units = units,
-                tpr = tpr,
-                ETF_colour = self.ANSI_colours['blue'],
-                ETF = etf,
-                mem_use_colour = self.ANSI_colours['magenta'],
-                mem_use = total_mem_use,
-                system_string_colour = self.ANSI_colours['yellow'],
+                opening_colour=self.ANSI_colours["reset"]
+                + self.ANSI_colours["yellow on black"],
+                system_number=system_number,
+                total_starcount=self.grid_options["_total_starcount"],
+                modulo=modulo,
+                pc_colour=self.ANSI_colours["blue on black"],
+                pc_complete=(100.0 * system_number)
+                / (1.0 * self.grid_options["_total_starcount"])
+                if self.grid_options["_total_starcount"]
+                else -1,
+                time_colour=self.ANSI_colours["green on black"],
+                hours=localtime.tm_hour,
+                minutes=localtime.tm_min,
+                seconds=localtime.tm_sec,
+                ETA_colour=self.ANSI_colours["red on black"],
+                ETA=eta,
+                units=units,
+                tpr=tpr,
+                ETF_colour=self.ANSI_colours["blue"],
+                ETF=etf,
+                mem_use_colour=self.ANSI_colours["magenta"],
+                mem_use=total_mem_use,
+                system_string_colour=self.ANSI_colours["yellow"],
                 system_string=system_string,
-                closing_colour = self.ANSI_colours['reset']
+                closing_colour=self.ANSI_colours["reset"],
             ),
             self.grid_options["verbosity"],
-            1
+            1,
         )
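The hunks above only re-wrap existing logic, but they also show that the multiplicity-fraction choice and the vb=1 status line are driven by plain grid options (defined in grid_options_defaults.py below). A minimal sketch of how those options might be set from user code follows; it assumes the usual Population().set() interface and only uses option names that appear in this diff.

    # Sketch (assumed API): choose a multiplicity fraction function and tune status logging
    from binarycpython.utils.grid import Population

    pop = Population()
    pop.set(
        multiplicity_fraction_function=1,  # 0: none, 1: Arenou 2010, 2: Raghavan 2010, 3: M&S 2017
        log_dt=5,            # seconds between vb=1 status lines
        n_logging_stats=50,  # number of samples used for the ETA/tpr estimate
    )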
- "multiplicity_fraction_function": 0, # Which multiplicity fraction function to use. 0: None, 1: Arenou 2010, 2: Rhagavan 2010, 3: M&S 2017 + "multiplicity_fraction_function": 0, # Which multiplicity fraction function to use. 0: None, 1: Arenou 2010, 2: Rhagavan 2010, 3: M&S 2017 "tmp_dir": temp_dir(), # Setting the temp dir of the program "_main_pid": -1, # Placeholder for the main process id of the run. "combine_ensemble_with_thread_joining": True, # Flag on whether to combine everything and return it to the user or if false: write it to data_dir/ensemble_output_{population_id}_{thread_id}.json @@ -39,12 +39,11 @@ grid_options_defaults_dict = { "log_runtime_systems": 0, # whether to log the runtime of the systems (1 file per thread. stored in the tmp_dir) "_actually_evolve_system": True, # Whether to actually evolve the systems of just act as if. for testing. used in _process_run_population_grid "max_queue_size": 1000, # Maximum size of the system call queue. - "run_zero_probability_system": True, # Whether to run the zero probability systems + "run_zero_probability_system": True, # Whether to run the zero probability systems "_zero_prob_stars_skipped": 0, - "ensemble_factor_in_probability_weighted_mass": False, # Whether to multiply the ensemble results by 1/probability_weighted_mass - "do_dry_run": True, # Whether to do a dry run to calculate the total probability for this run - "exit_after_dry_run": False, # Exit after dry run? - + "ensemble_factor_in_probability_weighted_mass": False, # Whether to multiply the ensemble results by 1/probability_weighted_mass + "do_dry_run": True, # Whether to do a dry run to calculate the total probability for this run + "exit_after_dry_run": False, # Exit after dry run? ########################## # Execution log: ########################## @@ -52,8 +51,8 @@ grid_options_defaults_dict = { "log_file": os.path.join( temp_dir(), "binary_c_python.log" ), # Set to None to not log to file. The directory will be created - "log_dt" : 5, # time between vb=1 logging outputs - "n_logging_stats" : 50, # number of logging stats used to calculate time remaining (etc.) default = 50 + "log_dt": 5, # time between vb=1 logging outputs + "n_logging_stats": 50, # number of logging stats used to calculate time remaining (etc.) default = 50 ########################## # binary_c files ########################## @@ -70,9 +69,9 @@ grid_options_defaults_dict = { ########################## # M&S internal settings ########################## - "_loaded_ms_data": False, # Holds flag whether the M&S data is loaded into memory - "_set_ms_grid": False, # Whether the M&S grid has been loaded - "m&s_options": None, # Holds the M&S options. + "_loaded_ms_data": False, # Holds flag whether the M&S data is loaded into memory + "_set_ms_grid": False, # Whether the M&S grid has been loaded + "m&s_options": None, # Holds the M&S options. ########################## # Custom logging ########################## @@ -504,7 +503,9 @@ grid_options_descriptions = { ### # -MIN_MASS_BINARY_C = float(return_binary_c_version_info(parsed=True)['macros']['BINARY_C_MINIMUM_STELLAR_MASS']) +MIN_MASS_BINARY_C = float( + return_binary_c_version_info(parsed=True)["macros"]["BINARY_C_MINIMUM_STELLAR_MASS"] +) # Default options for the Moe & di Stefano grid @@ -530,7 +531,8 @@ moe_di_stefano_default_options = { "ranges": { # stellar masses (Msun) "M": [ - MIN_MASS_BINARY_C*1.05, # 0.08 is a tad bit above the minimum mass. Don't sample at 0.07, otherwise the first row of q values will have a phasevol of 0. 
+            MIN_MASS_BINARY_C
+            * 1.05,  # 0.08 is a tad bit above the minimum mass. Don't sample at 0.07, otherwise the first row of q values will have a phasevol of 0. Anything higher is fine.
             80.0,  # (rather arbitrary) upper mass cutoff
         ],
         "q": [
@@ -541,8 +543,7 @@ moe_di_stefano_default_options = {
         "ecc": [0.0, 0.99],
     },
     # minimum stellar mass
-    "Mmin": MIN_MASS_BINARY_C, # We take the value that binary_c has set as the default
-
+    "Mmin": MIN_MASS_BINARY_C,  # We take the value that binary_c has set as the default
     # multiplicity model (as a function of log10M1)
     #
     # You can use 'Poisson' which uses the system multiplicity
@@ -553,7 +554,6 @@ moe_di_stefano_default_options = {
     # from the data, but then triples and quadruples are
     # combined (and there are NO quadruples).
     "multiplicity_model": "Poisson",
-
     # multiplicity modulator:
     # [single, binary, triple, quadruple]
     #
@@ -821,7 +821,9 @@ def print_option_descriptions(filehandle, options, descriptions, title, extra_te
     for option in sorted(options):
         if option in descriptions:
             print(
-                "| **{}**: {}".format(option, descriptions[option].replace("\n", "\n\t")),
+                "| **{}**: {}".format(
+                    option, descriptions[option].replace("\n", "\n\t")
+                ),
                 file=filehandle,
             )
         else:
diff --git a/binarycpython/utils/plot_functions.py b/binarycpython/utils/plot_functions.py
index 002b4272b796d8df3f8dfcb78ef5b9e50fd686ca..19bfbd45d3f090d69fb06c0ba4d5ab35e3cf5c1d 100644
--- a/binarycpython/utils/plot_functions.py
+++ b/binarycpython/utils/plot_functions.py
@@ -344,7 +344,7 @@ def plot_masses(df, show_stellar_types: bool = False, show_plot: bool = True):
     )

     # Mass 1
-    fig.axes[0].plot(df["time"], df["mass_1"], label="star 1", color='red')
+    fig.axes[0].plot(df["time"], df["mass_1"], label="star 1", color="red")
     fig.axes[0].axhline(
         df["pms_mass_1"].values.tolist()[0],
         color="red",
@@ -354,7 +354,8 @@ def plot_masses(df, show_stellar_types: bool = False, show_plot: bool = True):
         alpha=0.5,
     )
     fig.axes[0].plot(
-        df["time"], df["core_mass_1"],
+        df["time"],
+        df["core_mass_1"],
         color="red",
         linestyle="-.",
         linewidth=2,
@@ -373,7 +374,8 @@ def plot_masses(df, show_stellar_types: bool = False, show_plot: bool = True):
         alpha=0.5,
     )
     fig.axes[0].plot(
-        df["time"], df["core_mass_2"],
+        df["time"],
+        df["core_mass_2"],
         color="orange",
         linestyle="-.",
         linewidth=2,
diff --git a/binarycpython/utils/run_system_wrapper.py b/binarycpython/utils/run_system_wrapper.py
index 7d6ccf49ee00639c662bb3544ff322f8284aeb6b..51559f62c95101fdf2d83577cf8d1fccf303ba23 100644
--- a/binarycpython/utils/run_system_wrapper.py
+++ b/binarycpython/utils/run_system_wrapper.py
@@ -51,7 +51,9 @@ def run_system(**kwargs):
     # Load available arg keywords
     available_binary_c_arg_keywords = get_arg_keys()

-    special_keywords = [el for el in list(available_binary_c_arg_keywords) if el.endswith("%d")]
+    special_keywords = [
+        el for el in list(available_binary_c_arg_keywords) if el.endswith("%d")
+    ]

     other_keywords = ["custom_logging_code", "log_filename", "parse_function"]

@@ -68,13 +70,13 @@ def run_system(**kwargs):
             binary_c_args[key] = kwargs[key]

         elif any(
-                [
-                    True
-                    if (key.startswith(param[:-2]) and len(param[:-2]) < len(key))
-                    else False
-                    for param in special_keywords
-                ]
-        ):
+            [
+                True
+                if (key.startswith(param[:-2]) and len(param[:-2]) < len(key))
+                else False
+                for param in special_keywords
+            ]
+        ):
             binary_c_args[key] = kwargs[key]

         # Notify user when this key wont be used
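The special_keywords filter and the any(...) test above decide whether a user-supplied keyword should be matched against one of binary_c's numbered argument templates (those ending in "%d"). A small self-contained illustration of that predicate is given below; the template names used here are invented for the example and are not real binary_c parameters.

    # Hypothetical templates, only to illustrate the "%d" prefix matching used in run_system()
    special_keywords = ["M_%d", "orbital_period_%d"]

    def matches_special(key):
        # "M_1" matches "M_%d": same prefix plus at least one extra character
        return any(
            key.startswith(param[:-2]) and len(param[:-2]) < len(key)
            for param in special_keywords
        )

    print(matches_special("M_1"))          # True
    print(matches_special("M_"))           # False: nothing after the prefix
    print(matches_special("metallicity"))  # False: no template matches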
diff --git a/binarycpython/utils/spacing_functions.py b/binarycpython/utils/spacing_functions.py
index e83f91048ee6e35ad54ff5927baed25d19cd9e24..b110037bb8aea17748feac9e89f7e9e3ad7ffad0 100644
--- a/binarycpython/utils/spacing_functions.py
+++ b/binarycpython/utils/spacing_functions.py
@@ -10,6 +10,7 @@ import math
 import numpy as np
 import sys

+
 def const(
     min_bound: Union[int, float], max_bound: Union[int, float], steps: int
 ) -> list:
@@ -25,7 +26,8 @@ def const(
         np.linspace(min_bound, max_bound, steps+1)
     """

-    return np.linspace(min_bound, max_bound, steps+1)
+    return np.linspace(min_bound, max_bound, steps + 1)
+

 ############################################################
 def peak_normalized_gaussian_func(
@@ -44,20 +46,21 @@ def peak_normalized_gaussian_func(
     Returns:
         value of the Gaussian at x
     """
-    gaussian_prefactor = 1.0 #/ math.sqrt(2.0 * math.pi)
+    gaussian_prefactor = 1.0  # / math.sqrt(2.0 * math.pi)
     r = 1.0 / sigma
     y = (x - mean) * r
     return math.exp(-0.5 * y ** 2)

+
 ############################################################
 def gaussian_zoom(
-        min_bound: Union[int, float],
-        max_bound: Union[int, float],
-        zoom_mean: Union[int, float],
-        zoom_dispersion: Union[int,float],
-        zoom_magnitude: Union[int,float],
-        steps: int
+    min_bound: Union[int, float],
+    max_bound: Union[int, float],
+    zoom_mean: Union[int, float],
+    zoom_dispersion: Union[int, float],
+    zoom_magnitude: Union[int, float],
+    steps: int,
 ) -> list:
     """
     Samples such that a region is zoomed in according to a 1-Gaussian function
@@ -77,14 +80,14 @@ def gaussian_zoom(
     # linear spacing: this is what we'd have
     # in the absence of a Gaussian zoom
-    linear_spacing = (max_bound - min_bound)/(steps-1)
+    linear_spacing = (max_bound - min_bound) / (steps - 1)

     # make the list of values
     x = min_bound
     array = np.array([])
     while x <= max_bound:
-        array = np.append(array,x)
-        g = peak_normalized_gaussian_func(x,zoom_mean,zoom_dispersion)
+        array = np.append(array, x)
+        g = peak_normalized_gaussian_func(x, zoom_mean, zoom_dispersion)
         f = 1.0 - zoom_magnitude * g
         dx = linear_spacing * f
         x = x + dx
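const() and gaussian_zoom() are the two spacing functions touched in this file; their call signatures are visible in the hunks above. A brief usage sketch, with argument values chosen arbitrarily for illustration:

    from binarycpython.utils.spacing_functions import const, gaussian_zoom

    # const(min, max, steps) returns steps+1 evenly spaced points (np.linspace)
    mass_edges = const(0.1, 80.0, 10)

    # gaussian_zoom() keeps the same bounds but samples more densely around zoom_mean,
    # because the local step size dx is shrunk by the 1 - magnitude * Gaussian factor
    zoomed = gaussian_zoom(
        min_bound=0.0,
        max_bound=2.0,
        zoom_mean=0.5,
        zoom_dispersion=0.2,
        zoom_magnitude=0.8,
        steps=10,
    )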