Commit 3f98ca51 authored by David Hendriks

working on M&S

parent 95f00b0d
@@ -1113,7 +1113,7 @@ def Moe_de_Stefano_2017_multiplicity_fractions(options):
     # return array reference
     return result
+# @profile
 def build_q_table(options, m, p):
     ############################################################
     #
@@ -1138,12 +1138,13 @@ def build_q_table(options, m, p):
     # We should ignore those parts of the table and renormalize. If we are below the lowest value of qmin in the table we need to extrapolate the data
     # We can check if we have a cached value for this already:
-    # TODO: fix this cache check
+    # TODO: fix this cache check.
     incache = False
     if Moecache.get("rinterpolator_q_metadata", None):
-        if Moecache["rinterpolator_q_metadata"][m] == options[m]:
-            if Moecache["rinterpolator_q_metadata"] == options[p]:
+        if (Moecache["rinterpolator_q_metadata"][m]) and (Moecache["rinterpolator_q_metadata"][p]):
+            if (Moecache["rinterpolator_q_metadata"][m] == options[m]) and (Moecache["rinterpolator_q_metadata"][p] == options[p]):
                 incache = True
+                # print("INCACHE: {}".format(incache))
     #
     if not incache:
@@ -1167,7 +1168,7 @@ def build_q_table(options, m, p):
         qeps = 1e-8  # small number but such that qeps+1 != 1
         if qeps + 1 == 1.0:
-            printf("qeps (= {}) +1 == 1. Make qeps larger".format(qeps))
+            print("qeps (= {}) +1 == 1. Make qeps larger".format(qeps))
         if qmin >= qmax:
             # there may be NO binaries in this part of the parameter space:
@@ -1335,6 +1336,7 @@ def build_q_table(options, m, p):
             else:
                 print("No other methods available")
                 raise ValueError
             # TODO: consider implementing this
             # elsif($method =~ /^(log)?poly(\d+)/)
             # {
@@ -1393,10 +1395,30 @@ def build_q_table(options, m, p):
         for q in sorted(qdata.keys()):
             tmp_table.append([q, qdata[q]])
+        if Moecache.get("rinterpolator_q_given_{}_log10{}".format(m, p), None):
+            print("Present interpolator: {}".format(Moecache["rinterpolator_q_given_{}_log10{}".format(m, p)]))
+            print("Destroying present interpolator:")
+            interpolator = Moecache["rinterpolator_q_given_{}_log10{}".format(m, p)]
+            print(interpolator)
+            print(type(interpolator))
+            print(dir(interpolator))
+            x = Moecache["rinterpolator_q_given_{}_log10{}".format(m, p)].interpolate([0.5])
+            print("Interpolated a value q=0.5: {}".format(x))
+            Moecache["rinterpolator_q_given_{}_log10{}".format(m, p)].destroy()
+            print(interpolator)
+            print(type(interpolator))
+            print(dir(interpolator))
+            print("Present interpolator: {}".format(Moecache["rinterpolator_q_given_{}_log10{}".format(m, p)]))
+            x = Moecache["rinterpolator_q_given_{}_log10{}".format(m, p)].interpolate([0.5])
+            print("Interpolated a value q=0.5: {}".format(x))
+            # del Moecache["rinterpolator_q_given_{}_log10{}".format(m, p)]
+        print("CREATING A NEW TABLE Q table")
         # Make an interpolation table to contain our modified data
         q_interpolator = py_rinterpolate.Rinterpolate(
             table=tmp_table, nparams=1, ndata=1  # Contains the table of data  # q #
         )
+        print("CREATed A NEW TABLE Q table")
         # TODO: build a check in here to see if the interpolator build was successful
         # print("Can renormalize?: {}".format(can_renormalize))
@@ -1434,7 +1456,10 @@ def build_q_table(options, m, p):
             print("Error: > 1e-6 in q probability integral: {}".format(I))
         # set this new table in the cache
+        print("STORING Q INTERPOLATOR AS {}".format("rinterpolator_q_given_{}_log10{}".format(m, p)))
         Moecache["rinterpolator_q_given_{}_log10{}".format(m, p)] = q_interpolator
+        print("STORed Q INTERPOLATOR AS {}".format("rinterpolator_q_given_{}_log10{}".format(m, p)))
         if not Moecache.get("rinterpolator_q_metadata", None):
             Moecache["rinterpolator_q_metadata"] = {}
         Moecache["rinterpolator_q_metadata"][m] = options[m]
@@ -1756,7 +1781,8 @@ def Moe_de_Stefano_2017_pdf(options):
     print_info = 1
     if print_info:
-        print("Probability density")
+        # print("Probability density")
         if multiplicity == 1:
             print(
                 "M1={} q=N/A log10P=N/A ({}): {} -> {}\n".format(
......
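Note on the cache-check change above: the old code compared the whole metadata dict against options[p] and indexed entries before checking that they existed; the rewritten condition first requires both entries to be present and only then compares them to the current options. Below is a minimal, standalone sketch of that logic, not the repository function itself: Moecache and options are plain dicts, the keys M_1 and P are made up for illustration, and dict.get is used so a missing key simply counts as "not cached".

# Minimal sketch of the corrected cache check (standalone; hypothetical keys).
Moecache = {}

def q_table_is_cached(options, m, p):
    """True only if metadata exists for both keys and matches the current options."""
    metadata = Moecache.get("rinterpolator_q_metadata", None)
    if not metadata:
        return False
    # First require that both entries are present, then compare their values.
    if metadata.get(m) and metadata.get(p):
        return (metadata[m] == options[m]) and (metadata[p] == options[p])
    return False

# Example usage:
options = {"M_1": 1.0, "P": 100.0}
Moecache["rinterpolator_q_metadata"] = {"M_1": 1.0, "P": 100.0}
print(q_table_is_cached(options, "M_1", "P"))                   # True
print(q_table_is_cached({"M_1": 2.0, "P": 100.0}, "M_1", "P"))  # False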
@@ -580,9 +580,9 @@ def parse_binary_c_version_info(version_info_string: str) -> dict:
                new_split = "".join(split_info[1:]).split(" is ")
                param_name = new_split[0]
                param_value = " is ".join(new_split[1:])
                # Sometimes the macros have extra information behind it. Needs an update in outputting by binary_c
                try:
+                    print(param_name, param_type, param_value)
                    macros_dict[param_name] = param_type_dict[param_type](param_value)
                except ValueError:
                    macros_dict[param_name] = str(param_value)
@@ -1588,16 +1588,21 @@ def extract_ensemble_json_from_string(binary_c_output: str) -> dict:
        json dictionary with the parsed ENSEMBLE_JSON data
    """
-    json = None
+    json_dict = None
    try:
+        # If there is no output just return an empty dict:
+        if not binary_c_output:
+            json_dict = {}
+            return json_dict
        ensemble_jsons_strings = [
            line
            for line in binary_c_output.splitlines()
            if line.startswith("ENSEMBLE_JSON")
        ]
-        json = handle_ensemble_string_to_json(
+        json_dict = handle_ensemble_string_to_json(
            ensemble_jsons_strings[0][len("ENSEMBLE_JSON ") :]
        )
@@ -1614,7 +1619,7 @@ def extract_ensemble_json_from_string(binary_c_output: str) -> dict:
            0,
        )
-    return json
+    return json_dict

class binarycDecoder(json.JSONDecoder):
......
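The change to extract_ensemble_json_from_string above does two things: it renames the local variable json to json_dict so it no longer shadows the imported json module inside the function, and it returns an empty dict early when binary_c produced no output. A simplified, self-contained sketch of the resulting behaviour follows; json.loads only stands in for the repository's handle_ensemble_string_to_json helper.

import json

def extract_ensemble_json_sketch(binary_c_output: str) -> dict:
    """Pick the first ENSEMBLE_JSON line out of the binary_c output and parse it."""
    # If there is no output just return an empty dict (the new early return):
    if not binary_c_output:
        return {}

    ensemble_jsons_strings = [
        line
        for line in binary_c_output.splitlines()
        if line.startswith("ENSEMBLE_JSON")
    ]
    # json.loads is only a stand-in here for handle_ensemble_string_to_json.
    return json.loads(ensemble_jsons_strings[0][len("ENSEMBLE_JSON "):])

print(extract_ensemble_json_sketch(""))                                  # {}
print(extract_ensemble_json_sketch('ENSEMBLE_JSON {"a": {"b": 1.0}}'))   # {'a': {'b': 1.0}}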
@@ -692,6 +692,9 @@ class Population:
        If neither of the above is set, we continue without using HPC routines
        (that doesn't mean this cannot be run on a server with many cores)
+        Returns an dictionary containing the analytics of the run
+        TODO: change the way this is done. Slurm & CONDOR should probably do this different
        """
        # Just to make sure we don't have stuff from a previous run hanging around
@@ -728,6 +731,9 @@ class Population:
            "total_probability_weighted_mass_run": self.grid_options["_total_probability_weighted_mass_run"],
        }
+        # print(Moecache)
+        print(Moecache.keys())
        ##
        # Clean up code: remove files, unset values. This is placed in the general evolve function,
        # because that makes for easier control
@@ -876,6 +882,10 @@ class Population:
        pool.close()
        pool.join()
+        print("OUTSIDE THREAD")
+        print(Moecache.keys())
+        print("OUTSIDE THREAD")
        # Handle the results by merging all the dictionaries. How that merging happens exactly is
        # described in the merge_dicts description.
        combined_output_dict = {}
@@ -945,6 +955,15 @@ class Population:
        This function is called by _evolve_population_grid
        """
+        import tracemalloc
+        tracemalloc.start()
+        # ... start your application ...
+        snapshot1 = tracemalloc.take_snapshot()
        # set start timer
        start_process_time = datetime.datetime.now()
@@ -1055,6 +1074,7 @@ class Population:
            # they match the keys known to binary_c.
            # Won't do that every system cause that is a bit of a waste of computing time.
            if localcounter == 0:
+                # TODO: Put this someplace else and wrap in a functioncall
                for key in full_system_dict.keys():
                    if not key in self.available_keys:
                        # Deal with special keys
@@ -1075,9 +1095,10 @@ class Population:
                        raise ValueError(msg)
            start_runtime_binary_c = time.time()
+            # TODO: build flag to actually evolve the system
            # Evolve the system
-            self._evolve_system_mp(full_system_dict)
+            if self.grid_options['_actually_evolve_system']:
+                self._evolve_system_mp(full_system_dict)
            end_runtime_binary_c = time.time()
@@ -1216,6 +1237,24 @@ class Population:
                0,
            )
+        print("INSIDE THREAD")
+        print(Moecache.keys())
+        for key in Moecache.keys():
+            print(key, Moecache[key])
+            print("")
+        print("INSIDE THREAD")
+        # ... call the function leaking memory ...
+        snapshot2 = tracemalloc.take_snapshot()
+        top_stats = snapshot2.compare_to(snapshot1, 'lineno')
+        print("[ Top 10 differences ]")
+        for stat in top_stats[:10]:
+            print(stat)
        return output_dict
    # Single system
@@ -1945,6 +1984,7 @@ class Population:
            # code_string += indent * (depth + 2) + "print('phasevol_lnm1: ',phasevol_lnm1); print('phasevol_multiplicity: ',phasevol_multiplicity);\n"
            # code_string += indent * (depth + 2) + "print(probabilities_list)\n"
            # code_string += indent * (depth + 2) + "print(parameter_dict)\n"
+            code_string += indent * (depth + 2) + "print('YOO IK GA LEKKER NOG EEN RONDJE')\n"
            code_string += indent * (depth + 2) + "yield(parameter_dict)\n"
            # If its a dry run, dont do anything with it
@@ -2981,6 +3021,7 @@ class Population:
            ]
        )
+        print("Size multiplicity table: {}", len(Moecache["multiplicity_table"]))
        ############################################################
        # a small log10period which we can shift just outside the
        # table to force integration out there to zero
@@ -3045,6 +3086,7 @@ class Population:
                    / dlog10P,
                ]
            )
+        print("Size period_distributions table: {}", len(Moecache["period_distributions"]))
        ############################################################
        # distributions as a function of mass, period, q
@@ -3123,6 +3165,7 @@ class Population:
                    0.0,
                ]
            )
+        print("Size period_distributions table: {}".format(len(Moecache["period_distributions"])))
        # Write to logfile
        with open("/tmp/moecache.json", "w") as cache_filehandle:
@@ -3243,7 +3286,7 @@ class Population:
            resolution=options["resolutions"]["logP"][0],
            probdist=1.0,
            condition='(self.grid_options["multiplicity"] >= 2)',
-            branchpoint=1,
+            # branchpoint=1,
            gridtype="centred",
            dphasevol="({} * dlog10per)".format(LOG_LN_CONVERTER),
            valuerange=[options["ranges"]["logP"][0], options["ranges"]["logP"][1]],
......
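The tracemalloc calls added to _process_run_population_grid above follow the standard library's snapshot-comparison recipe for hunting memory growth: start tracing, take a snapshot before and after the suspect code, and print the largest per-line differences. A self-contained sketch of the same pattern follows; the work callable and the leaky list are hypothetical.

import tracemalloc

def report_memory_growth(work):
    """Run work() between two tracemalloc snapshots and print the top differences."""
    tracemalloc.start()
    snapshot1 = tracemalloc.take_snapshot()

    work()  # ... the code suspected of leaking memory ...

    snapshot2 = tracemalloc.take_snapshot()
    top_stats = snapshot2.compare_to(snapshot1, 'lineno')

    print("[ Top 10 differences ]")
    for stat in top_stats[:10]:
        print(stat)

# Hypothetical usage: grow a list so it shows up in the statistics.
leaky = []
report_memory_growth(lambda: leaky.extend(range(100_000)))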
@@ -33,6 +33,7 @@ grid_options_defaults_dict = {
    # "output_dir":
    "_commandline_input": "",
    "log_runtime_systems": 0,  # whether to log the runtime of the systems (1 file per thread. stored in the tmp_dir)
+    "_actually_evolve_system": True,  # Whether to actually evolve the systems of just act as if. for testing. used in _process_run_population_grid
    ##########################
    # Execution log:
    ##########################
@@ -469,6 +470,7 @@ grid_options_descriptions = {
    "log_runtime_systems": "Whether to log the runtime of the systems . Each systems run by the thread is logged to a file and is stored in the tmp_dir. (1 file per thread). Don't use this if you are planning to run alot of systems. This is mostly for debugging and finding systems that take long to run. Integer, default = 0. if value is 1 then the systems are logged",
    "_total_mass_run": "To count the total mass that thread/process has ran",
    "_total_probability_weighted_mass_run": "To count the total mass * probability for each system that thread/process has ran",
+    "_actually_evolve_system": "Whether to actually evolve the systems of just act as if. for testing. used in _process_run_population_grid"
}
###
......
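The new _actually_evolve_system option added above lets the grid machinery run without actually calling binary_c, as the guard in _process_run_population_grid shows. A minimal sketch of that guard, assuming a plain dict in place of self.grid_options and a stand-in evolve function instead of self._evolve_system_mp.

grid_options = {"_actually_evolve_system": True}

def evolve_system(full_system_dict):
    # Stand-in for self._evolve_system_mp(full_system_dict).
    print("evolving", full_system_dict)

def run_system(full_system_dict):
    # Only evolve when the testing flag is enabled; with the flag off, the
    # surrounding sampling, counting and timing code still runs.
    if grid_options["_actually_evolve_system"]:
        evolve_system(full_system_dict)

run_system({"M_1": 10.0})                        # evolves the system
grid_options["_actually_evolve_system"] = False
run_system({"M_1": 10.0})                        # dry pass, nothing evolved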