diff --git a/binarycpython/utils/Moe_di_Stefano_2017.py b/binarycpython/utils/Moe_di_Stefano_2017.py index 7f17e6ae23af8eb4894ab96565583291831c9076..fdf507a64bcce8692fb4d0606e8652088b6caf81 100644 --- a/binarycpython/utils/Moe_di_Stefano_2017.py +++ b/binarycpython/utils/Moe_di_Stefano_2017.py @@ -11,15 +11,10 @@ from binarycpython.utils.functions import ( ) from binarycpython.utils.dicts import ( update_dicts, + normalize_dict ) from binarycpython.utils.distribution_functions import ( Moecache, - Moe_di_Stefano_2017_multiplicity_fractions, - get_max_multiplicity, - Arenou2010_binary_fraction, - raghavan2010_binary_fraction, - normalize_dict, - fill_data, LOG_LN_CONVERTER, ) from binarycpython.utils.grid_options_defaults import ( @@ -259,7 +254,7 @@ class Moe_di_Stefano_2017(): qs = sorted(json_data["log10M1"][logmass]["logP"][logperiod]["q"]) # Fill the data and 'normalise' - qdata = fill_data( + qdata = self.fill_data( qs, json_data["log10M1"][logmass]["logP"][logperiod]["q"] ) @@ -277,7 +272,7 @@ class Moe_di_Stefano_2017(): eccs = sorted(json_data["log10M1"][logmass]["logP"][logperiod]["e"]) # Fill the data and 'normalise' - ecc_data = fill_data( + ecc_data = self.fill_data( eccs, json_data["log10M1"][logmass]["logP"][logperiod]["e"] ) @@ -376,7 +371,7 @@ class Moe_di_Stefano_2017(): # first, the multiplicity, this is 1,2,3,4, ... # for singles, binaries, triples, quadruples, ... - max_multiplicity = get_max_multiplicity( + max_multiplicity = self.get_max_multiplicity( self.grid_options["Moe2017_options"]["multiplicity_modulator"] ) verbose_print( @@ -393,7 +388,7 @@ class Moe_di_Stefano_2017(): parameter_name="multiplicity", longname="multiplicity", valuerange=[1, max_multiplicity], - samplerfunc="const_int(1, {n}, {n})".format(n=max_multiplicity), + samplerfunc="self.const_int(1, {n}, {n})".format(n=max_multiplicity), precode='self.grid_options["multiplicity"] = multiplicity; self.bse_options["multiplicity"] = multiplicity; options={}'.format( self.grid_options["Moe2017_options"] ), @@ -413,7 +408,7 @@ class Moe_di_Stefano_2017(): parameter_name="M_1", longname="Primary mass", samplerfunc=self.grid_options["Moe2017_options"]["samplerfuncs"]["M"][0] - or "const(np.log({}), np.log({}), {})".format( + or "self.const_linear(np.log({}), np.log({}), {})".format( self.grid_options["Moe2017_options"]["ranges"]["M"][0], self.grid_options["Moe2017_options"]["ranges"]["M"][1], self.grid_options["Moe2017_options"]["resolutions"]["M"][0], @@ -429,7 +424,7 @@ class Moe_di_Stefano_2017(): gridtype="centred", dphasevol="dlnm1", precode='M_1 = np.exp(lnm1); options["M_1"]=M_1', - probdist="Moe_di_Stefano_2017_pdf({{{}, {}, {}}}, verbosity=self.grid_options['verbosity'])['total_probdens'] if multiplicity == 1 else 1".format( + probdist="self.Moe_di_Stefano_2017_pdf({{{}, {}, {}}}, verbosity=self.grid_options['verbosity'])['total_probdens'] if multiplicity == 1 else 1".format( str(dict(self.grid_options["Moe2017_options"]))[1:-1], "'multiplicity': multiplicity", "'M_1': M_1", @@ -457,7 +452,7 @@ class Moe_di_Stefano_2017(): samplerfunc=self.grid_options["Moe2017_options"]["samplerfuncs"][ "logP" ][0] - or "const({}, {}, {})".format( + or "self.const_linear({}, {}, {})".format( self.grid_options["Moe2017_options"]["ranges"]["logP"][0], self.grid_options["Moe2017_options"]["ranges"]["logP"][1], self.grid_options["Moe2017_options"]["resolutions"]["logP"][0], @@ -495,7 +490,7 @@ M_2 = q * M_1 sep = calc_sep_from_period(M_1, M_2, orbital_period) """, 
samplerfunc=self.grid_options["Moe2017_options"]["samplerfuncs"]["M"][1] - or "const({}, {}, {})".format( + or "self.const_linear({}, {}, {})".format( self.grid_options["Moe2017_options"]["ranges"]["q"][0] if self.grid_options["Moe2017_options"] .get("ranges", {}) @@ -529,7 +524,7 @@ sep = calc_sep_from_period(M_1, M_2, orbital_period) samplerfunc=self.grid_options["Moe2017_options"]["samplerfuncs"][ "ecc" ][0] - or "const({}, {}, {})".format( + or "self.const_linear({}, {}, {})".format( self.grid_options["Moe2017_options"]["ranges"]["ecc"][ 0 ], # Just fail if not defined. @@ -559,7 +554,7 @@ sep = calc_sep_from_period(M_1, M_2, orbital_period) samplerfunc=self.grid_options["Moe2017_options"]["samplerfuncs"][ "logP" ][1] - or "const({}, {}, {})".format( + or "self.const_linear({}, {}, {})".format( self.grid_options["Moe2017_options"]["ranges"]["logP"][0], self.grid_options["Moe2017_options"]["ranges"]["logP"][1], self.grid_options["Moe2017_options"]["resolutions"]["logP"][1], @@ -601,7 +596,7 @@ eccentricity2=0 samplerfunc=self.grid_options["Moe2017_options"]["samplerfuncs"][ "M" ][2] - or "const({}, {}, {})".format( + or "self.const_linear({}, {}, {})".format( self.grid_options["Moe2017_options"]["ranges"]["q"][0] if self.grid_options["Moe2017_options"] .get("ranges", {}) @@ -635,7 +630,7 @@ eccentricity2=0 samplerfunc=self.grid_options["Moe2017_options"][ "samplerfuncs" ]["ecc"][1] - or "const({}, {}, {})".format( + or "self.const_linear({}, {}, {})".format( self.grid_options["Moe2017_options"]["ranges"]["ecc"][ 0 ], # Just fail if not defined. @@ -666,7 +661,7 @@ eccentricity2=0 samplerfunc=self.grid_options["Moe2017_options"][ "samplerfuncs" ]["logP"][2] - or "const({}, {}, {})".format( + or "self.const_linear({}, {}, {})".format( self.grid_options["Moe2017_options"]["ranges"]["logP"][0], self.grid_options["Moe2017_options"]["ranges"]["logP"][1], self.grid_options["Moe2017_options"]["resolutions"]["logP"][ @@ -709,7 +704,7 @@ eccentricity3=0 samplerfunc=self.grid_options["Moe2017_options"][ "samplerfuncs" ]["M"][3] - or "const({}, {}, {})".format( + or "self.const_linear({}, {}, {})".format( self.grid_options["Moe2017_options"]["ranges"]["q"][0] if self.grid_options["Moe2017_options"] .get("ranges", {}) @@ -748,7 +743,7 @@ eccentricity3=0 samplerfunc=self.grid_options["Moe2017_options"][ "samplerfuncs" ]["ecc"][2] - or "const({}, {}, {})".format( + or "self.const_linear({}, {}, {})".format( self.grid_options["Moe2017_options"]["ranges"]["ecc"][ 0 ], # Just fail if not defined. 
@@ -782,7 +777,7 @@ eccentricity3=0
                         '"ecc3": eccentricity3',
                     )
 
-                    probdist_addition = "Moe_di_Stefano_2017_pdf({}, verbosity=self.grid_options['verbosity'])['total_probdens']".format(
+                    probdist_addition = "self.Moe_di_Stefano_2017_pdf({}, verbosity=self.grid_options['verbosity'])['total_probdens']".format(
                         updated_options
                     )
 
@@ -895,7 +890,7 @@ eccentricity3=0
                 3,
             )
 
-            binary_fraction = Arenou2010_binary_fraction(system_dict["M_1"])
+            binary_fraction = self.Arenou2010_binary_fraction(system_dict["M_1"])
             multiplicity_fraction_dict = {
                 1: 1 - binary_fraction,
                 2: binary_fraction,
@@ -911,7 +906,7 @@ eccentricity3=0
                 3,
             )
 
-            binary_fraction = raghavan2010_binary_fraction(system_dict["M_1"])
+            binary_fraction = self.raghavan2010_binary_fraction(system_dict["M_1"])
             multiplicity_fraction_dict = {
                 1: 1 - binary_fraction,
                 2: binary_fraction,
@@ -947,7 +942,7 @@ eccentricity3=0
             self.grid_options["Moe2017_options"]["M_1"] = system_dict["M_1"]
 
             # Calculate the multiplicity fraction
-            multiplicity_fraction_list = Moe_di_Stefano_2017_multiplicity_fractions(
+            multiplicity_fraction_list = self.Moe_di_Stefano_2017_multiplicity_fractions(
                 self.grid_options["Moe2017_options"], self.grid_options["verbosity"]
             )
 
diff --git a/binarycpython/utils/cache.py b/binarycpython/utils/cache.py
new file mode 100644
index 0000000000000000000000000000000000000000..001abc02845f61069f3b7afcb1eec355c24cee96
--- /dev/null
+++ b/binarycpython/utils/cache.py
@@ -0,0 +1,216 @@
+"""
+Module containing (e.g. LRU) cache functionality for binary_c-python.
+
+We use cachetools when possible because this allows us to set up the
+cache of the appropriate size for the task in the grid_options dict.
+Please see the LRU_* options in there.
+
+"""
+import cachetools
+import curses
+import functools
+import importlib
+import inspect
+import time
+
+class cache():
+    class NullCache(cachetools.Cache):
+        """
+        A cachetools cache object that does as little as possible and never matches.
+        """
+        def __init__(self, *args, **kwargs):
+            return None
+        def popitem(self):
+            return # do nothing
+        def __getitem__(self, key):
+            return self.__missing__(key)
+        def __setitem__(self, key, value):
+            return
+        def __delitem__(self, key):
+            return
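# ---------------------------------------------------------------------
# Editor's illustration (a standalone sketch, not part of the patch): a
# copy of the NullCache above, handed to cachetools.cached to show that
# every lookup misses, i.e. the wrapped function is always re-evaluated.
import cachetools

class _NullCache(cachetools.Cache):
    def __init__(self, *args, **kwargs):
        return None
    def popitem(self):
        return
    def __getitem__(self, key):
        return self.__missing__(key)  # always raises KeyError: a cache miss
    def __setitem__(self, key, value):
        return  # never store anything
    def __delitem__(self, key):
        return

_calls = []

@cachetools.cached(cache=_NullCache())
def _square(x):
    _calls.append(x)
    return x * x

_square(3)
_square(3)
assert len(_calls) == 2  # no memoisation took place
# ---------------------------------------------------------------------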
+
+    def __init__(self, **kwargs):
+        # don't do anything: we just inherit from this class
+        return
+
+    def setup_function_cache(self,vb=False,type=None):
+        """
+        Function to wrap binary_c-python's functions in a function cache.
+
+        The functions listed in self.grid_options['function_cache_functions'] are
+        given caches whose size is set in the corresponding
+        (maxsize, cachetype, testargs) tuple of that dict.
+
+        Args:
+            vb: print verbose debugging output if True
+            type: cache type to force for all functions (optional)
+        """
+        # add our custom NullCache to the cachetools selection
+        setattr(cachetools,'NullCache',self.NullCache)
+
+        if not self.grid_options["function_cache"]:
+            # no function cache: give every function a NullCache
+            for func in self.grid_options['function_cache_functions'].keys():
+                self.caches[func] = self.NullCache()
+
+        for func in self.grid_options['function_cache_functions'].keys():
+            (maxsize,cachetype,testargs) = self.grid_options['function_cache_functions'].get(func)
+
+            # which cache should we use?
+            if type:
+                # use type passed in, if given
+                usecachetype = type
+            elif not self.grid_options["function_cache"]:
+                # function cache is disabled, use NoCache
+                usecachetype = "NoCache"
+            else:
+                if cachetype is None:
+                    # use the default type
+                    usecachetype = self.grid_options["function_cache_default_type"]
+                else:
+                    # use type passed in
+                    usecachetype = cachetype
+
+            if vb:
+                print("Setup cache for func {func} : maxsize={maxsize}, cachetype={cachetype} -> use {usecachetype}".format(
+                    func=func,
+                    maxsize=maxsize,
+                    cachetype=cachetype,
+                    usecachetype=usecachetype,
+                ))
+
+            if usecachetype == 'TTLCache':
+                extra_cacheargs = [self.grid_options["function_cache_TTL"]]
+            else:
+                extra_cacheargs = []
+
+            # detect if the function is already wrapped
+            x = func.split('.')
+            modulename = 'binarycpython.utils.' + x[0]
+            module = importlib.import_module(modulename)
+            _method = eval('module.{}.{}'.format(x[0],x[1]))
+            _wrapped = getattr(_method,"__wrapped__",False)
+
+            # if function is wrapped...
+            if _wrapped and id(_method) != id(_wrapped):
+                # save the wrapped function (this calls the cache)
+                if not (func in self.cached_function_cache):
+                    self.cached_function_cache[func] = _method
+                    self.original_function_cache[func] = _wrapped
+
+                if usecachetype == 'NoCache':
+                    # unwrap if we're after NoCache
+                    _code = 'module.{}.{} = _wrapped'.format(x[0],x[1])
+                    exec(_code)
+            else:
+                # function isn't wrapped, which means it was previously
+                # unwrapped, so rewrap it if not using NoCache
+                if usecachetype != 'NoCache':
+                    _code = 'module.{}.{} = self.cached_function_cache["{}"]'.format(x[0],x[1],func)
+                    exec(_code)
+
+            # check we're not still wrapped
+            _method = eval('module' + '.' + x[0] + '.' + x[1])
+            _wrapped = getattr(_method,"__wrapped__",False)
+
+            # if NoCache (explicitly use no cache), just use NullCache
+            # (it's never actually set)
+            if usecachetype == "NoCache":
+                cachetools_func = getattr(cachetools,"NullCache")
+            else:
+                cachetools_func = getattr(cachetools,usecachetype)
+
+            if maxsize == 0:
+                maxsize = self.grid_options['function_cache_default_maxsize']
+
+            if vb:
+                print("Make function cache for func {func}, maxsize {maxsize}".format(
+                    func=func,
+                    maxsize=maxsize
+                ))
+
+            # set up cache function args
+            if maxsize is None:
+                # no maxsize given: fall back to a minimal cache of size 2
+                args = [2]
+            else:
+                args = [maxsize]
+            args += extra_cacheargs
+
+            # clear any existing cache
+            if func in self.caches:
+                try:
+                    self.caches[func].cache_clear()
+                except Exception:
+                    pass
+                del self.caches[func]
+
+            # set up new cache using the appropriate cachetools function
+            if usecachetype != "NoCache":
+                self.caches[func] = cachetools_func(*args)
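# ---------------------------------------------------------------------
# Editor's illustration (standalone sketch, not part of the patch): the
# wrapping scheme that setup_function_cache() manages. Methods are wrapped
# with cachetools.cachedmethod and find their cache in self.caches under a
# dotted name, so replacing that entry replaces the caching policy.
import cachetools

class _Example:
    def __init__(self):
        # one cache object per wrapped method, keyed by dotted name;
        # setup_function_cache() rebuilds entries like this at run time
        self.caches = {
            "_Example.slow": cachetools.LRUCache(maxsize=16),
        }

    @cachetools.cachedmethod(lambda self: self.caches["_Example.slow"])
    def slow(self, x):
        return x ** 2  # stand-in for an expensive calculation
# ---------------------------------------------------------------------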
+
+    def test_caches(self,ntests=10000,dt=5.0):
+        """
+        Interactively benchmark the available cache types for each function
+        in self.grid_options['function_cache_functions'], counting how many
+        calls complete in dt seconds for a range of cache sizes.
+
+        Args:
+            ntests: currently unused
+            dt: time in seconds spent timing each (cache type, maxsize) combination
+        """
+        cachetypes = ("NullCache","FIFOCache","LRUCache","TTLCache","NoCache")
+        functions = self.grid_options['function_cache_functions'].keys()
+        maxsizes = (0,1,2,4,8,16,32,64,128,256)
+
+        stdscr = curses.initscr()
+        #curses.noecho()
+        stdscr.clear()
+        width = len(maxsizes) * 9
+        height = (len(cachetypes) + 2) * len(functions)
+
+        def _coord(n,x,y,xoffset,yoffset):
+            # map n,x,y,xoffset,yoffset to screen coords
+            _x = (x + xoffset) * 9
+            _y = (len(cachetypes) + 4) * n + (y + yoffset)
+            return (_x,_y)
+
+        def _print(n,x,y,xoffset,yoffset,text):
+            # print text at n,x,y,offset
+            _c = _coord(n,x,y,xoffset,yoffset)
+            stdscr.addstr(_c[1],_c[0],text)
+            stdscr.refresh()
+
+        self.grid_options['function_cache'] = True
+        for n,func in enumerate(functions):
+            _print(n,0,0,0,0,
+                   "Cache speed test of function {func}".format(func=func))
+            for x,maxsize in enumerate(maxsizes):
+                _print(n,x,0,2,1,str(maxsize))
+
+            best = 0
+            best_type = None
+            best_maxsize = None
+            for y,type in enumerate(cachetypes):
+                _print(n,0,y,0,2,type)
+                self.grid_options['function_cache_default_type'] = type
+                self.setup_function_cache()
+                (maxsize,cachetype,testargs) = self.grid_options['function_cache_functions'].get(func)
+                # split the dotted name: don't reuse x, which is the column index below
+                parts = func.split('.')
+                modulename = 'binarycpython.utils.' + parts[0]
+                module = importlib.import_module(modulename)
+                _method = eval('module.{}.{}'.format(parts[0],parts[1]))
+                if testargs:
+                    for x,maxsize in enumerate(maxsizes):
+                        if type == 'NoCache' and maxsize > 0:
+                            continue
+                        t0 = time.time()
+                        count = 0
+                        while time.time() - t0 < dt:
+                            _method(self,*testargs)
+                            count += 1
+
+                        if count < 99999:
+                            _print(n,x,y,2,2,"{:d}".format(count))
+                        else:
+                            _print(n,x,y,2,2,"{:.2e}".format(float(count)))
+                        if count > best:
+                            best = count
+                            best_type = type
+                            best_maxsize = maxsize
+            _print(n,0,len(cachetypes)+2,0,0,"Best cache type {type} with maxsize {maxsize}".format(type=best_type,maxsize=best_maxsize))
+
+        stdscr.refresh()
+        stdscr.getkey()
+        curses.echo()
+        print("\npress any key to exit\n")
+        curses.endwin()
diff --git a/binarycpython/utils/dicts.py b/binarycpython/utils/dicts.py
index 36198a52d90a74e0c4f63584120de2f1279b23cf..dcdd189137ed0610a5dad0b28a5f23713f9c805e 100644
--- a/binarycpython/utils/dicts.py
+++ b/binarycpython/utils/dicts.py
@@ -770,3 +770,59 @@ def filter_dict_through_values(arg_dict: dict, filter_list: list) -> dict:
             new_dict[key] = arg_dict[key]
 
     return new_dict
+
+
+def prepare_dict(global_dict: dict, list_of_sub_keys: list) -> None:
+    """
+    Function that makes sure that the global dict is prepared to have a value set there.
+    This dictionary will store values and factors for the distribution functions,
+    so that they don't have to be calculated each time.
+
+    Args:
+        global_dict: globally accessible dictionary in which factors are stored
+        list_of_sub_keys: List of keys that must be(come) present in the global_dict
+    """
+
+    internal_dict_value = global_dict
+
+    # This loop almost mimics a recursive loop into the dictionary.
+    # It checks whether the first key of the list is present, if not; set it with an empty dict.
+    # Then it overrides itself to be that (new) item, and goes on to do that again, until the list
+    # is exhausted
+    for k in list_of_sub_keys:
+        # If the sub key doesn't exist then make an empty dict
+        if not internal_dict_value.get(k, None):
+            internal_dict_value[k] = {}
+        internal_dict_value = internal_dict_value[k]
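# ---------------------------------------------------------------------
# Editor's example (not part of the patch): prepare_dict() only creates the
# nested path; setting the leaf value is left to the caller. The keys below
# mirror the Izzard2012 usage later in this patch.
_d = {}
prepare_dict(_d, ["Izzard2012", 1.15])
_d["Izzard2012"][1.15]["norm"] = 0.5  # safe: the intermediate dicts now exist
# ---------------------------------------------------------------------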
+
+
+def set_opts(opts: dict, newopts: dict) -> dict:
+    """
+    Function to take a default dict and override it with newer values.
+
+    # TODO: consider changing this to just a dict.update
+
+    Args:
+        opts: dictionary with default values
+        newopts: dictionary with new values
+
+    Returns:
+        returns an updated dictionary
+    """
+
+    if newopts:
+        for opt in newopts.keys():
+            if opt in opts.keys():
+                opts[opt] = newopts[opt]
+
+    return opts
+
+
+def normalize_dict(result_dict, verbosity=0):
+    """
+    Function to normalise a dictionary in place so that its values sum to 1.
+
+    Args:
+        result_dict: dictionary with numeric values
+        verbosity: verbosity level (currently unused)
+
+    Returns:
+        the same dictionary, normalised
+    """
+
+    sum_result = sum(result_dict.values())
+    for key in result_dict.keys():
+        result_dict[key] = result_dict[key] / sum_result
+    return result_dict
diff --git a/binarycpython/utils/distribution_functions.py b/binarycpython/utils/distribution_functions.py
index f88492d1cd3546de338f86bf5fccad92e2719b0b..88d93b1bd6552d98608d54dc339f259ba6477134 100644
--- a/binarycpython/utils/distribution_functions.py
+++ b/binarycpython/utils/distribution_functions.py
@@ -20,17 +20,15 @@ Tasks:
     - TODO: make an n-part power law that's general enough to fix the three part and the 4 part
 """
+import cachetools
 import functools
 import math
 import json
-
+import numpy as np
+import py_rinterpolate
 import traceback
 import sys
-
 from typing import Union
-
-import numpy as np
-
 from binarycpython.utils.useful_funcs import calc_period_from_sep, calc_sep_from_period
 from binarycpython.utils.functions import verbose_print
 from binarycpython.utils.grid_options_defaults import (
@@ -38,7 +36,10 @@ from binarycpython.utils.grid_options_defaults import (
     _MOE2017_VERBOSITY_INTERPOLATOR_LEVEL,
     _MOE2017_VERBOSITY_INTERPOLATOR_EXTRA_LEVEL,
 )
+from binarycpython.utils.dicts import (set_opts, prepare_dict)
 
+# Global dictionary to store values in
+Moecache = {}
 
 ###
 # File containing probability distributions
@@ -46,2218 +47,2008 @@ from binarycpython.utils.grid_options_defaults import (
 
 LOG_LN_CONVERTER = 1.0 / math.log(10.0)
 
 distribution_constants = {}  # To store the constants in
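# ---------------------------------------------------------------------
# Editor's sketch (hypothetical names, not part of the patch): the classes
# in this patch -- cache, distribution_functions, and friends -- are mixins
# whose __init__ does nothing, so a composing class supplies the shared
# state (grid_options, caches) that the mixin methods expect on self.
class _CacheMixin:                 # stand-in for cache
    def __init__(self, **kwargs):
        return

class _DistributionsMixin:         # stand-in for distribution_functions
    def __init__(self, **kwargs):
        return

class _Population(_CacheMixin, _DistributionsMixin):
    def __init__(self):
        self.grid_options = {}     # shared state used by the mixin methods
        self.caches = {}
# ---------------------------------------------------------------------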
+class distribution_functions():
+    def __init__(self, **kwargs):
+        # don't do anything: we just inherit from this class
+        return
 
-def prepare_dict(global_dict: dict, list_of_sub_keys: list) -> None:
-    """
-    Function that makes sure that the global dict is prepared to have a value set there.
-    This dictionary will store values and factors for the distribution functions,
-    so that they don't have to be calculated each time.
-
-    Args:
-        global_dict: globally accessible dictionary where factors are stored in
-        list_of_sub_keys: List of keys that must become be(come) present in the global_dict
-    """
-
-    internal_dict_value = global_dict
-
-    # This loop almost mimics a recursive loop into the dictionary.
-    # It checks whether the first key of the list is present, if not; set it with an empty dict.
-    # Then it overrides itself to be that (new) item, and goes on to do that again, until the list
-    # exhausted
-    for k in list_of_sub_keys:
-        # If the sub key doesnt exist then make an empty dict
-        if not internal_dict_value.get(k, None):
-            internal_dict_value[k] = {}
-        internal_dict_value = internal_dict_value[k]
-
-
-def set_opts(opts: dict, newopts: dict) -> dict:
-    """
-    Function to take a default dict and override it with newer values.
-
-    # TODO: consider changing this to just a dict.update
-
-    Args:
-        opts: dictionary with default values
-        newopts: dictionary with new values
-
-    Returns:
-        returns an updated dictionary
-    """
+    def flat(self) -> float:
+        """
+        Dummy distribution function that returns 1
 
-    if newopts:
-        for opt in newopts.keys():
-            if opt in opts.keys():
-                opts[opt] = newopts[opt]
+        Returns:
+            a flat uniform distribution: 1
+        """
 
-    return opts
+        return 1.0
 
-def flat() -> float:
-    """
-    Dummy distribution function that returns 1
+    def number(self,value: Union[int, float]) -> Union[int, float]:
+        """
+        Dummy distribution function that returns the input
 
-    Returns:
-        a flat uniform distribution: 1
-    """
+        Args:
+            value: the value that will be returned by this function.
 
-    return 1.0
+        Returns:
+            the value that was provided
+        """
+        return value
 
-def number(value: Union[int, float]) -> Union[int, float]:
-    """
-    Dummy distribution function that returns the input
+    def const_distribution(
+        self,
+        min_bound: Union[int, float], max_bound: Union[int, float], val: float = None
+    ) -> Union[int, float]:
+        """
+        A constant distribution function between min=min_bound and max=max_bound.
 
-    Args:
-        value: the value that will be returned by this function.
+        Args:
+            min_bound: lower bound of the range
+            max_bound: upper bound of the range
+            val: optional value at which to evaluate; if given and outside (min_bound, max_bound], the probability is 0
 
-    Returns:
-        the value that was provided
-    """
+        Returns:
+            returns the value of 1/(max_bound-min_bound). If val is provided, it will check whether min_bound < val <= max_bound. if not: returns 0
+        """
 
-    return value
-
-
-def const(
-    min_bound: Union[int, float], max_bound: Union[int, float], val: float = None
-) -> Union[int, float]:
-    """
-    a constant distribution function between min=min_bound and max=max_bound.
+        if val is not None and not min_bound < val <= max_bound:
+            prob = 0.0
+        else:
+            prob = 1.0 / (max_bound - min_bound)
+        return prob
 
-    Args:
-        min_bound: lower bound of the range
-        max_bound: upper bound of the range
 
-    Returns:
-        returns the value of 1/(max_bound-min_bound). If val is provided, it will check whether min_bound < val <= max_bound. if not: returns 0
-    """
+    def powerlaw_constant_nocache(self,
+        min_val: Union[int, float],
+        max_val: Union[int, float],
+        k: Union[int, float]
+    ) -> Union[int, float]:
+        """
+        Function that returns the constant to normalise a power law
 
-    if val:
-        if not min_bound < val <= max_bound:
-            print("out of bounds")
-            prob = 0
-            return prob
-    prob = 1.0 / (max_bound - min_bound)
-    return prob
-
-
-@functools.lru_cache(maxsize=16)
-def powerlaw_constant(
-    min_val: Union[int, float], max_val: Union[int, float], k: Union[int, float]
-) -> Union[int, float]:
-    """
-    Function that returns the constant to normalise a power law
-
-    TODO: what if k is -1?
- - Args: - min_val: lower bound of the range - max_val: upper bound of the range - k: power law slope - - Returns: - constant to normalise the given power law between the min_val and max_val range - """ - - k1 = k + 1.0 - # print( - # "Powerlaw consts from {} to {}, k={} where k1={}".format( - # min_val, max_val, k, k1 - # ) - # ) - - powerlaw_const = k1 / (max_val ** k1 - min_val ** k1) - return powerlaw_const - - -def powerlaw( - min_val: Union[int, float], - max_val: Union[int, float], - k: Union[int, float], - x: Union[int, float], -) -> Union[int, float]: - """ - Single power law with index k at x from min to max - - Args: - min_val: lower bound of the power law - max_val: upper bound of the power law - k: slope of the power law - x: position at which we want to evaluate - - Returns: - `probability` at the given position(x) - """ - - # Handle faulty value - if k == -1: - msg = "wrong value for k" - raise ValueError(msg) - - if (x < min_val) or (x > max_val): - print("input value is out of bounds!") - return 0 - - powerlaw_const = powerlaw_constant(min_val, max_val, k) - - # power law - prob = powerlaw_const * (x ** k) - # print( - # "Power law from {} to {}: const = {}, y = {}".format( - # min_val, max_val, const, y - # ) - # ) - return prob - - -@functools.lru_cache(maxsize=16) -def calculate_constants_three_part_powerlaw( - m0: Union[int, float], - m1: Union[int, float], - m2: Union[int, float], - m_max: Union[int, float], - p1: Union[int, float], - p2: Union[int, float], - p3: Union[int, float], -) -> Union[int, float]: - """ - Function to calculate the constants for a three-part power law - - TODO: use the power law_constant function to calculate all these values - - Args: - m0: lower bound mass - m1: second boundary, between the first slope and the second slope - m2: third boundary, between the second slope and the third slope - m_max: upper bound mass - p1: first slope - p2: second slope - p3: third slope - - Returns: - array of normalisation constants - """ - - # print("Initialising constants for the three-part powerlaw: m0={} m1={} m2={}\ - # m_max={} p1={} p2={} p3={}\n".format(m0, m1, m2, m_max, p1, p2, p3)) - - array_constants_three_part_powerlaw = [0, 0, 0] - - array_constants_three_part_powerlaw[1] = ( - ((m1 ** p2) * (m1 ** (-p1))) - * (1.0 / (1.0 + p1)) - * (m1 ** (1.0 + p1) - m0 ** (1.0 + p1)) - ) - array_constants_three_part_powerlaw[1] += ( - (m2 ** (1.0 + p2) - m1 ** (1.0 + p2)) - ) * (1.0 / (1.0 + p2)) - array_constants_three_part_powerlaw[1] += ( - ((m2 ** p2) * (m2 ** (-p3))) - * (1.0 / (1.0 + p3)) - * (m_max ** (1.0 + p3) - m2 ** (1.0 + p3)) - ) - array_constants_three_part_powerlaw[1] = 1.0 / ( - array_constants_three_part_powerlaw[1] + 1e-50 - ) + TODO: what if k is -1? 
- array_constants_three_part_powerlaw[0] = array_constants_three_part_powerlaw[1] * ( - (m1 ** p2) * (m1 ** (-p1)) - ) - array_constants_three_part_powerlaw[2] = array_constants_three_part_powerlaw[1] * ( - (m2 ** p2) * (m2 ** (-p3)) - ) + Args: + min_val: lower bound of the range + max_val: upper bound of the range + k: power law slope - return array_constants_three_part_powerlaw - # $$array[1]=(($m1**$p2)*($m1**(-$p1)))* - # (1.0/(1.0+$p1))* - # ($m1**(1.0+$p1)-$m0**(1.0+$p1))+ - # (($m2**(1.0+$p2)-$m1**(1.0+$p2)))* - # (1.0/(1.0+$p2))+ - # (($m2**$p2)*($m2**(-$p3)))* - # (1.0/(1.0+$p3))* - # ($mmax**(1.0+$p3)-$m2**(1.0+$p3)); - # $$array[1]=1.0/($$array[1]+1e-50); - # $$array[0]=$$array[1]*$m1**$p2*$m1**(-$p1); - # $$array[2]=$$array[1]*$m2**$p2*$m2**(-$p3); - # #print "ARRAY SET @_ => @$array\n"; - # $threepart_powerlaw_consts{"@_"}=[@$array]; - - -def three_part_powerlaw( - m: Union[int, float], - m0: Union[int, float], - m1: Union[int, float], - m2: Union[int, float], - m_max: Union[int, float], - p1: Union[int, float], - p2: Union[int, float], - p3: Union[int, float], -) -> Union[int, float]: - """ - Generalised three-part power law, usually used for mass distributions - - Args: - m: mass at which we want to evaluate the distribution. - m0: lower bound mass - m1: second boundary, between the first slope and the second slope - m2: third boundary, between the second slope and the third slope - m_max: upper bound mass - p1: first slope - p2: second slope - p3: third slope - - Returns: - 'probability' at given mass m - """ - - # TODO: add check on whether the values exist - - three_part_powerlaw_constants = calculate_constants_three_part_powerlaw( - m0, m1, m2, m_max, p1, p2, p3 - ) + Returns: + constant to normalise the given power law between the min_val and max_val range + """ - if m < m0: - prob = 0.0 # Below lower bound - elif m <= m1: - prob = three_part_powerlaw_constants[0] * (m ** p1) # Between m0 and m1 - elif m <= m2: - prob = three_part_powerlaw_constants[1] * (m ** p2) # Between m1 and m2 - elif m <= m_max: - prob = three_part_powerlaw_constants[2] * (m ** p3) # Between m2 and m_max - else: - prob = 0 # Above m_max - - return prob - - -@functools.lru_cache(maxsize=16) -def gaussian_normalizing_const( - mean: Union[int, float], - sigma: Union[int, float], - gmin: Union[int, float], - gmax: Union[int, float], -) -> Union[int, float]: - """ - Function to calculate the normalisation constant for the Gaussian - - Args: - mean: mean of the Gaussian - sigma: standard deviation of the Gaussian - gmin: lower bound of the range to calculate the probabilities in - gmax: upper bound of the range to calculate the probabilities in - - Returns: - normalisation constant for the Gaussian distribution(mean, sigma) between gmin and gmax - """ - - # First time; calculate multiplier for given mean and sigma - ptot = 0 - resolution = 1000 - d = (gmax - gmin) / resolution - - for i in range(resolution): - y = gmin + i * d - ptot += d * gaussian_func(y, mean, sigma) - - # TODO: Set value in global - return ptot - - -def gaussian_func( - x: Union[int, float], mean: Union[int, float], sigma: Union[int, float] -) -> Union[int, float]: - """ - Function to evaluate a Gaussian at a given point, but this time without any boundaries. 
- - Args: - x: location at which to evaluate the distribution - mean: mean of the Gaussian - sigma: standard deviation of the Gaussian - - Returns: - value of the Gaussian at x - """ - gaussian_prefactor = 1.0 / math.sqrt(2.0 * math.pi) - - r = 1.0 / (sigma) - y = (x - mean) * r - return gaussian_prefactor * r * math.exp(-0.5 * y ** 2) - - -def gaussian( - x: Union[int, float], - mean: Union[int, float], - sigma: Union[int, float], - gmin: Union[int, float], - gmax: Union[int, float], -) -> Union[int, float]: - """ - Gaussian distribution function. used for e.g. Duquennoy + Mayor 1991 - - Args: - x: location at which to evaluate the distribution - mean: mean of the Gaussian - sigma: standard deviation of the Gaussian - gmin: lower bound of the range to calculate the probabilities in - gmax: upper bound of the range to calculate the probabilities in - - Returns: - 'probability' of the Gaussian distribution between the boundaries, evaluated at x - """ - - # # location (X value), mean and sigma, min and max range - # my ($x,$mean,$sigma,$gmin,$gmax) = @_; - - if (x < gmin) or (x > gmax): - prob = 0 - else: - # normalise over given range - # TODO: add loading into global var - normalisation = gaussian_normalizing_const(mean, sigma, gmin, gmax) - prob = gaussian_func(x, mean, sigma) / normalisation - - return prob - - -##### -# Mass distributions -##### - - -def Kroupa2001(m: Union[int, float], newopts: dict = None) -> Union[int, float]: - """ - Probability distribution function for Kroupa 2001 IMF, where the default values to the - three_part_powerlaw are: default = {"m0": 0.1, "m1": 0.5, "m2": 1, "mmax": 100, "p1": -1.3, "p2": -2.3,"p3": -2.3} - - Args: - m: mass to evaluate the distribution at - newopts: optional dict to override the default values. - - Returns: - 'probability' of distribution function evaluated at m - """ - - # Default parameters and override them - default = { - "m0": 0.1, - "m1": 0.5, - "m2": 1, - "mmax": 100, - "p1": -1.3, - "p2": -2.3, - "p3": -2.3, - } - - value_dict = default.copy() - - if newopts: - value_dict.update(newopts) - - return three_part_powerlaw( - m, - value_dict["m0"], - value_dict["m1"], - value_dict["m2"], - value_dict["mmax"], - value_dict["p1"], - value_dict["p2"], - value_dict["p3"], - ) + k1 = k + 1.0 + # print( + # "Powerlaw consts from {} to {}, k={} where k1={}".format( + # min_val, max_val, k, k1 + # ) + # ) + powerlaw_const = k1 / (max_val ** k1 - min_val ** k1) + return powerlaw_const -def ktg93(m: Union[int, float], newopts: dict = None) -> Union[int, float]: - """ - Probability distribution function for KTG93 IMF, where the default values to the three_part_powerlaw are: default = {"m0": 0.1, "m1": 0.5, "m2": 1, "mmax": 80, "p1": -1.3, "p2": -2.2,"p3": -2.7} - - Args: - m: mass to evaluate the distribution at - newopts: optional dict to override the default values. 
- - Returns: - 'probability' of distribution function evaluated at m - """ - # TODO: ask rob what this means - - # if($m eq 'uncertainties') - # { - # # return (pointer to) the uncertainties hash - # return { - # m0=>{default=>0.1, - # fixed=>1}, - # m1=>{default=>0.5, - # fixed=>1}, - # m2=>{default=>1.0, - # fixed=>1}, - # mmax=>{default=>80.0, - # fixed=>1}, - # p1=>{default=>-1.3, - # low=>-1.3, - # high=>-1.3}, - # p2=>{default=>-2.2, - # low=>-2.2, - # high=>-2.2}, - # p3=>{default=>-2.7, - # low=>-2.7, - # high=>-2.7} - # }; - # } - - # set options - # opts = set_opts({'m0':0.1, 'm1':0.5, 'm2':1.0, 'mmax':80, 'p1':-1.3, 'p2':-2.2, 'p3':-2.7}, - # newopts) - - defaults = { - "m0": 0.1, - "m1": 0.5, - "m2": 1.0, - "mmax": 80, - "p1": -1.3, - "p2": -2.2, - "p3": -2.7, - } - value_dict = defaults.copy() - - if newopts: - value_dict.update(newopts) - - return three_part_powerlaw( - m, - value_dict["m0"], - value_dict["m1"], - value_dict["m2"], - value_dict["mmax"], - value_dict["p1"], - value_dict["p2"], - value_dict["p3"], + @cachetools.cachedmethod( + lambda self: self.caches["distribution_functions.powerlaw_constant"] ) + def powerlaw_constant(self, + min_val: Union[int, float], + max_val: Union[int, float], + k: Union[int, float] + ) -> Union[int, float]: + """ + Function that returns the constant to normalise a power law + + TODO: what if k is -1? + + Args: + min_val: lower bound of the range + max_val: upper bound of the range + k: power law slope + + Returns: + constant to normalise the given power law between the min_val and max_val range + """ + + k1 = k + 1.0 + # print( + # "Powerlaw consts from {} to {}, k={} where k1={}".format( + # min_val, max_val, k, k1 + # ) + # ) + powerlaw_const = k1 / (max_val ** k1 - min_val ** k1) + return powerlaw_const + + + def powerlaw( + self, + min_val: Union[int, float], + max_val: Union[int, float], + k: Union[int, float], + x: Union[int, float], + ) -> Union[int, float]: + """ + Single power law with index k at x from min to max + + Args: + min_val: lower bound of the power law + max_val: upper bound of the power law + k: slope of the power law + x: position at which we want to evaluate + + Returns: + `probability` at the given position(x) + """ + + # Handle faulty value + if k == -1: + msg = "wrong value for k" + raise ValueError(msg) -# sub ktg93_lnspace -# { -# # wrapper for KTG93 on a ln(m) grid -# my $m=$_[0]; -# return ktg93(@_) * $m; -# } - - -def imf_tinsley1980(m: Union[int, float]) -> Union[int, float]: - """ - Probability distribution function for Tinsley 1980 IMF (defined up until 80Msol): three_part_powerlaw(m, 0.1, 2.0, 10.0, 80.0, -2.0, -2.3, -3.3) - - Args: - m: mass to evaluate the distribution at - - Returns: - 'probability' of distribution function evaluated at m - """ - - return three_part_powerlaw(m, 0.1, 2.0, 10.0, 80.0, -2.0, -2.3, -3.3) - - -def imf_scalo1986(m: Union[int, float]) -> Union[int, float]: - """ - Probability distribution function for Scalo 1986 IMF (defined up until 80Msol): three_part_powerlaw(m, 0.1, 1.0, 2.0, 80.0, -2.35, -2.35, -2.70) - - Args: - m: mass to evaluate the distribution at - - Returns: - 'probability' of distribution function evaluated at m - """ - return three_part_powerlaw(m, 0.1, 1.0, 2.0, 80.0, -2.35, -2.35, -2.70) + if (x < min_val) or (x > max_val): + print("input value is out of bounds!") + return 0 + powerlaw_const = self.powerlaw_constant(min_val, max_val, k) -def imf_scalo1998(m: Union[int, float]) -> Union[int, float]: - """ - From Scalo 1998 + # power law + prob = 
powerlaw_const * (x ** k) + return prob - Probability distribution function for Scalo 1998 IMF (defined up until 80Msol): three_part_powerlaw(m, 0.1, 1.0, 10.0, 80.0, -1.2, -2.7, -2.3) - Args: - m: mass to evaluate the distribution at + @cachetools.cachedmethod( + lambda self: self.caches["distribution_functions.calculate_constants_three_part_powerlaw"] + ) + def calculate_constants_three_part_powerlaw( + self, + m0: Union[int, float], + m1: Union[int, float], + m2: Union[int, float], + m_max: Union[int, float], + p1: Union[int, float], + p2: Union[int, float], + p3: Union[int, float], + ) -> Union[int, float]: + """ + Function to calculate the constants for a three-part power law + + TODO: use the power law_constant function to calculate all these values + + Args: + m0: lower bound mass + m1: second boundary, between the first slope and the second slope + m2: third boundary, between the second slope and the third slope + m_max: upper bound mass + p1: first slope + p2: second slope + p3: third slope + + Returns: + array of normalisation constants + """ + + array_constants_three_part_powerlaw = [0, 0, 0] + + array_constants_three_part_powerlaw[1] = ( + ((m1 ** p2) * (m1 ** (-p1))) + * (1.0 / (1.0 + p1)) + * (m1 ** (1.0 + p1) - m0 ** (1.0 + p1)) + ) + array_constants_three_part_powerlaw[1] += ( + (m2 ** (1.0 + p2) - m1 ** (1.0 + p2)) + ) * (1.0 / (1.0 + p2)) + array_constants_three_part_powerlaw[1] += ( + ((m2 ** p2) * (m2 ** (-p3))) + * (1.0 / (1.0 + p3)) + * (m_max ** (1.0 + p3) - m2 ** (1.0 + p3)) + ) + array_constants_three_part_powerlaw[1] = 1.0 / ( + array_constants_three_part_powerlaw[1] + 1e-50 + ) - Returns: - 'probability' of distribution function evaluated at m - """ + array_constants_three_part_powerlaw[0] = array_constants_three_part_powerlaw[1] * ( + (m1 ** p2) * (m1 ** (-p1)) + ) + array_constants_three_part_powerlaw[2] = array_constants_three_part_powerlaw[1] * ( + (m2 ** p2) * (m2 ** (-p3)) + ) - return three_part_powerlaw(m, 0.1, 1.0, 10.0, 80.0, -1.2, -2.7, -2.3) + return array_constants_three_part_powerlaw + + + def three_part_powerlaw( + self, + m: Union[int, float], + m0: Union[int, float], + m1: Union[int, float], + m2: Union[int, float], + m_max: Union[int, float], + p1: Union[int, float], + p2: Union[int, float], + p3: Union[int, float], + ) -> Union[int, float]: + """ + Generalised three-part power law, usually used for mass distributions + + Args: + m: mass at which we want to evaluate the distribution. 
+ m0: lower bound mass + m1: second boundary, between the first slope and the second slope + m2: third boundary, between the second slope and the third slope + m_max: upper bound mass + p1: first slope + p2: second slope + p3: third slope + + Returns: + 'probability' at given mass m + """ + + # TODO: add check on whether the values exist + + three_part_powerlaw_constants = self.calculate_constants_three_part_powerlaw( + m0, m1, m2, m_max, p1, p2, p3 + ) + if m < m0: + prob = 0.0 # Below lower bound + elif m <= m1: + prob = three_part_powerlaw_constants[0] * (m ** p1) # Between m0 and m1 + elif m <= m2: + prob = three_part_powerlaw_constants[1] * (m ** p2) # Between m1 and m2 + elif m <= m_max: + prob = three_part_powerlaw_constants[2] * (m ** p3) # Between m2 and m_max + else: + prob = 0 # Above m_max -def imf_chabrier2003(m: Union[int, float]) -> Union[int, float]: - """ - Probability distribution function for IMF of Chabrier 2003 PASP 115:763-795 + return prob - Args: - m: mass to evaluate the distribution at - Returns: - 'probability' of distribution function evaluated at m - """ + @cachetools.cachedmethod( + lambda self: self.caches["distribution_functions.gaussian_normalizing_const"] + ) + def gaussian_normalizing_const( + self, + mean: Union[int, float], + sigma: Union[int, float], + gmin: Union[int, float], + gmax: Union[int, float], + ) -> Union[int, float]: + """ + Function to calculate the normalisation constant for the Gaussian + + Args: + mean: mean of the Gaussian + sigma: standard deviation of the Gaussian + gmin: lower bound of the range to calculate the probabilities in + gmax: upper bound of the range to calculate the probabilities in + + Returns: + normalisation constant for the Gaussian distribution(mean, sigma) between gmin and gmax + """ + + # First time; calculate multiplier for given mean and sigma + ptot = 0 + resolution = 1000 + d = (gmax - gmin) / resolution + + for i in range(resolution): + y = gmin + i * d + ptot += d * self.gaussian_func(y, mean, sigma) + + # TODO: Set value in global + return ptot + + def gaussian_func( + self, + x: Union[int, float], mean: Union[int, float], sigma: Union[int, float] + ) -> Union[int, float]: + """ + Function to evaluate a Gaussian at a given point, but this time without any boundaries. + + Args: + x: location at which to evaluate the distribution + mean: mean of the Gaussian + sigma: standard deviation of the Gaussian + + Returns: + value of the Gaussian at x + """ + gaussian_prefactor = 1.0 / math.sqrt(2.0 * math.pi) + + r = 1.0 / (sigma) + y = (x - mean) * r + return gaussian_prefactor * r * math.exp(-0.5 * y ** 2) + + def gaussian( + self, + x: Union[int, float], + mean: Union[int, float], + sigma: Union[int, float], + gmin: Union[int, float], + gmax: Union[int, float], + ) -> Union[int, float]: + """ + Gaussian distribution function. used for e.g. 
Duquennoy + Mayor 1991 + + Args: + x: location at which to evaluate the distribution + mean: mean of the Gaussian + sigma: standard deviation of the Gaussian + gmin: lower bound of the range to calculate the probabilities in + gmax: upper bound of the range to calculate the probabilities in + + Returns: + 'probability' of the Gaussian distribution between the boundaries, evaluated at x + """ + + # # location (X value), mean and sigma, min and max range + # my ($x,$mean,$sigma,$gmin,$gmax) = @_; + + if (x < gmin) or (x > gmax): + prob = 0 + else: + # normalise over given range + # TODO: add loading into global var + normalisation = self.gaussian_normalizing_const(mean, sigma, gmin, gmax) + prob = self.gaussian_func(x, mean, sigma) / normalisation + + return prob + + + ##### + # Mass distributions + ##### + + def Kroupa2001(self, + m: Union[int, float], newopts: dict = None) -> Union[int, float]: + """ + Probability distribution function for Kroupa 2001 IMF, where the default values to the + three_part_powerlaw are: default = {"m0": 0.1, "m1": 0.5, "m2": 1, "mmax": 100, "p1": -1.3, "p2": -2.3,"p3": -2.3} + + Args: + m: mass to evaluate the distribution at + newopts: optional dict to override the default values. + + Returns: + 'probability' of distribution function evaluated at m + """ + + # Default parameters and override them + default = { + "m0": 0.1, + "m1": 0.5, + "m2": 1, + "mmax": 100, + "p1": -1.3, + "p2": -2.3, + "p3": -2.3, + } + + value_dict = default.copy() + + if newopts: + value_dict.update(newopts) + + return self.three_part_powerlaw( + m, + value_dict["m0"], + value_dict["m1"], + value_dict["m2"], + value_dict["mmax"], + value_dict["p1"], + value_dict["p2"], + value_dict["p3"], + ) - chabrier_logmc = math.log10(0.079) - chabrier_sigma2 = 0.69 * 0.69 - chabrier_a1 = 0.158 - chabrier_a2 = 4.43e-2 - chabrier_x = -1.3 - if m <= 0: - msg = "below bounds" - raise ValueError(msg) - if 0 < m < 1.0: - A = 0.158 - dm = math.log10(m) - chabrier_logmc - prob = chabrier_a1 * math.exp(-(dm ** 2) / (2.0 * chabrier_sigma2)) - else: - prob = chabrier_a2 * (m ** chabrier_x) - prob = prob / (0.1202462 * m * math.log(10)) - return prob + def ktg93(self, + m: Union[int, float], + newopts: dict = None) -> Union[int, float]: + """ + Probability distribution function for KTG93 IMF, where the default values to the three_part_powerlaw are: default = {"m0": 0.1, "m1": 0.5, "m2": 1, "mmax": 80, "p1": -1.3, "p2": -2.2,"p3": -2.7} + + Args: + m: mass to evaluate the distribution at + newopts: optional dict to override the default values. 
+ + Returns: + 'probability' of distribution function evaluated at m + """ + + defaults = { + "m0": 0.1, + "m1": 0.5, + "m2": 1.0, + "mmax": 80, + "p1": -1.3, + "p2": -2.2, + "p3": -2.7, + } + value_dict = defaults.copy() + + if newopts: + value_dict.update(newopts) + + return self.three_part_powerlaw( + m, + value_dict["m0"], + value_dict["m1"], + value_dict["m2"], + value_dict["mmax"], + value_dict["p1"], + value_dict["p2"], + value_dict["p3"], + ) -######################################################################## -# Binary fractions -######################################################################## + def imf_tinsley1980(self, + m: Union[int, float]) -> Union[int, float]: + """ + Probability distribution function for Tinsley 1980 IMF (defined up until 80Msol): self.three_part_powerlaw(m, 0.1, 2.0, 10.0, 80.0, -2.0, -2.3, -3.3) + Args: + m: mass to evaluate the distribution at -def Arenou2010_binary_fraction(m: Union[int, float]) -> Union[int, float]: - """ - Arenou 2010 function for the binary fraction as f(M1) + Returns: + 'probability' of distribution function evaluated at m + """ - GAIA-C2-SP-OPM-FA-054 - www.rssd.esa.int/doc_fetch.php?id=2969346 + return self.three_part_powerlaw(m, 0.1, 2.0, 10.0, 80.0, -2.0, -2.3, -3.3) - Args: - m: mass to evaluate the distribution at - Returns: - binary fraction at m - """ + def imf_scalo1986(self, + m: Union[int, float]) -> Union[int, float]: + """ + Probability distribution function for Scalo 1986 IMF (defined up until 80Msol): self.three_part_powerlaw(m, 0.1, 1.0, 2.0, 80.0, -2.35, -2.35, -2.70) - return 0.8388 * math.tanh(0.688 * m + 0.079) + Args: + m: mass to evaluate the distribution at + Returns: + 'probability' of distribution function evaluated at m + """ + return self.three_part_powerlaw(m, 0.1, 1.0, 2.0, 80.0, -2.35, -2.35, -2.70) -# print(Arenou2010_binary_fraction(0.4)) + def imf_scalo1998(self, + m: Union[int, float]) -> Union[int, float]: + """ + From Scalo 1998 -def raghavan2010_binary_fraction(m: Union[int, float]) -> Union[int, float]: - """ - Fit to the Raghavan 2010 binary fraction as a function of - spectral type (Fig 12). Valid for local stars (Z=Zsolar). + Probability distribution function for Scalo 1998 IMF (defined up until 80Msol): self.three_part_powerlaw(m, 0.1, 1.0, 10.0, 80.0, -1.2, -2.7, -2.3) - The spectral type is converted mass by use of the ZAMS - effective temperatures from binary_c/BSE (at Z=0.02) - and the new "long_spectral_type" function of binary_c - (based on Jaschek+Jaschek's Teff-spectral type table). 
+ Args: + m: mass to evaluate the distribution at - Rob then fitted the result + Returns: + 'probability' of distribution function evaluated at m + """ - Args: - m: mass to evaluate the distribution at + return self.three_part_powerlaw(m, 0.1, 1.0, 10.0, 80.0, -1.2, -2.7, -2.3) - Returns: - binary fraction at m - """ - return min( - 1.0, - max( - (m ** 0.1) * (5.12310e-01) + (-1.02070e-01), - (1.10450e00) * (m ** (4.93670e-01)) + (-6.95630e-01), - ), - ) + def imf_chabrier2003(self, + m: Union[int, float]) -> Union[int, float]: + """ + Probability distribution function for IMF of Chabrier 2003 PASP 115:763-795 + Args: + m: mass to evaluate the distribution at -# print(raghavan2010_binary_fraction(2)) - -######################################################################## -# Period distributions -######################################################################## - - -def duquennoy1991(logper: Union[int, float]) -> Union[int, float]: - """ - Period distribution from Duquennoy + Mayor 1991. Evaluated the function gaussian(logper, 4.8, 2.3, -2, 12) - - Args: - logper: logarithm of period to evaluate the distribution at - - Returns: - 'probability' at gaussian(logper, 4.8, 2.3, -2, 12) - """ - return gaussian(logper, 4.8, 2.3, -2, 12) - - -def sana12( - M1: Union[int, float], - M2: Union[int, float], - a: Union[int, float], - P: Union[int, float], - amin: Union[int, float], - amax: Union[int, float], - x0: Union[int, float], - x1: Union[int, float], - p: Union[int, float], -) -> Union[int, float]: - """ - distribution of initial orbital periods as found by Sana et al. (2012) - which is a flat distribution in ln(a) and ln(P) respectively for stars - * less massive than 15Msun (no O-stars) - * mass ratio q=M2/M1<0.1 - * log(P)<0.15=x0 and log(P)>3.5=x1 - and is be given by dp/dlogP ~ (logP)^p for all other binary configurations (default p=-0.55) - - arguments are M1, M2, a, Period P, amin, amax, x0=log P0, x1=log P1, p - - example args: 10, 5, sep(M1, M2, P), sep, ?, -2, 12, -0.55 - - # TODO: Fix this function! - - Args: - M1: Mass of primary - M2: Mass of secondary - a: separation of binary - P: period of binary - amin: minimum separation of the distribution (lower bound of the range) - amax: maximum separation of the distribution (upper bound of the range) - x0: log of minimum period of the distribution (lower bound of the range) - x1: log of maximum period of the distribution (upper bound of the range) - p: slope of the distribution - - Returns: - 'probability' of orbital period P given the other parameters - """ - - res = 0 - if (M1 < 15.0) or (M2 / M1 < 0.1): - res = 1.0 / (math.log(amax) - math.log(amin)) - else: - p1 = 1.0 + p - - # For more details see the LyX document of binary_c for this distribution - # where the variables and normalisations are given - # we use the notation x=log(P), xmin=log(Pmin), x0=log(P0), ... 
to determine the - x = LOG_LN_CONVERTER * math.log(P) - xmin = LOG_LN_CONVERTER * math.log(calc_period_from_sep(M1, M2, amin)) - xmax = LOG_LN_CONVERTER * math.log(calc_period_from_sep(M1, M2, amax)) - - # print("M1 M2 amin amax P x xmin xmax") - # print(M1, M2, amin, amax, P, x, xmin, xmax) - # my $x0 = 0.15; - # my $x1 = 3.5; - - A1 = 1.0 / ( - x0 ** p * (x0 - xmin) + (x1 ** p1 - x0 ** p1) / p1 + x1 ** p * (xmax - x1) - ) - A0 = A1 * x0 ** p - A2 = A1 * x1 ** p + Returns: + 'probability' of distribution function evaluated at m + """ - if x < x0: - res = 3.0 / 2.0 * LOG_LN_CONVERTER * A0 - elif x > x1: - res = 3.0 / 2.0 * LOG_LN_CONVERTER * A2 + chabrier_logmc = math.log10(0.079) + chabrier_sigma2 = 0.69 * 0.69 + chabrier_a1 = 0.158 + chabrier_a2 = 4.43e-2 + chabrier_x = -1.3 + if m <= 0: + msg = "below bounds" + raise ValueError(msg) + if 0 < m < 1.0: + A = 0.158 + dm = math.log10(m) - chabrier_logmc + prob = chabrier_a1 * math.exp(-(dm ** 2) / (2.0 * chabrier_sigma2)) else: - res = 3.0 / 2.0 * LOG_LN_CONVERTER * A1 * x ** p - - return res - - -# print(sana12(10, 2, 10, 100, 1, 1000, math.log(10), math.log(1000), 6)) - - -def interpolate_in_mass_izzard2012( - M: Union[int, float], high: Union[int, float], low: Union[int, float] -) -> Union[int, float]: - """ - Function to interpolate in mass - - TODO: fix this function. - TODO: describe the args - high: at M=16.3 - low: at 1.15 - - Args: - M: mass - high: - low: + prob = chabrier_a2 * (m ** chabrier_x) + prob = prob / (0.1202462 * m * math.log(10)) + return prob - Returns: - """ + ######################################################################## + # Binary fractions + ######################################################################## - log_interpolation = False - if log_interpolation: - return (high - low) / (math.log10(16.3) - math.log10(1.15)) * ( - math.log10(M) - math.log10(1.15) - ) + low - else: - return (high - low) / (16.3 - 1.15) * (M - 1.15) + low + def Arenou2010_binary_fraction(self, + m: Union[int, float]) -> Union[int, float]: + """ + Arenou 2010 function for the binary fraction as f(M1) + GAIA-C2-SP-OPM-FA-054 + www.rssd.esa.int/doc_fetch.php?id=2969346 -def Izzard2012_period_distribution( - P: Union[int, float], M1: Union[int, float], log10Pmin: Union[int, float] = -1.0 -) -> Union[int, float]: - """ - period distribution which interpolates between - Duquennoy and Mayor 1991 at low mass (G/K spectral type <~1.15Msun) - and Sana et al 2012 at high mass (O spectral type >~16.3Msun) + Args: + m: mass to evaluate the distribution at - This gives dN/dlogP, i.e. DM/Raghavan's Gaussian in log10P at low mass - and Sana's power law (as a function of logP) at high mass + Returns: + binary fraction at m + """ - TODO: fix this function + return 0.8388 * math.tanh(0.688 * m + 0.079) - Args: - P: period - M1: Primary star mass - log10Pmin: minimum period in base log10 (optional) - Returns: - 'probability' of interpolated distribution function at P and M1 + def raghavan2010_binary_fraction(self, + m: Union[int, float]) -> Union[int, float]: + """ + Fit to the Raghavan 2010 binary fraction as a function of + spectral type (Fig 12). Valid for local stars (Z=Zsolar). - """ + The spectral type is converted mass by use of the ZAMS + effective temperatures from binary_c/BSE (at Z=0.02) + and the new "long_spectral_type" function of binary_c + (based on Jaschek+Jaschek's Teff-spectral type table). 
- # Check if there is input and force it to be at least 1 - log10Pmin = max(-1.0, log10Pmin) + Rob then fitted the result - # save mass input and limit mass used (M1 from now on) to fitted range - Mwas = M1 - M1 = max(1.15, min(16.3, M1)) - # print("Izzard2012 called for M={} (truncated to {}), P={}\n".format(Mwas, M1, P)) + Args: + m: mass to evaluate the distribution at - # Calculate the normalisations - # need to normalise the distribution for this mass - # (and perhaps secondary mass) - prepare_dict(distribution_constants, ["Izzard2012", M1]) - if not distribution_constants["Izzard2012"][M1].get(log10Pmin): - distribution_constants["Izzard2012"][M1][ - log10Pmin - ] = 1 # To prevent this loop from going recursive - N = 200.0 # Resolution for normalisation. I hope 1000 is enough - dlP = (10.0 - log10Pmin) / N - C = 0 # normalisation constant. - # print("LOOP",log10Pmin) - for lP in np.arange(log10Pmin, 10, dlP): - C += dlP * Izzard2012_period_distribution(10 ** lP, M1, log10Pmin) + Returns: + binary fraction at m + """ - distribution_constants["Izzard2012"][M1][log10Pmin] = 1.0 / C - # print( - # "Normalisation constant for Izzard2012 M={} (log10Pmin={}) is\ - # {}\n".format( - # M1, log10Pmin, distribution_constants["Izzard2012"][M1][log10Pmin] - # ) - # ) - - lP = math.log10(P) - # log period - - # # fits - mu = interpolate_in_mass_izzard2012(M1, -17.8, 5.03) - sigma = interpolate_in_mass_izzard2012(M1, 9.18, 2.28) - K = interpolate_in_mass_izzard2012(M1, 6.93e-2, 0.0) - nu = interpolate_in_mass_izzard2012(M1, 0.3, -1) - g = 1.0 / (1.0 + 1e-30 ** (lP - nu)) - - lPmu = lP - mu - # print( - # "M={} ({}) P={} : mu={} sigma={} K={} nu={} norm=%g\n".format( - # Mwas, M1, P, mu, sigma, K, nu - # ) - # ) - - # print "FUNC $distdata{Izzard2012}{$M}{$log10Pmin} * (exp(- (x-$mu)**2/(2.0*$sigma*$sigma) ) + $K/MAX(0.1,$lP)) * $g;\n"; - - if (lP < log10Pmin) or (lP > 10.0): - return 0 - - else: - return ( - distribution_constants["Izzard2012"][M1][log10Pmin] - * (math.exp(-lPmu * lPmu / (2.0 * sigma * sigma)) + K / max(0.1, lP)) - * g + return min( + 1.0, + max( + (m ** 0.1) * (5.12310e-01) + (-1.02070e-01), + (1.10450e00) * (m ** (4.93670e-01)) + (-6.95630e-01), + ), ) + ######################################################################## + # Period distributions + ######################################################################## + + + def duquennoy1991(self, + logper: Union[int, float]) -> Union[int, float]: + """ + Period distribution from Duquennoy + Mayor 1991. Evaluated the function self.gaussian(logper, 4.8, 2.3, -2, 12) + + Args: + logper: logarithm of period to evaluate the distribution at + + Returns: + 'probability' at self.gaussian(logper, 4.8, 2.3, -2, 12) + """ + return self.gaussian(logper, 4.8, 2.3, -2, 12) + + + def sana12( + self, + M1: Union[int, float], + M2: Union[int, float], + a: Union[int, float], + P: Union[int, float], + amin: Union[int, float], + amax: Union[int, float], + x0: Union[int, float], + x1: Union[int, float], + p: Union[int, float], + ) -> Union[int, float]: + """ + distribution of initial orbital periods as found by Sana et al. 
(2012)
+        which is a flat distribution in ln(a) and ln(P) respectively for stars
+        * less massive than 15Msun (no O-stars)
+        * mass ratio q=M2/M1<0.1
+        * log(P)<0.15=x0 and log(P)>3.5=x1
+        and is given by dp/dlogP ~ (logP)^p for all other binary configurations (default p=-0.55)
+
+        arguments are M1, M2, a, Period P, amin, amax, x0=log P0, x1=log P1, p
+
+        example args: 10, 5, sep(M1, M2, P), sep, ?, -2, 12, -0.55
+
+        # TODO: Fix this function!
+
+        Args:
+            M1: Mass of primary
+            M2: Mass of secondary
+            a: separation of binary
+            P: period of binary
+            amin: minimum separation of the distribution (lower bound of the range)
+            amax: maximum separation of the distribution (upper bound of the range)
+            x0: log of minimum period of the distribution (lower bound of the range)
+            x1: log of maximum period of the distribution (upper bound of the range)
+            p: slope of the distribution
+
+        Returns:
+            'probability' of orbital period P given the other parameters
+        """
+
+        res = 0
+        if (M1 < 15.0) or (M2 / M1 < 0.1):
+            res = 1.0 / (math.log(amax) - math.log(amin))
+        else:
+            p1 = 1.0 + p
+
+            # For more details see the LyX document of binary_c for this distribution
+            # where the variables and normalisations are given
+            # we use the notation x=log(P), xmin=log(Pmin), x0=log(P0), ... to determine the
+            x = LOG_LN_CONVERTER * math.log(P)
+            xmin = LOG_LN_CONVERTER * math.log(calc_period_from_sep(M1, M2, amin))
+            xmax = LOG_LN_CONVERTER * math.log(calc_period_from_sep(M1, M2, amax))
+
+            A1 = 1.0 / (
+                x0 ** p * (x0 - xmin) + (x1 ** p1 - x0 ** p1) / p1 + x1 ** p * (xmax - x1)
+            )
+            A0 = A1 * x0 ** p
+            A2 = A1 * x1 ** p
+
+            if x < x0:
+                res = A0
+            elif x > x1:
+                res = A2
+            else:
+                res = A1 * x ** p
+            # the 3/2 log-conversion factor applies to all three branches
+            res *= 3.0/2.0 * LOG_LN_CONVERTER
+        return res
+
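# ---------------------------------------------------------------------
# Editor's check (standalone, not part of the patch): with the x0=0.15 and
# x1=3.5 break points and default slope p=-0.55 quoted in the docstring,
# and an assumed log-period range [-2, 12], the A constants normalise the
# piecewise density so that it integrates to one.
x0, x1, p = 0.15, 3.5, -0.55
p1 = 1.0 + p
xmin, xmax = -2.0, 12.0  # assumed range, for illustration only
A1 = 1.0 / (x0 ** p * (x0 - xmin) + (x1 ** p1 - x0 ** p1) / p1 + x1 ** p * (xmax - x1))
A0 = A1 * x0 ** p
A2 = A1 * x1 ** p
integral = A0 * (x0 - xmin) + A1 * (x1 ** p1 - x0 ** p1) / p1 + A2 * (xmax - x1)
assert abs(integral - 1.0) < 1e-12
# ---------------------------------------------------------------------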
+    def interpolate_in_mass_izzard2012(
+        self,
+        M: Union[int, float], high: Union[int, float], low: Union[int, float]
+    ) -> Union[int, float]:
+        """
+        Function to interpolate in mass
+
+        TODO: fix this function.
+
+        Args:
+            M: mass
+            high: value of the fit at M=16.3
+            low: value of the fit at M=1.15
+
+        Returns:
+            the (log-)linearly interpolated value between low (at M=1.15) and high (at M=16.3)
+        """
+
+        log_interpolation = False
+
+        if log_interpolation:
+            return (high - low) / (math.log10(16.3) - math.log10(1.15)) * (
+                math.log10(M) - math.log10(1.15)
+            ) + low
+        else:
+            return (high - low) / (16.3 - 1.15) * (M - 1.15) + low
+
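# ---------------------------------------------------------------------
# Editor's check (standalone copy of the linear branch, not part of the
# patch): the fit maps M=1.15 to low and M=16.3 to high, linearly between.
def _interp(M, high, low):
    return (high - low) / (16.3 - 1.15) * (M - 1.15) + low

assert _interp(1.15, 9.18, 2.28) == 2.28               # low end of the fit
assert abs(_interp(16.3, 9.18, 2.28) - 9.18) < 1e-12   # high end of the fit
# ---------------------------------------------------------------------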
+            for lP in np.arange(log10Pmin, 10, dlP):
+                C += dlP * self.Izzard2012_period_distribution(10 ** lP, M1, log10Pmin)
+
+            distribution_constants["Izzard2012"][M1][log10Pmin] = 1.0 / C
+
+        lP = math.log10(P)  # log period
+
+        # fits
+        mu = self.interpolate_in_mass_izzard2012(M1, -17.8, 5.03)
+        sigma = self.interpolate_in_mass_izzard2012(M1, 9.18, 2.28)
+        K = self.interpolate_in_mass_izzard2012(M1, 6.93e-2, 0.0)
+        nu = self.interpolate_in_mass_izzard2012(M1, 0.3, -1)
+        g = 1.0 + 1e-30 ** (lP - nu)
+        lPmu = lP - mu
+
+        if (lP < log10Pmin) or (lP > 10.0):
+            return 0

-    Args:
-        z: redshift

+        else:
+            return (
+                distribution_constants["Izzard2012"][M1][log10Pmin]
+                * (math.exp(-lPmu * lPmu / (2.0 * sigma * sigma)) + K / max(0.1, lP))
+                / g
+            )

-    Returns:
-        Cosmic star formation rate in Solar mass year^-1 mega parsec^-3
-    """
-    CSFH = 0.015 * ((1 + z) ** 2.7) / (1 + (((1 + z) / 2.9) ** 5.6))

+    ########################################################################
+    # Mass ratio distributions
+    ########################################################################

+    def flatsections(self, x: float, opts: dict) -> Union[float, int]:
+        """
+        Function to generate flat distributions, possibly in multiple sections

-    return CSFH

+        Args:
+            x: mass ratio value
+            opts: list containing the flat sections, which are themselves dictionaries with keys "max" (upper bound), "min" (lower bound) and "height" (value)
+
+        Returns:
+            probability of that mass ratio.
+        """

-########################################################################
-# Metallicity distributions
-########################################################################

+        c = 0
+        y = 0
+
+        for opt in opts:
+            dc = (opt["max"] - opt["min"]) * opt["height"]
+            c += dc
+            if opt["min"] <= x <= opt["max"]:
+                y = opt["height"]
+
+        y /= c
+
+        return y

+    ########################################################################
+    # Eccentricity distributions
+    ########################################################################

-########################################################################
-# Moe & DiStefano 2017 functions
-#
-# The code below are functions that are used to set up and interpolate
-# on the Moe & DiStefano 2017 data. The interpolators take the last
-# known value if we try to interpolate outside of the tables.
-# There are still some open tasks and improvements that can be made:
-#
-# TODO: Solve the memory issues that are present.
-#       Are the interpolators not cleaned?
-# TODO: Parallelize the setting up of the interpolators
-# TODO: Generalise the code such that we can input other/newer tables

+    ########################################################################
+    # Star formation histories
+    ########################################################################

-########################################################################
-import py_rinterpolate

+    def cosmic_SFH_madau_dickinson2014(self, z):
+        """
+        Cosmic star formation history distribution from Madau & Dickinson 2014 (https://arxiv.org/pdf/1403.0007.pdf)

-# Global dictionary to store values in
-Moecache = {}

+        Args:
+            z: redshift
+
+        Returns:
+            Cosmic star formation rate in Solar mass year^-1 mega parsec^-3
+        """

-def poisson(lambda_val, n, nmax=None, verbosity=0):
-    """
-    Function that calculates the Poisson value and normalises
-    TODO: improve the description
-    """

+        CSFH = 0.015 * ((1 + z) ** 2.7) / (1 + (((1 + z) / 2.9) ** 5.6))
+        return CSFH

-    cachekey = "{} {} {}".format(lambda_val, n, nmax)

-    if distribution_constants.get("poisson_cache", None):
-        if distribution_constants["poisson_cache"].get(cachekey, None):
-            p_val = distribution_constants["poisson_cache"][cachekey]

+    ########################################################################
+    # Metallicity distributions
+    ########################################################################

-            return p_val

-    # Poisson distribution : note, n can be zero

+    ########################################################################
+    # Moe & DiStefano 2017 functions
     #
-    # nmax is the truncation : if set, we normalise
-    # correctly.
-    p_val = _poisson(lambda_val, n)
-
-    if nmax:
-        I_poisson = 0
-        for i in range(nmax + 1):
-            I_poisson += _poisson(lambda_val, i)
-        p_val = p_val / I_poisson
-
-    # Add to cache
-    if not distribution_constants.get("poisson_cache", None):
-        distribution_constants["poisson_cache"] = {}
-    distribution_constants["poisson_cache"][cachekey] = p_val
-
-    return p_val
-
-
-def _poisson(lambda_val, n):
-    """
-    Function to return the Poisson value
-    """

+    # The code below contains functions that are used to set up and interpolate
+    # on the Moe & DiStefano 2017 data. The interpolators take the last
+    # known value if we try to interpolate outside of the tables.
+    # There are still some open tasks and improvements that can be made:
+    #
+    # TODO: Solve the memory issues that are present.
+    #       Are the interpolators not cleaned?
+    # TODO: Parallelize the setting up of the interpolators
+    # TODO: Generalise the code such that we can input other/newer tables
+    ########################################################################
+
+    def poisson(self, lambda_val, n, nmax=None, verbosity=0):
+        """
+        Function that calculates the Poisson value and normalises
+        TODO: improve the description
+        """
+
+        cachekey = "{} {} {}".format(lambda_val, n, nmax)
+
+        if distribution_constants.get("poisson_cache", None):
+            if distribution_constants["poisson_cache"].get(cachekey, None):
+                p_val = distribution_constants["poisson_cache"][cachekey]
+                return p_val
+
+        # Poisson distribution : note, n can be zero
+        #
+        # nmax is the truncation : if set, we normalise
+        # correctly.
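+        # Note added in editing: when nmax is given, the value computed below
+        # is the truncated, renormalised Poisson probability
+        #     P(n | lambda) / sum_{i=0}^{nmax} P(i | lambda),
+        # so that the probabilities for n = 0..nmax sum to 1.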
+ p_val = self._poisson(lambda_val, n) - return (lambda_val ** n) * np.exp(-lambda_val) / (1.0 * math.factorial(n)) + if nmax: + I_poisson = 0 + for i in range(nmax + 1): + I_poisson += self._poisson(lambda_val, i) + p_val /= I_poisson + # Add to cache + if not distribution_constants.get("poisson_cache", None): + distribution_constants["poisson_cache"] = {} + distribution_constants["poisson_cache"][cachekey] = p_val -def get_max_multiplicity(multiplicity_array): - """ - Function to get the maximum multiplicity - """ + return p_val - max_multiplicity = 0 - for n in range(4): - if multiplicity_array[n] > 0: - max_multiplicity = n + 1 - return max_multiplicity + def _poisson(self,lambda_val, n): + """ + Function to return the Poisson value + """ + return (lambda_val ** n) * np.exp(-lambda_val) / (1.0 * math.factorial(n)) -def merge_multiplicities(result_array, max_multiplicity, verbosity=0): - """ - Function to fold the multiplicities higher than the max_multiplicity onto the max_multiplicity - if max_multiplicity == 1: - All the multiplicities are folded onto multiplicity == 1. This will always total to 1 - if max_multiplicity == 2: - The multiplicity fractions of the triple and quadruples are folded onto that of the binary multiplicity fraction - if max_multiplicity == 3: - The multiplicity fractions of the quadruples are folded onto that of the triples - """ + def get_max_multiplicity(self,multiplicity_array): + """ + Function to get the maximum multiplicity + """ + max_multiplicity = 0 + for n in range(4): + if multiplicity_array[n] > 0: + max_multiplicity = n + 1 + return max_multiplicity + + + def merge_multiplicities(self,result_array, max_multiplicity, verbosity=0): + """ + Function to fold the multiplicities higher than the max_multiplicity onto the max_multiplicity + + if max_multiplicity == 1: + All the multiplicities are folded onto multiplicity == 1. This will always total to 1 + if max_multiplicity == 2: + The multiplicity fractions of the triple and quadruples are folded onto that of the binary multiplicity fraction + if max_multiplicity == 3: + The multiplicity fractions of the quadruples are folded onto that of the triples + """ + + if not max_multiplicity in range(1, 5): + msg = "\tMoe and di Stefano 2017: merge_multiplicities: max_multiplicity has to be between 1 and 4. It is {} now".format( + max_multiplicity + ) + verbose_print( + msg, + verbosity, + 0, + ) + raise ValueError(msg) - if not max_multiplicity in range(1, 5): - msg = "\tMoe and di Stefano 2017: merge_multiplicities: max_multiplicity has to be between 1 and 4. 
It is {} now".format( - max_multiplicity + # Fold multiplicities: + verbose_print( + "\tMoe and di Stefano 2017: merge_multiplicities: Merging multiplicities with initial array {} and max multiplicity {}".format( + result_array, max_multiplicity + ), + verbosity, + _MOE2017_VERBOSITY_LEVEL, ) + for i in range(max_multiplicity, len(result_array))[::-1]: + result_array[i - 1] += result_array[i] + result_array[i] = 0 verbose_print( - msg, + "\tMoe and di Stefano 2017: merge_multiplicities: Merging multiplicities to new array {}".format( + result_array + ), verbosity, - 0, + _MOE2017_VERBOSITY_LEVEL, ) - raise ValueError(msg) - - # Fold multiplicities: - verbose_print( - "\tMoe and di Stefano 2017: merge_multiplicities: Merging multiplicities with initial array {} and max multiplicity {}".format( - result_array, max_multiplicity - ), - verbosity, - _MOE2017_VERBOSITY_LEVEL, - ) - for i in range(max_multiplicity, len(result_array))[::-1]: - result_array[i - 1] += result_array[i] - result_array[i] = 0 - verbose_print( - "\tMoe and di Stefano 2017: merge_multiplicities: Merging multiplicities to new array {}".format( - result_array - ), - verbosity, - _MOE2017_VERBOSITY_LEVEL, - ) - return result_array + return result_array -def normalize_dict(result_dict, verbosity=0): - """ - Function to normalise a dictionary - """ - sum_result = sum([result_dict[key] for key in result_dict.keys()]) - for key in result_dict.keys(): - result_dict[key] = result_dict[key] / sum_result - return result_dict -def Moe_di_Stefano_2017_multiplicity_fractions(options, verbosity=0): - """ - Function that creates a list of probability fractions and - normalises and merges them according to the users choice. + def Moe_di_Stefano_2017_multiplicity_fractions(self, options, verbosity=0): + """ + Function that creates a list of probability fractions and + normalises and merges them according to the users choice. - TODO: make an extrapolation functionality in this. log10(1.6e1) - is low, we can probably go a bit further + TODO: make an extrapolation functionality in this. log10(1.6e1) + is low, we can probably go a bit further - The default result that is returned when sampling the mass outside - of the mass range is now the last known value + The default result that is returned when sampling the mass outside + of the mass range is now the last known value - Returns a list of multiplicity fractions for a given input of mass - """ + Returns a list of multiplicity fractions for a given input of mass + """ - # Use the global Moecache - global Moecache + # Use the global Moecache + global Moecache - multiplicity_modulator_array = np.array( - options["multiplicity_modulator"] - ) # Modulator array + multiplicity_modulator_array = np.array( + options["multiplicity_modulator"] + ) # Modulator array - # Check for length - if not len(multiplicity_modulator_array) == 4: - msg = "Multiplicity modulator has to have 4 elements. Now it is {}, len: {}".format( - multiplicity_modulator_array, len(multiplicity_modulator_array) - ) - verbose_print( - msg, - verbosity, - 0, - ) - raise ValueError(msg) - - # Set up some arrays - full_fractions_array = np.zeros(4) # Meant to contain the real fractions - weighted_fractions_array = np.zeros( - 4 - ) # Meant to contain the fractions multiplied by the multiplicity modulator - - # Get max multiplicity - max_multiplicity = get_max_multiplicity(multiplicity_modulator_array) - - # ... 
it's better to interpolate the multiplicity and then - # use a Poisson distribution to calculate the fractions - # (this is more accurate) - - # Set up the multiplicity interpolator - if not Moecache.get("rinterpolator_multiplicity", None): - Moecache["rinterpolator_multiplicity"] = py_rinterpolate.Rinterpolate( - table=Moecache["multiplicity_table"], # Contains the table of data - nparams=1, # logM1 - ndata=4, # The number of datapoints (the parameters that we want to interpolate) - verbosity=verbosity - (_MOE2017_VERBOSITY_INTERPOLATOR_LEVEL - 1), - ) + # Check for length + if not len(multiplicity_modulator_array) == 4: + msg = "Multiplicity modulator has to have 4 elements. Now it is {}, len: {}".format( + multiplicity_modulator_array, len(multiplicity_modulator_array) + ) + verbose_print( + msg, + verbosity, + 0, + ) + raise ValueError(msg) - if options["multiplicity_model"] == "Poisson": - multiplicity = Moecache["rinterpolator_multiplicity"].interpolate( - [np.log10(options["M_1"])] - )[0] + # Set up some arrays + full_fractions_array = np.zeros(4) # Meant to contain the real fractions + weighted_fractions_array = np.zeros( + 4 + ) # Meant to contain the fractions multiplied by the multiplicity modulator + + # Get max multiplicity + max_multiplicity = self.get_max_multiplicity(multiplicity_modulator_array) + + # ... it's better to interpolate the multiplicity and then + # use a Poisson distribution to calculate the fractions + # (this is more accurate) + + # Set up the multiplicity interpolator + if not Moecache.get("rinterpolator_multiplicity", None): + Moecache["rinterpolator_multiplicity"] = py_rinterpolate.Rinterpolate( + table=Moecache["multiplicity_table"], # Contains the table of data + nparams=1, # logM1 + ndata=4, # The number of datapoints (the parameters that we want to interpolate) + verbosity=verbosity - (_MOE2017_VERBOSITY_INTERPOLATOR_LEVEL - 1), + ) - # Fill the multiplicity array - for n in range(4): - full_fractions_array[n] = poisson(multiplicity, n, 3, verbosity) + if options["multiplicity_model"] == "Poisson": + multiplicity = Moecache["rinterpolator_multiplicity"].interpolate( + [np.log10(options["M_1"])] + )[0] - # Normalize it so it fills to one when taking all the multiplicities: - full_fractions_array = full_fractions_array / np.sum(full_fractions_array) + # Fill the multiplicity array + for n in range(4): + full_fractions_array[n] = self.poisson(multiplicity, n, 3, verbosity) - verbose_print( - "\tMoe and di Stefano 2017: Moe_di_Stefano_2017_multiplicity_fractions: using model {}: full_fractions_array: {}".format( - "Poisson", full_fractions_array - ), - verbosity, - _MOE2017_VERBOSITY_LEVEL, - ) + # Normalize it so it fills to one when taking all the multiplicities: + full_fractions_array = full_fractions_array / np.sum(full_fractions_array) - elif options["multiplicity_model"] == "data": - # use the fractions calculated from Moe's data directly - # - # note that in this case, there are no quadruples: these - # are combined with triples + verbose_print( + "\tMoe and di Stefano 2017: Moe_di_Stefano_2017_multiplicity_fractions: using model {}: full_fractions_array: {}".format( + "Poisson", full_fractions_array + ), + verbosity, + _MOE2017_VERBOSITY_LEVEL, + ) - # Fill with the raw values - for n in range(3): - full_fractions_array[n] = Moecache[ - "rinterpolator_multiplicity" - ].interpolate([np.log10(options["M_1"])])[n + 1] + elif options["multiplicity_model"] == "data": + # use the fractions calculated from Moe's data directly + # + # note that in this 
case, there are no quadruples: these + # are combined with triples - # Set last value - full_fractions_array[3] = 0.0 # no quadruples - verbose_print( - "\tMoe and di Stefano 2017: Moe_di_Stefano_2017_multiplicity_fractions: using model {}: full_fractions_array: {}".format( - "data", full_fractions_array - ), - verbosity, - _MOE2017_VERBOSITY_LEVEL, - ) + # Fill with the raw values + for n in range(3): + full_fractions_array[n] = Moecache[ + "rinterpolator_multiplicity" + ].interpolate([np.log10(options["M_1"])])[n + 1] - # Normalisation: - if options["normalize_multiplicities"] == "raw": - # Don't multiply by the multiplicity_array, but do give a fractions array - verbose_print( - "\tMoe and di Stefano 2017: Moe_di_Stefano_2017_multiplicity_fractions: Not normalising (using raw results): results: {}".format( - full_fractions_array - ), - verbosity, - _MOE2017_VERBOSITY_LEVEL, - ) - result = full_fractions_array + # Set last value + full_fractions_array[3] = 0.0 # no quadruples + verbose_print( + "\tMoe and di Stefano 2017: Moe_di_Stefano_2017_multiplicity_fractions: using model {}: full_fractions_array: {}".format( + "data", full_fractions_array + ), + verbosity, + _MOE2017_VERBOSITY_LEVEL, + ) - elif options["normalize_multiplicities"] == "norm": - # Multiply the full_multiplicity_fraction array by the multiplicity_multiplier_array, creating a weighted fractions array - weighted_fractions_array = full_fractions_array * multiplicity_modulator_array + # Normalisation: + if options["normalize_multiplicities"] == "raw": + # Don't multiply by the multiplicity_array, but do give a fractions array + verbose_print( + "\tMoe and di Stefano 2017: Moe_di_Stefano_2017_multiplicity_fractions: Not normalising (using raw results): results: {}".format( + full_fractions_array + ), + verbosity, + _MOE2017_VERBOSITY_LEVEL, + ) + result = full_fractions_array - # Normalise this so it is in total 1: - result = weighted_fractions_array / np.sum(weighted_fractions_array) + elif options["normalize_multiplicities"] == "norm": + # Multiply the full_multiplicity_fraction array by the multiplicity_multiplier_array, creating a weighted fractions array + weighted_fractions_array = full_fractions_array * multiplicity_modulator_array - verbose_print( - "\tMoe and di Stefano 2017: Moe_di_Stefano_2017_multiplicity_fractions: Normalising with {}. result: {}".format( - "norm", result - ), - verbosity, - _MOE2017_VERBOSITY_LEVEL, - ) + # Normalise this so it is in total 1: + result = weighted_fractions_array / np.sum(weighted_fractions_array) - elif options["normalize_multiplicities"] == "merge": - # We first take the full multiplicity array - # (i.e. not multiplied by multiplier) and do the merging - result = merge_multiplicities( - full_fractions_array, max_multiplicity, verbosity=verbosity - ) + verbose_print( + "\tMoe and di Stefano 2017: Moe_di_Stefano_2017_multiplicity_fractions: Normalising with {}. result: {}".format( + "norm", result + ), + verbosity, + _MOE2017_VERBOSITY_LEVEL, + ) + + elif options["normalize_multiplicities"] == "merge": + # We first take the full multiplicity array + # (i.e. 
not multiplied by multiplier) and do the merging
+            result = self.merge_multiplicities(
+                full_fractions_array, max_multiplicity, verbosity=verbosity
+            )

-        # Then normalise to be sure
-        result = result / np.sum(result)

+            # Then normalise to be sure
+            result = result / np.sum(result)
+
+            verbose_print(
+                "\tMoe and di Stefano 2017: Moe_di_Stefano_2017_multiplicity_fractions: Normalising with {}, max_multiplicity={} result={}".format(
+                    "merge", max_multiplicity, result
+                ),
+                verbosity,
+                _MOE2017_VERBOSITY_LEVEL,
+            )

         verbose_print(
-            "\tMoe and di Stefano 2017: Moe_di_Stefano_2017_multiplicity_fractions: Normalising with {}, max_multiplicity={} result={}".format(
-                "merge", max_multiplicity, result
+            "\tMoe and di Stefano 2017: Moe_di_Stefano_2017_multiplicity_fractions: {}".format(
+                str(result)
             ),
             verbosity,
             _MOE2017_VERBOSITY_LEVEL,
         )

-    verbose_print(
-        "\tMoe and di Stefano 2017: Moe_di_Stefano_2017_multiplicity_fractions: {}".format(
-            str(result)
-        ),
-        verbosity,
-        _MOE2017_VERBOSITY_LEVEL,
-    )

+        # return array reference
+        return result

-    # return array reference
-    return result

+    def build_q_table(self, options, m, p, verbosity=0):
+        ############################################################
+        # Build an interpolation table for q, given a mass and
+        # orbital period.
+        #
+        # m and p are labels which determine which system(s)
+        # to look up from Moe's data:
+        #
+        # m can be M1, M2, M3, M4, or if set M1+M2 etc.
+        # p can be P, P2, P3
+        #
+        # The actual values are in options:
+        #
+        # mass is in options[m]
+        # period is in options[p]
+        #
+        # Since the information from the table for Moe and di Stefano 2017 is independent of any choice we make,
+        # we need to take into account that for example our choice of minimum mass leads to
+        # a minimum q_min that is not the same as in the table
+        # We should ignore those parts of the table and renormalise.
+        # If we are below the lowest value of qmin in the table we need to extrapolate the data
+        #
+        # Anyway, the goal of this function is to provide some extrapolated values for q when we should sample outside of the boundaries
+        ############################################################

-def build_q_table(options, m, p, verbosity=0):
-    ############################################################
-    #
-    # Build an interpolation table for q, given a mass and
-    # orbital period.
-    #
-    # $m and $p are labels which determine which system(s)
-    # to look up from Moe's data:
-    #
-    # $m can be M1, M2, M3, M4, or if set M1+M2 etc.
-    # $p can be P, P2, P3
-    #
-    # The actual values are in $opts:
-    #
-    # mass is in $opts->{$m}
-    # period is $opts->{$p}
-    #
-    # Since the information from the table for Moe and di Stefano 2017 is independent of any choice we make,
-    # we need to take into account that for example our choice of minimum mass leads to
-    # a minimum q_min that is not the same as in the table
-    # We should ignore those parts of the table and renormalise.
-    # If we are below the lowest value of qmin in the table we need to extrapolate the data
-    #
-    # Anyway, the goal of this function is to provide some extrapolated values for q when we should sample outside of the boundaries
-    ############################################################
-
-    # We can check if we have a cached value for this already:
-    # TODO: fix this cache check.
- incache = False - if Moecache.get("rinterpolator_q_metadata", None): - if (Moecache["rinterpolator_q_metadata"].get(m, None)) and ( - Moecache["rinterpolator_q_metadata"].get(p, None) - ): - if (Moecache["rinterpolator_q_metadata"][m] == options[m]) and ( - Moecache["rinterpolator_q_metadata"][p] == options[p] + # We can check if we have a cached value for this already: + # TODO: fix this cache check. + incache = False + if Moecache.get("rinterpolator_q_metadata", None): + if (Moecache["rinterpolator_q_metadata"].get(m, None)) and ( + Moecache["rinterpolator_q_metadata"].get(p, None) ): - incache = True - - verbose_print( - "\tMoe and di Stefano 2017: build_q_table: Found cached values for m={} p={}".format( - options[m], options[p] - ), - verbosity, - _MOE2017_VERBOSITY_LEVEL, - ) - else: - verbose_print( - "\tMoe and di Stefano 2017: build_q_table: Cached values for different m={} p={}. Freeing current table and making new table".format( - options[m], options[p] - ), - verbosity, - _MOE2017_VERBOSITY_LEVEL, - ) - - # - if not incache: - # trim and/or expand the table to the range $qmin to $qmax. + if (Moecache["rinterpolator_q_metadata"][m] == options[m]) and ( + Moecache["rinterpolator_q_metadata"][p] == options[p] + ): + incache = True - # qmin is set by the minimum stellar mass : below this - # the companions are planets - # qmin = options["ranges"]["M"][ - # 0 - # ] # TODO: this lower range must not be lower than Mmin. - - qmin = options["Mmin"] / options["M_1"] - verbose_print( - "\tMoe and di Stefano 2017: build_q_table qmin: {}".format( - qmin, - ), - verbosity, - _MOE2017_VERBOSITY_LEVEL, - ) - - # qmax = maximum_mass_ratio_for_RLOF(options[m], options[p]) - # TODO: change this to the above - qmax = 1 + verbose_print( + "\tMoe and di Stefano 2017: build_q_table: Found cached values for m={} p={}".format( + options[m], options[p] + ), + verbosity, + _MOE2017_VERBOSITY_LEVEL, + ) + else: + verbose_print( + "\tMoe and di Stefano 2017: build_q_table: Cached values for different m={} p={}. Freeing current table and making new table".format( + options[m], options[p] + ), + verbosity, + _MOE2017_VERBOSITY_LEVEL, + ) - # qdata contains the table that we modify: we get - # the original data by interpolating Moe's table - qdata = {} - can_renormalize = 1 + # + if not incache: + # trim and/or expand the table to the range qmin to qmax. - qeps = 1e-8 # small number but such that qeps+1 != 1 - if qeps + 1 == 1.0: + # qmin is set by the minimum stellar mass + qmin = options["Mmin"] / options["M_1"] verbose_print( - "\tMoe and di Stefano 2017: build_q_table: qeps (= {}) +1 == 1. Make qeps larger".format( - qeps, + "\tMoe and di Stefano 2017: build_q_table qmin: {}".format( + qmin, ), verbosity, _MOE2017_VERBOSITY_LEVEL, ) - if qmin >= qmax: - # there may be NO binaries in this part of the parameter space: - # in which case, set up a table with lots of zero in it - - qdata = {0: 0, 1: 0} - can_renormalize = 0 + # qmax = maximum_mass_ratio_for_RLOF(options[m], options[p]) + # TODO: change this to the above + qmax = 1 - else: - # qmin and qmax mean we'll get something non-zero + # qdata contains the table that we modify: we get + # the original data by interpolating Moe's table + qdata = {} can_renormalize = 1 - # require extrapolation sets whether we need to extrapolate - # at the low and high ends - require_extrapolation = {} - - if qmin >= 0.15: - # qmin is inside Moe's table : this is easy, - # we just keep points from qmin at the low - # end to qmax at the high end. 
- require_extrapolation["low"] = 0 - require_extrapolation[ - "high" - ] = 1 # TODO: shouldn't the extrapolation need to happen if qmax > 0.95 - qdata[qmin] = Moecache["rinterpolator_q"].interpolate( - [np.log10(options[m]), np.log10(options[p]), qmin] - )[0] + qeps = 1e-8 # small number but such that qeps+1 != 1 + if qeps + 1 == 1.0: + verbose_print( + "\tMoe and di Stefano 2017: build_q_table: qeps (= {}) +1 == 1. Make qeps larger".format( + qeps, + ), + verbosity, + _MOE2017_VERBOSITY_LEVEL, + ) - for q in np.arange(0.15, 0.950001, 0.1): - if (q >= qmin) and (q <= qmax): - qdata[q] = Moecache["rinterpolator_q"].interpolate( - [np.log10(options[m]), np.log10(options[p]), q] - )[0] - else: - require_extrapolation["low"] = 1 - require_extrapolation["high"] = 1 - if qmax < 0.15: - # qmax < 0.15 which is off the edge - # of the table. In this case, choose - # two points at q=0.15 and 0.16 and interpolate - # at these in case we want to extrapolate. - for q in [0.15, 0.16]: - qdata[q] = Moecache["rinterpolator_q"].interpolate( - [np.log10(options[m]), np.log10(options[p]), q] - )[0] - else: - # qmin < 0.15 and qmax > 0.15, so we - # have to generate Moe's table for - # q = 0.15 (i.e. 0.1 to 0.2) to 0.95 (0.9 to 1) - # as a function of M1 and orbital period, - # to obtain the q distribution data. - - for q in np.arange(0.15, np.min([0.950001, qmax + 0.0001]), 0.1): - val = Moecache["rinterpolator_q"].interpolate( - [np.log10(options[m]), np.log10(options[p]), q] - )[0] - qdata[q] = val + if qmin >= qmax: + # there may be NO binaries in this part of the parameter space: + # in which case, set up a table with lots of zero in it - # just below qmin, if qmin>qeps, we want nothing - if qmin - 0.15 > qeps: - q = qmin - qeps - qdata[q] = 0 - require_extrapolation["low"] = 0 + qdata = {0: 0, 1: 0} + can_renormalize = 0 - # just above qmax, if qmax<1, we want nothing - if qmax < 0.95: - q = qmax + qeps - qdata[q] = 0 - require_extrapolation["high"] = 0 + else: + # qmin and qmax mean we'll get something non-zero + can_renormalize = 1 - # sorted list of qs - qs = sorted(qdata.keys()) + # require extrapolation sets whether we need to extrapolate + # at the low and high ends + require_extrapolation = {} - if len(qs) == 0: - msg = "No qs found error" - raise ValueError(msg) + if qmin >= 0.15: + # qmin is inside Moe's table : this is easy, + # we just keep points from qmin at the low + # end to qmax at the high end. + require_extrapolation["low"] = 0 + require_extrapolation[ + "high" + ] = 1 # TODO: shouldn't the extrapolation need to happen if qmax > 0.95 + qdata[qmin] = Moecache["rinterpolator_q"].interpolate( + [np.log10(options[m]), np.log10(options[p]), qmin] + )[0] - elif len(qs) == 1: - # only one q value : pretend there are two - # with a flat distribution up to 1.0. - if qs[0] == 1.0: - qs[0] = 1.0 - 1e-6 - qs.append(1) - qdata[qs[0]] = 1 - qdata[qs[1]] = 1 + for q in np.arange(0.15, 0.950001, 0.1): + if (q >= qmin) and (q <= qmax): + qdata[q] = Moecache["rinterpolator_q"].interpolate( + [np.log10(options[m]), np.log10(options[p]), q] + )[0] else: - qs.append(1) - qdata[qs[1]] = qs[0] - - # We actually should do the extrapolation now. - else: - # Loop over both the lower end and the upper end - for pre in ["low", "high"]: - if require_extrapolation[pre] == 0: - continue + require_extrapolation["low"] = 1 + require_extrapolation["high"] = 1 + if qmax < 0.15: + # qmax < 0.15 which is off the edge + # of the table. 
In this case, choose + # two points at q=0.15 and 0.16 and interpolate + # at these in case we want to extrapolate. + for q in [0.15, 0.16]: + qdata[q] = Moecache["rinterpolator_q"].interpolate( + [np.log10(options[m]), np.log10(options[p]), q] + )[0] else: - sign = -1 if pre == "low" else 1 - end_index = 0 if pre == "low" else len(qs) - 1 - indices = ( - [0, 1] if pre == "low" else [len(qs) - 1, len(qs) - 2] - ) # Based on whether we do the high or low end we need to use two different indices - method = options.get( - "q_{}_extrapolation_method".format(pre), None - ) - qlimit = qmin if pre == "low" else qmax - - verbose_print( - "\tMoe and di Stefano 2017: build_q_table: Extrapolating: Q: {} method: {}, indices: {} End index: {}".format( - pre, method, indices, end_index - ), - verbosity, - _MOE2017_VERBOSITY_LEVEL, - ) + # qmin < 0.15 and qmax > 0.15, so we + # have to generate Moe's table for + # q = 0.15 (i.e. 0.1 to 0.2) to 0.95 (0.9 to 1) + # as a function of M1 and orbital period, + # to obtain the q distribution data. + + for q in np.arange(0.15, np.min([0.950001, qmax + 0.0001]), 0.1): + val = Moecache["rinterpolator_q"].interpolate( + [np.log10(options[m]), np.log10(options[p]), q] + )[0] + qdata[q] = val + + # just below qmin, if qmin>qeps, we want nothing + if qmin - 0.15 > qeps: + q = qmin - qeps + qdata[q] = 0 + require_extrapolation["low"] = 0 + + # just above qmax, if qmax<1, we want nothing + if qmax < 0.95: + q = qmax + qeps + qdata[q] = 0 + require_extrapolation["high"] = 0 + + # sorted list of qs + qs = sorted(qdata.keys()) + + if len(qs) == 0: + msg = "No qs found error" + raise ValueError(msg) + + elif len(qs) == 1: + # only one q value : pretend there are two + # with a flat distribution up to 1.0. + if qs[0] == 1.0: + qs[0] = 1.0 - 1e-6 + qs.append(1) + qdata[qs[0]] = 1 + qdata[qs[1]] = 1 + else: + qs.append(1) + qdata[qs[1]] = qs[0] - # truncate the distribution - qdata[max(0.0, min(1.0, qlimit + sign * qeps))] = 0 + # We actually should do the extrapolation now. 
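+                # Note added in editing: the branch below extrapolates qdata at
+                # the low and/or high end. The method is read from
+                # options["q_low_extrapolation_method"] and
+                # options["q_high_extrapolation_method"] and can be None (no
+                # extrapolation), "flat", "linear", "plaw2" or "nolowq";
+                # "poly" is not implemented.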
+ else: + # Loop over both the lower end and the upper end + for pre in ["low", "high"]: + if require_extrapolation[pre] == 0: + continue + else: + sign = -1 if pre == "low" else 1 + end_index = 0 if pre == "low" else len(qs) - 1 + indices = ( + [0, 1] if pre == "low" else [len(qs) - 1, len(qs) - 2] + ) # Based on whether we do the high or low end we need to use two different indices + method = options.get( + "q_{}_extrapolation_method".format(pre), None + ) + qlimit = qmin if pre == "low" else qmax - if method == None: - # no extrapolation : just interpolate between 0.10 and 0.95 verbose_print( - "\tMoe and di Stefano 2017: build_q_table: using no extrapolations".format(), + "\tMoe and di Stefano 2017: build_q_table: Extrapolating: Q: {} method: {}, indices: {} End index: {}".format( + pre, method, indices, end_index + ), verbosity, _MOE2017_VERBOSITY_LEVEL, ) - continue - elif method == "flat": - # use the end value and extrapolate it - # with zero slope - qdata[qlimit] = qdata[qs[end_index]] - verbose_print( - "\tMoe and di Stefano 2017: build_q_table: using constant extrapolation".format(), - verbosity, - _MOE2017_VERBOSITY_LEVEL, - ) - elif method == "linear": - qdata[qlimit] = linear_extrapolation_q( - qs=qs, - indices=indices, - qlimit=qlimit, - qdata=qdata, - end_index=end_index, - verbosity=verbosity, - ) - verbose_print( - "\tMoe and di Stefano 2017: build_q_table: using linear extrapolation".format(), - verbosity, - _MOE2017_VERBOSITY_LEVEL, - ) - if pre == "low": - below_qlimit = qlimit - qeps - if below_qlimit > 0: - qdata[below_qlimit] = 0 - qdata[0] = 0 + # truncate the distribution + qdata[max(0.0, min(1.0, qlimit + sign * qeps))] = 0 + + if method == None: + # no extrapolation : just interpolate between 0.10 and 0.95 verbose_print( - "\tMoe and di Stefano 2017: build_q_table: using linear extrapolation and setting the points below the lower q bound ({}) to 0 ".format( - qlimit - ), + "\tMoe and di Stefano 2017: build_q_table: using no extrapolations".format(), verbosity, _MOE2017_VERBOSITY_LEVEL, ) + continue + elif method == "flat": + # use the end value and extrapolate it + # with zero slope + qdata[qlimit] = qdata[qs[end_index]] + verbose_print( + "\tMoe and di Stefano 2017: build_q_table: using constant extrapolation".format(), + verbosity, + _MOE2017_VERBOSITY_LEVEL, + ) + elif method == "linear": + qdata[qlimit] = self.linear_extrapolation_q( + qs=qs, + indices=indices, + qlimit=qlimit, + qdata=qdata, + end_index=end_index, + verbosity=verbosity, + ) - elif method == "plaw2": - qdata[qlimit] = powerlaw_extrapolation_q( - qs=qs, indices=indices, qdata=qdata, verbosity=verbosity - ) - - verbose_print( - "\tMoe and di Stefano 2017: build_q_table: using powerlaw extrapolation".format(), - verbosity, - _MOE2017_VERBOSITY_LEVEL, - ) - elif method == "nolowq": - newq = 0.05 - qdata[newq] = 0 - verbose_print( - "\tMoe and di Stefano 2017: build_q_table: setting lowq to 0".format(), - verbosity, - _MOE2017_VERBOSITY_LEVEL, - ) - elif method == "poly": - # TODO: consider implementing the poly method (see Perl version) - raise ValueError( - "Moe and di Stefano 2017: build_q_table: Method 'poly' not implemented" - ) + verbose_print( + "\tMoe and di Stefano 2017: build_q_table: using linear extrapolation".format(), + verbosity, + _MOE2017_VERBOSITY_LEVEL, + ) + if pre == "low": + below_qlimit = qlimit - qeps + if below_qlimit > 0: + qdata[below_qlimit] = 0 + qdata[0] = 0 + verbose_print( + "\tMoe and di Stefano 2017: build_q_table: using linear extrapolation and setting the 
points below the lower q bound ({}) to 0 ".format( + qlimit + ), + verbosity, + _MOE2017_VERBOSITY_LEVEL, + ) + + elif method == "plaw2": + qdata[qlimit] = self.powerlaw_extrapolation_q( + qs=qs, indices=indices, qdata=qdata, verbosity=verbosity + ) - else: - msg = "\tMoe and di Stefano 2017: build_q_table: Error no other methods available. The chosen method ({}) does not exist!".format( - method - ) - verbose_print( - msg, - verbosity, - _MOE2017_VERBOSITY_LEVEL, - ) - raise ValueError(msg) - - # regenerate qs in new table. This is now the updated list of qs where we have some extrapolated numbers - tmp_table = [] - for q in sorted(qdata.keys()): - tmp_table.append([q, qdata[q]]) - - # Make an interpolation table to contain our modified data - q_interpolator = py_rinterpolate.Rinterpolate( - table=tmp_table, - nparams=1, - ndata=1, # Contains the table of data # q # - verbosity=verbosity - (_MOE2017_VERBOSITY_INTERPOLATOR_LEVEL - 1), - ) - verbose_print( - "\tMoe and di Stefano 2017: build_q_table: Created a new Q table", - verbosity, - _MOE2017_VERBOSITY_LEVEL, - ) + verbose_print( + "\tMoe and di Stefano 2017: build_q_table: using powerlaw extrapolation".format(), + verbosity, + _MOE2017_VERBOSITY_LEVEL, + ) + elif method == "nolowq": + newq = 0.05 + qdata[newq] = 0 + verbose_print( + "\tMoe and di Stefano 2017: build_q_table: setting lowq to 0".format(), + verbosity, + _MOE2017_VERBOSITY_LEVEL, + ) + elif method == "poly": + # TODO: consider implementing the poly method (see Perl version) + raise ValueError( + "Moe and di Stefano 2017: build_q_table: Method 'poly' not implemented" + ) - if can_renormalize: + else: + msg = "\tMoe and di Stefano 2017: build_q_table: Error no other methods available. The chosen method ({}) does not exist!".format( + method + ) + verbose_print( + msg, + verbosity, + _MOE2017_VERBOSITY_LEVEL, + ) + raise ValueError(msg) + + # regenerate qs in new table. 
This is now the updated list of qs where we have some extrapolated numbers + tmp_table = [] + for q in sorted(qdata.keys()): + tmp_table.append([q, qdata[q]]) + + # Make an interpolation table to contain our modified data + q_interpolator = py_rinterpolate.Rinterpolate( + table=tmp_table, + nparams=1, + ndata=1, # Contains the table of data # q # + verbosity=verbosity - (_MOE2017_VERBOSITY_INTERPOLATOR_LEVEL - 1), + ) verbose_print( - "\tMoe and di Stefano 2017: build_q_table: Renormalising table", + "\tMoe and di Stefano 2017: build_q_table: Created a new Q table", verbosity, _MOE2017_VERBOSITY_LEVEL, ) - # now we integrate and renormalise (if the table is not all zero) - I = get_integration_constant_q( - q_interpolator, tmp_table, qdata, verbosity=verbosity - ) - - if I > 0: - # normalise to 1.0 by dividing the data by 1.0/$I - q_interpolator.multiply_table_column(1, 1.0 / I) + if can_renormalize: + verbose_print( + "\tMoe and di Stefano 2017: build_q_table: Renormalising table", + verbosity, + _MOE2017_VERBOSITY_LEVEL, + ) - # test this - new_I = get_integration_constant_q( + # now we integrate and renormalise (if the table is not all zero) + I = self.get_integration_constant_q( q_interpolator, tmp_table, qdata, verbosity=verbosity ) - # fail if error in integral > 1e-6 (should be ~ machine precision) - if abs(1.0 - new_I) > 1e-6: - verbose_print( - "\tMoe and di Stefano 2017: build_q_table: Error: > 1e-6 in q probability integral: {}".format( - I - ), - verbosity, - _MOE2017_VERBOSITY_LEVEL, - ) - # set this new table in the cache - Moecache["rinterpolator_q_given_{}_log10{}".format(m, p)] = q_interpolator - verbose_print( - "\tMoe and di Stefano 2017: build_q_table: stored q_interpolater as {}".format( - "rinterpolator_q_given_{}_log10{}".format(m, p) - ), - verbosity, - _MOE2017_VERBOSITY_LEVEL, - ) - - # Store the values for which this table was set up in the dict - if not Moecache.get("rinterpolator_q_metadata", None): - Moecache["rinterpolator_q_metadata"] = {} - Moecache["rinterpolator_q_metadata"][m] = options[m] - Moecache["rinterpolator_q_metadata"][p] = options[p] + if I > 0: + # normalise to 1.0 by dividing the data by 1.0/$I + q_interpolator.multiply_table_column(1, 1.0 / I) + # test this + new_I = self.get_integration_constant_q( + q_interpolator, tmp_table, qdata, verbosity=verbosity + ) -def powerlaw_extrapolation_q(qdata, qs, indices, verbosity=0): - """ - Function to do the power law extrapolation at the lower end of the q range - """ - newq = 0.05 - - # use a power-law extrapolation down to q=0.05, if possible - if (qdata[qs[indices[0]]] == 0.0) and (qdata[qs[indices[1]]] == 0.0): - # not possible - return 0 - - else: - slope = (np.log10(qdata[qs[indices[1]]]) - np.log10(qdata[qs[indices[0]]])) / ( - np.log10(qs[indices[1]]) - np.log10(qs[indices[0]]) - ) - intercept = np.log10(qdata[qs[indices[0]]]) - slope * np.log10(qs[indices[0]]) + # fail if error in integral > 1e-6 (should be ~ machine precision) + if abs(1.0 - new_I) > 1e-6: + verbose_print( + "\tMoe and di Stefano 2017: build_q_table: Error: > 1e-6 in q probability integral: {}".format( + I + ), + verbosity, + _MOE2017_VERBOSITY_LEVEL, + ) + # set this new table in the cache + Moecache["rinterpolator_q_given_{}_log10{}".format(m, p)] = q_interpolator + verbose_print( + "\tMoe and di Stefano 2017: build_q_table: stored q_interpolater as {}".format( + "rinterpolator_q_given_{}_log10{}".format(m, p) + ), + verbosity, + _MOE2017_VERBOSITY_LEVEL, + ) - return slope * newq + intercept + # Store the values for which 
this table was set up in the dict + if not Moecache.get("rinterpolator_q_metadata", None): + Moecache["rinterpolator_q_metadata"] = {} + Moecache["rinterpolator_q_metadata"][m] = options[m] + Moecache["rinterpolator_q_metadata"][p] = options[p] -def linear_extrapolation_q(qs, indices, qlimit, qdata, end_index, verbosity=0): - """ - Function to do the linear extrapolation for q. - """ + def powerlaw_extrapolation_q(self, qdata, qs, indices, verbosity=0): + """ + Function to do the power-law extrapolation at the lower end of the q range + """ + newq = 0.05 - # linear extrapolation - dq = qs[indices[1]] - qs[indices[0]] + # use a power-law extrapolation down to q=0.05, if possible + if (qdata[qs[indices[0]]] == 0.0) and (qdata[qs[indices[1]]] == 0.0): + # not possible + return 0 - if dq == 0: - verbose_print( - "\tMoe and di Stefano 2017: build_q_table: linear dq=0".format(), - verbosity, - _MOE2017_VERBOSITY_LEVEL, - ) - # No change - return qs[end_index] - else: - slope = (qdata[qs[indices[1]]] - qdata[qs[indices[0]]]) / dq + else: + slope = (np.log10(qdata[qs[indices[1]]]) - np.log10(qdata[qs[indices[0]]])) / ( + np.log10(qs[indices[1]]) - np.log10(qs[indices[0]]) + ) + intercept = np.log10(qdata[qs[indices[0]]]) - slope * np.log10(qs[indices[0]]) - intercept = qdata[qs[indices[0]]] - slope * qs[indices[0]] - qdata[qlimit] = max(0.0, slope * qlimit + intercept) - verbose_print( - "\tMoe and di Stefano 2017: build_q_table: linear Slope: {} intercept: {} dn/dq({}) = {}".format( - slope, intercept, qlimit, qdata[qlimit] - ), - verbosity, - _MOE2017_VERBOSITY_LEVEL, - ) - return max(0.0, slope * qlimit + intercept) + return slope * newq + intercept -def get_integration_constant_q(q_interpolator, tmp_table, qdata, verbosity=0): - """ - Function to integrate the q interpolator and return the integration constant - """ + def linear_extrapolation_q(self,qs, indices, qlimit, qdata, end_index, verbosity=0): + """ + Function to do the linear extrapolation for q. 
+ """ - dq = 1e-3 # resolution of the integration/renormalisation - I = 0 + # linear extrapolation + dq = qs[indices[1]] - qs[indices[0]] - # integrate: note that the value of the integral is - # meaningless to within a factor (which depends on $dq) - for q in np.arange(0, 1 + 2e-6, dq): - x = q_interpolator.interpolate([q]) - if len(x) == 0: - msg = "\tMoe and di Stefano 2017: build_q_table: Q interpolator table interpolation failed.\n\t\ttmp_table = {}\n\t\tq_data = {}".format( - str(tmp_table), str(qdata) - ) + if dq == 0: verbose_print( - msg, + "\tMoe and di Stefano 2017: build_q_table: linear dq=0".format(), verbosity, _MOE2017_VERBOSITY_LEVEL, ) - raise ValueError(msg) + # No change + return qs[end_index] else: - I += x[0] * dq - # verbose_print( - # "\tMoe and di Stefano 2017: build_q_table: dn/dq ({}) = {} I -> = {}".format(q, x[0], I), - # verbosity, - # _MOE2017_VERBOSITY_LEVEL, - # ) - return I - - -def fill_data(sample_values, data_dict): - """ - Function that returns the normalised array of values for given logmass and logperiod - used for the e and q values - - TODO: make sure we do the correct thing with the dstep - """ + slope = (qdata[qs[indices[1]]] - qdata[qs[indices[0]]]) / dq - data = {} - I = 0 + intercept = qdata[qs[indices[0]]] - slope * qs[indices[0]] + qdata[qlimit] = max(0.0, slope * qlimit + intercept) + verbose_print( + "\tMoe and di Stefano 2017: build_q_table: linear Slope: {} intercept: {} dn/dq({}) = {}".format( + slope, intercept, qlimit, qdata[qlimit] + ), + verbosity, + _MOE2017_VERBOSITY_LEVEL, + ) + return max(0.0, slope * qlimit + intercept) - dstep = float(sample_values[1]) - float(sample_values[0]) - # Read out the data - for sample_value in sample_values: - val = data_dict[sample_value] - data[sample_value] = val - I += val + def get_integration_constant_q(self,q_interpolator, tmp_table, qdata, verbosity=0): + """ + Function to integrate the q interpolator and return the integration constant + """ - # Normalise the data - for sample_value in sample_values: - data[sample_value] = data[sample_value] / I + dq = 1e-3 # resolution of the integration/renormalisation + I = 0 - return data + # integrate: note that the value of the integral is + # meaningless to within a factor (which depends on $dq) + for q in np.arange(0, 1 + 2e-6, dq): + x = q_interpolator.interpolate([q]) + if len(x) == 0: + msg = "\tMoe and di Stefano 2017: build_q_table: Q interpolator table interpolation failed.\n\t\ttmp_table = {}\n\t\tq_data = {}".format( + str(tmp_table), str(qdata) + ) + verbose_print( + msg, + verbosity, + _MOE2017_VERBOSITY_LEVEL, + ) + raise ValueError(msg) + else: + I += x[0] * dq + return I -def calc_e_integral( - options, - integrals_string, - interpolator_name, - mass_string, - period_string, - verbosity=0, -): - """ - Function to calculate the P integral + def fill_data(self,sample_values, data_dict): + """ + Function that returns the normalised array of values for given logmass and logperiod + used for the e and q values - We need to renormalise this because min_per > 0, and not all periods should be included - """ + TODO: make sure we do the correct thing with the dstep + """ - global Moecache - min_ecc = 0 - max_ecc = 0.9999 + data = {} + I = 0 - mass_period_string = "{}_{}".format(options[mass_string], options[period_string]) + dstep = float(sample_values[1]) - float(sample_values[0]) - # Check if the dict exists - if not Moecache.get(integrals_string, None): - Moecache[integrals_string] = {} + # Read out the data + for sample_value in sample_values: 
+ val = data_dict[sample_value] + data[sample_value] = val + I += val - # Check for cached value. If it doesn't exist: calculate - if not Moecache[integrals_string].get(mass_period_string, None): - I = 0 - decc = 1e-3 + # Normalise the data + for sample_value in sample_values: + data[sample_value] = data[sample_value] / I - for ecc in np.arange(min_ecc, max_ecc, decc): - # Loop over all the values in the table, between the min and max P - dp_decc = Moecache[interpolator_name].interpolate( - [np.log10(options[mass_string]), np.log10(options[period_string]), ecc] - )[0] + return data - I += dp_decc * decc - # Set the integral value in the dict - Moecache[integrals_string][mass_period_string] = I - verbose_print( - "\tMoe and di Stefano 2017: calc_ecc_integral: min_ecc: {} max ecc: {} integrals_string: {} interpolator_name: {} mass_string: {} period_string: {} mass: {} period: {} I: {}".format( - min_ecc, - max_ecc, - integrals_string, - interpolator_name, - mass_string, - period_string, - options[mass_string], - options[period_string], - I, - ), - verbosity, - _MOE2017_VERBOSITY_LEVEL, - ) - else: - verbose_print( - "\tMoe and di Stefano 2017: calc_ecc_integral: Found cached value for min_ecc: {} max ecc: {} integrals_string: {} interpolator_name: {} mass_string: {} period_string: {} mass: {} period: {} I: {}".format( - min_ecc, - max_ecc, - integrals_string, - interpolator_name, - mass_string, - period_string, - options[mass_string], - options[period_string], - Moecache[integrals_string][mass_period_string], - ), - verbosity, - _MOE2017_VERBOSITY_LEVEL, - ) + def calc_e_integral( + self, + options, + integrals_string, + interpolator_name, + mass_string, + period_string, + verbosity=0, + ): + """ + Function to calculate the e integral + + We need to renormalise this because min_per > 0, and not all periods should be included + """ + + global Moecache + min_ecc = 0 + max_ecc = 0.9999 + + mass_period_string = "{}_{}".format(options[mass_string], options[period_string]) + + # Check if the dict exists + if not Moecache.get(integrals_string, None): + Moecache[integrals_string] = {} + + # Check for cached value. 
If it doesn't exist: calculate + if not Moecache[integrals_string].get(mass_period_string, None): + I = 0 + decc = 1e-3 + + for ecc in np.arange(min_ecc, max_ecc, decc): + # Loop over all the values in the table, between the min and max P + dp_decc = Moecache[interpolator_name].interpolate( + [np.log10(options[mass_string]), np.log10(options[period_string]), ecc] + )[0] + I += dp_decc * decc -def calc_P_integral( - options, - min_logP, - max_logP, - integrals_string, - interpolator_name, - mass_string, - verbosity=0, -): - """ - Function to calculate the P integral + # Set the integral value in the dict + Moecache[integrals_string][mass_period_string] = I + verbose_print( + "\tMoe and di Stefano 2017: calc_ecc_integral: min_ecc: {} max ecc: {} integrals_string: {} interpolator_name: {} mass_string: {} period_string: {} mass: {} period: {} I: {}".format( + min_ecc, + max_ecc, + integrals_string, + interpolator_name, + mass_string, + period_string, + options[mass_string], + options[period_string], + I, + ), + verbosity, + _MOE2017_VERBOSITY_LEVEL, + ) + else: + verbose_print( + "\tMoe and di Stefano 2017: calc_ecc_integral: Found cached value for min_ecc: {} max ecc: {} integrals_string: {} interpolator_name: {} mass_string: {} period_string: {} mass: {} period: {} I: {}".format( + min_ecc, + max_ecc, + integrals_string, + interpolator_name, + mass_string, + period_string, + options[mass_string], + options[period_string], + Moecache[integrals_string][mass_period_string], + ), + verbosity, + _MOE2017_VERBOSITY_LEVEL, + ) - We need to renormalise this because min_per > 0, and not all periods should be included - """ - global Moecache + def calc_P_integral( + self, + options, + min_logP, + max_logP, + integrals_string, + interpolator_name, + mass_string, + verbosity=0, + ): + """ + Function to calculate the P integral + + We need to renormalise this because min_per > 0, and not all periods should be included + """ + + global Moecache + + # Check if the dict exists + if not Moecache.get(integrals_string, None): + Moecache[integrals_string] = {} + + # Check for cached value. If it doesn't exist: calculate + if not Moecache[integrals_string].get(options[mass_string], None): + I = 0 + dlogP = 1e-3 + + for logP in np.arange(min_logP, max_logP, dlogP): + # Loop over all the values in the table, between the min and max P + dp_dlogP = Moecache[interpolator_name].interpolate( + [np.log10(options[mass_string]), logP] + )[0] - # Check if the dict exists - if not Moecache.get(integrals_string, None): - Moecache[integrals_string] = {} + I += dp_dlogP * dlogP - # Check for cached value. 
If it doesn't exist: calculate - if not Moecache[integrals_string].get(options[mass_string], None): - I = 0 - dlogP = 1e-3 + # Set the integral value in the dict + Moecache[integrals_string][options[mass_string]] = I + verbose_print( + "\tMoe and di Stefano 2017: calc_P_integral: min_logP: {} integrals_string: {} interpolator_name: {} mass_string: {} mass: {} I: {}".format( + min_logP, + integrals_string, + interpolator_name, + mass_string, + options[mass_string], + I, + ), + verbosity, + _MOE2017_VERBOSITY_LEVEL, + ) + else: + verbose_print( + "\tMoe and di Stefano 2017: calc_P_integral: Found cached value for min_logP: {} integrals_string: {} interpolator_name: {} mass_string: {} mass: {} I: {}".format( + min_logP, + integrals_string, + interpolator_name, + mass_string, + options[mass_string], + Moecache[integrals_string][options[mass_string]], + ), + verbosity, + _MOE2017_VERBOSITY_LEVEL, + ) - for logP in np.arange(min_logP, max_logP, dlogP): - # Loop over all the values in the table, between the min and max P - dp_dlogP = Moecache[interpolator_name].interpolate( - [np.log10(options[mass_string]), logP] - )[0] - I += dp_dlogP * dlogP + def calc_total_probdens(self, + prob_dict): + """ + Function to calculate the total probability density + """ - # Set the integral value in the dict - Moecache[integrals_string][options[mass_string]] = I - verbose_print( - "\tMoe and di Stefano 2017: calc_P_integral: min_logP: {} integrals_string: {} interpolator_name: {} mass_string: {} mass: {} I: {}".format( - min_logP, - integrals_string, - interpolator_name, - mass_string, - options[mass_string], - I, - ), - verbosity, - _MOE2017_VERBOSITY_LEVEL, - ) - else: - verbose_print( - "\tMoe and di Stefano 2017: calc_P_integral: Found cached value for min_logP: {} integrals_string: {} interpolator_name: {} mass_string: {} mass: {} I: {}".format( - min_logP, - integrals_string, - interpolator_name, - mass_string, - options[mass_string], - Moecache[integrals_string][options[mass_string]], - ), - verbosity, - _MOE2017_VERBOSITY_LEVEL, - ) + total_probdens = 1 + for key in prob_dict: + total_probdens *= prob_dict[key] + prob_dict["total_probdens"] = total_probdens + return prob_dict -def calc_total_probdens(prob_dict): - """ - Function to calculate the total probability density - """ - total_probdens = 1 - for key in prob_dict: - total_probdens *= prob_dict[key] - prob_dict["total_probdens"] = total_probdens + def Moe_di_Stefano_2017_pdf(self,options, verbosity=0): + """ + Moe & diStefano function to calculate the probability density. - return prob_dict + takes a dictionary as input (in options) with options: + M1, M2, M3, M4 => masses (Msun) [M1 required, rest optional] + P, P2, P3 => periods (days) [number: none=binary, 2=triple, 3=quadruple] + ecc, ecc2, ecc3 => eccentricities [numbering as for P above] -def Moe_di_Stefano_2017_pdf(options, verbosity=0): - """ - Moe & diStefano function to calculate the probability density. 
+            mmin => minimum allowed stellar mass (default 0.07)
+            mmax => maximum allowed stellar mass (default 80.0)
+        """

-    takes a dictionary as input (in options) with options:

+        verbose_print(
+            "\tMoe_di_Stefano_2017_pdf with options:\n\t\t{}".format(json.dumps(options, ensure_ascii=False)),
+            verbosity,
+            _MOE2017_VERBOSITY_LEVEL,
+        )

-        M1, M2, M3, M4 => masses (Msun) [M1 required, rest optional]
-        P, P2, P3 => periods (days) [number: none=binary, 2=triple, 3=quadruple]
-        ecc, ecc2, ecc3 => eccentricities [numbering as for P above]

+        prob_dict = (
+            {}
+        )  # Dictionary containing all the pdf values for the different parameters

-        mmin => minimum allowed stellar mass (default 0.07)
-        mmax => maximum allowed stellar mass (default 80.0)
-    """

+        # Get the multiplicity from the options, and if it's not there, calculate it based on the number of companion masses provided
+        # TODO: the function below makes no sense. We NEED to pass the multiplicity in the options
+        if not options.get("multiplicity", None):
+            msg = "\tMoe_di_Stefano_2017_pdf: Did not find a multiplicity value in the options dictionary"
+            verbose_print(
+                msg,
+                verbosity,
+                _MOE2017_VERBOSITY_LEVEL,
+            )
+            raise ValueError(msg)
+            # multiplicity = 1
+            # for n in range(2, 5):
+            #     multiplicity += 1 if options.get("M{}".format(n), None) else 0
+        else:
+            multiplicity = options["multiplicity"]

-    verbose_print(
-        "\tMoe_di_Stefano_2017_pdf with options:\n\t\t{}".format(json.dumps(options,ensure_ascii=False)),
-        verbosity,
-        _MOE2017_VERBOSITY_LEVEL,
-    )

+        # Immediately return 0 if the multiplicity modulator is 0
+        if options["multiplicity_modulator"][int(multiplicity) - 1] == 0:
+            verbose_print(
+                "\tMoe_di_Stefano_2017_pdf: returning 0 because of the multiplicity modulator being 0",
+                verbosity,
+                _MOE2017_VERBOSITY_LEVEL,
+            )
+            return 0

-    prob_dict = (
-        {}
-    )  # Dictionary containing all the pdf values for the different parameters

+        ############################################################
+        # multiplicity fraction
+        # Calculate the probability, or rather, fraction, of stars that belong to this mass

-    # Get the multiplicity from the options, and if its not there, calculate it based on the
-    # TODO: the function below makes no sense. We NEED to pass the multiplicity in the
-    if not options.get("multiplicity", None):
-        msg = "\tMoe_di_Stefano_2017_pdf: Did not find a multiplicity value in the options dictionary"

+        multiplicity_probability = self.Moe_di_Stefano_2017_multiplicity_fractions(
+            options, verbosity
+        )[int(multiplicity) - 1]
+        prob_dict["multiplicity"] = multiplicity_probability

         verbose_print(
-            msg,
+            "\tMoe_di_Stefano_2017_pdf: Appended multiplicity (mass1 = {}) probability ({}) to the prob dict ({})".format(
+                options["M_1"], prob_dict["multiplicity"], prob_dict
+            ),
             verbosity,
             _MOE2017_VERBOSITY_LEVEL,
         )
-        raise ValueError(msg)
-        # multiplicity = 1
-        # for n in range(2, 5):
-        #     multiplicity += 1 if options.get("M{}".format(n), None) else 0
-    else:
-        multiplicity = options["multiplicity"]
-
-    # Immediately return 0 if the multiplicity modulator is 0
-    if options["multiplicity_modulator"][int(multiplicity) - 1] == 0:

+        ############################################################
+        # always require an IMF for the primary star
+        #
+        # NB multiply by M1 to convert dN/dM to dN/dlnM
+        # (dlnM = dM/M, so 1/dlnM = M/dM)

+        # TODO: Create an n-part-powerlaw method that can have breakpoints and slopes. I'm using a three-part power law now.
+        # TODO: is this actually the correct way? putting the M1 in there? Do we sample in log space?
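+        # Note added in editing: per the NB comment above, the IMF value is
+        # converted from dN/dM to dN/dlnM via
+        #     dN/dlnM = M * dN/dM    (since dlnM = dM/M),
+        # which is why Kroupa2001(M_1) is multiplied by M_1 below, presumably
+        # because the primary mass grid is sampled in ln(M).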
+ M1_probability = self.Kroupa2001(options["M_1"]) * options["M_1"] + prob_dict["M_1"] = M1_probability verbose_print( - "\tMoe_di_Stefano_2017_pdf: returning 0 because of the multiplicity modulator being 0", + "\tMoe_di_Stefano_2017_pdf: Appended Mass (m={}) probability ({}) to the prob dict ({})".format( + options["M_1"], prob_dict["M_1"], prob_dict + ), verbosity, _MOE2017_VERBOSITY_LEVEL, ) - return 0 - - ############################################################ - # multiplicity fraction - # Calculate the probability, or rather, fraction, of stars that belong to this mass - - multiplicity_probability = Moe_di_Stefano_2017_multiplicity_fractions( - options, verbosity - )[int(multiplicity) - 1] - prob_dict["multiplicity"] = multiplicity_probability - verbose_print( - "\tMoe_di_Stefano_2017_pdf: Appended multiplicity (mass1 = {}) probability ({}) to the prob dict ({})".format( - options["M_1"], prob_dict["multiplicity"], prob_dict - ), - verbosity, - _MOE2017_VERBOSITY_LEVEL, - ) - - ############################################################ - # always require an IMF for the primary star - # - # NB multiply by M1 to convert dN/dM to dN/dlnM - # (dlnM = dM/M, so 1/dlnM = M/dM) - - # TODO: Create an n-part-powerlaw method that can have breakpoints and slopes. I'm using a three-part power law now. - # TODO: is this actually the correct way? putting the M1 in there? Do we sample in log space? - M1_probability = Kroupa2001(options["M_1"]) * options["M_1"] - prob_dict["M_1"] = M1_probability - verbose_print( - "\tMoe_di_Stefano_2017_pdf: Appended Mass (m={}) probability ({}) to the prob dict ({})".format( - options["M_1"], prob_dict["M_1"], prob_dict - ), - verbosity, - _MOE2017_VERBOSITY_LEVEL, - ) - # if M1_probability == 0: # If the probability is 0 then we don't have to calculate more - # calc_total_probdens(prob_dict) - # return prob_dict - - """ - From here we go through the multiplicities. - """ - if multiplicity >= 2: - # If the multiplicity is higher than 1, we will need to construct the following tables: - # - period distribution table - # - q distribution table - # - eccentricity distribution table - - # Set up the interpolator for the periods - if not Moecache.get("rinterpolator_log10P", None): - Moecache["rinterpolator_log10P"] = py_rinterpolate.Rinterpolate( - table=Moecache["period_distributions"], # Contains the table of data - nparams=2, # log10M, log10P - ndata=2, # binary, triple - verbosity=verbosity - (_MOE2017_VERBOSITY_INTERPOLATOR_LEVEL - 1), - ) - verbose_print( - "\tMoe_di_Stefano_2017_pdf: Created new period interpolator: {}".format( - Moecache["rinterpolator_log10P"] - ), - verbosity, - _MOE2017_VERBOSITY_LEVEL, - ) + # if M1_probability == 0: # If the probability is 0 then we don't have to calculate more + # calc_total_probdens(prob_dict) + # return prob_dict - # Make a table storing Moe's data for q distributions - if ( - options.get("M_2", None) - or options.get("M_3", None) - or options.get("M_4", None) - ): - if not Moecache.get("rinterpolator_q", None): - Moecache["rinterpolator_q"] = py_rinterpolate.Rinterpolate( - table=Moecache["q_distributions"], # Contains the table of data - nparams=3, # log10M, log10P, q - ndata=1, # + """ + From here we go through the multiplicities. 
+ """ + if multiplicity >= 2: + # If the multiplicity is higher than 1, we will need to construct the following tables: + # - period distribution table + # - q distribution table + # - eccentricity distribution table + + # Set up the interpolator for the periods + if not Moecache.get("rinterpolator_log10P", None): + Moecache["rinterpolator_log10P"] = py_rinterpolate.Rinterpolate( + table=Moecache["period_distributions"], # Contains the table of data + nparams=2, # log10M, log10P + ndata=2, # binary, triple verbosity=verbosity - (_MOE2017_VERBOSITY_INTERPOLATOR_LEVEL - 1), ) verbose_print( - "\tMoe_di_Stefano_2017_pdf: Created new q interpolator: {}".format( - Moecache["rinterpolator_q"] + "\tMoe_di_Stefano_2017_pdf: Created new period interpolator: {}".format( + Moecache["rinterpolator_log10P"] ), verbosity, _MOE2017_VERBOSITY_LEVEL, ) - # Make a table storing Moe's data for q distributions, but only if the ecc is actually sampled - if "ecc" in options: - if not options["ecc"] == None: - if not Moecache.get("rinterpolator_e", None): - Moecache["rinterpolator_e"] = py_rinterpolate.Rinterpolate( - table=Moecache[ - "ecc_distributions" - ], # Contains the table of data - nparams=3, # log10M, log10P, e + # Make a table storing Moe's data for q distributions + if ( + options.get("M_2", None) + or options.get("M_3", None) + or options.get("M_4", None) + ): + if not Moecache.get("rinterpolator_q", None): + Moecache["rinterpolator_q"] = py_rinterpolate.Rinterpolate( + table=Moecache["q_distributions"], # Contains the table of data + nparams=3, # log10M, log10P, q ndata=1, # - verbosity=verbosity - - (_MOE2017_VERBOSITY_INTERPOLATOR_LEVEL - 1), + verbosity=verbosity - (_MOE2017_VERBOSITY_INTERPOLATOR_LEVEL - 1), ) verbose_print( - "\tMoe_di_Stefano_2017_pdf: Created new e interpolator: {}".format( - Moecache["rinterpolator_e"] + "\tMoe_di_Stefano_2017_pdf: Created new q interpolator: {}".format( + Moecache["rinterpolator_q"] ), verbosity, _MOE2017_VERBOSITY_LEVEL, ) - ############### - # Calculation for period of the binary + # Make a table storing Moe's data for q distributions, but only if the ecc is actually sampled + if "ecc" in options: + if not options["ecc"] == None: + if not Moecache.get("rinterpolator_e", None): + Moecache["rinterpolator_e"] = py_rinterpolate.Rinterpolate( + table=Moecache[ + "ecc_distributions" + ], # Contains the table of data + nparams=3, # log10M, log10P, e + ndata=1, # + verbosity=verbosity + - (_MOE2017_VERBOSITY_INTERPOLATOR_LEVEL - 1), + ) + verbose_print( + "\tMoe_di_Stefano_2017_pdf: Created new e interpolator: {}".format( + Moecache["rinterpolator_e"] + ), + verbosity, + _MOE2017_VERBOSITY_LEVEL, + ) - if options.get("M_2", None): - # Separation of the inner binary - options["sep"] = calc_sep_from_period( - options["M_1"], options["M_2"], options["P"] - ) - # TODO: add check for min_logP with instant RLOF? - # TODO: Actually use the value above. - # Total mass inner binary: - options["M_1+M_2"] = options["M_1"] + options["M_2"] + ############### + # Calculation for period of the binary - # Calculate P integral or use cached value + if options.get("M_2", None): + # Separation of the inner binary + options["sep"] = calc_sep_from_period( + options["M_1"], options["M_2"], options["P"] + ) + # TODO: add check for min_logP with instant RLOF? + # TODO: Actually use the value above. 
+ # Total mass inner binary: + options["M_1+M_2"] = options["M_1"] + options["M_2"] - # Calculate P integral or use cached value - # get the periods from the Moecahe - min_logP = float(Moecache["logperiods"][0]) - max_logP = float(Moecache["logperiods"][-1]) + # Calculate P integral or use cached value - calc_P_integral( - options, - min_logP, - max_logP, - "P_integrals", - "rinterpolator_log10P", - "M_1", - verbosity, - ) - - # Set probabilty for P1 - p_val = Moecache["rinterpolator_log10P"].interpolate( - [np.log10(options["M_1"]), np.log10(options["P"])] - )[0] - p_val = p_val / Moecache["P_integrals"][options["M_1"]] - prob_dict["P"] = p_val - verbose_print( - "\tMoe_di_Stefano_2017_pdf: Appended period (m={}, P={}) probability ({}) to the prob list ({})".format( - options["M_1"], options["P"], prob_dict["P"], prob_dict - ), - verbosity, - _MOE2017_VERBOSITY_LEVEL, - ) - # if prob_dict['P'] == 0: # If the probability is 0 then we don't have to calculate more - # calc_total_probdens(prob_dict) - # return prob_dict + # get the periods from the Moecache + min_logP = float(Moecache["logperiods"][0]) + max_logP = float(Moecache["logperiods"][-1]) - ############################################################ - # mass ratio (0 < q = M2/M1 < qmax) - # - # we need to construct the q table for the given M1 - # subject to qmin = Mmin/M1 - - if options.get("M_2", None): - # Build the table for q - primary_mass = options["M_1"] - secondary_mass = options["M_2"] - m_label = "M_1" - p_label = "P" - - # Construct the q table - build_q_table(options, m_label, p_label, verbosity=verbosity) - verbose_print( - "\tMoe_di_Stefano_2017_pdf: Created q_table ({}) for m={} p={}".format( - Moecache[ - "rinterpolator_q_given_{}_log10{}".format(m_label, p_label) - ], - options[m_label], - options[p_label], - ), + self.calc_P_integral( + options, + min_logP, + max_logP, + "P_integrals", + "rinterpolator_log10P", + "M_1", verbosity, - _MOE2017_VERBOSITY_LEVEL, ) - # Add probability for the mass ratio - q_prob = Moecache[ - "rinterpolator_q_given_{}_log10{}".format(m_label, p_label) - ].interpolate([secondary_mass / primary_mass])[0] - prob_dict["q"] = q_prob + # Set probability for P1 + p_val = Moecache["rinterpolator_log10P"].interpolate( + [np.log10(options["M_1"]), np.log10(options["P"])] + )[0] + p_val = p_val / Moecache["P_integrals"][options["M_1"]] + prob_dict["P"] = p_val verbose_print( - "\tMoe_di_Stefano_2017_pdf: appended mass ratio (M={} P={} q={}) probability ({}) to the prob list ({}) ".format( - options["M_1"], - options["P"], - options["M_2"] / options["M_1"], - prob_dict["q"], - prob_dict, + "\tMoe_di_Stefano_2017_pdf: Appended period (m={}, P={}) probability ({}) to the prob list ({})".format( + options["M_1"], options["P"], prob_dict["P"], prob_dict ), verbosity, _MOE2017_VERBOSITY_LEVEL, ) - # if prob_dict['q'] == 0: # If the probability is 0 then we don't have to calculate more + # if prob_dict['P'] == 0: # If the probability is 0 then we don't have to calculate more # calc_total_probdens(prob_dict) # return prob_dict - ############################################################ - # Eccentricity - # TODO: ask rob if the eccentricity requires an extrapolation as well.
- - # Only do this if the eccentricity is sampled - if "ecc" in options: - if not options["ecc"] == None: - # Calculate ecc integral or use cached value - calc_e_integral( - options, "ecc_integrals", "rinterpolator_e", "M_1", "P", verbosity - ) - mass_period_string = "{}_{}".format(options["M_1"], options["P"]) - - # Set probability for ecc - ecc_val = Moecache["rinterpolator_e"].interpolate( - [np.log10(options["M_1"]), np.log10(options["P"]), options["ecc"]] - )[0] - ecc_val = ecc_val / Moecache["ecc_integrals"][mass_period_string] - prob_dict["ecc"] = ecc_val - verbose_print( - "\tMoe_di_Stefano_2017_pdf: Appended eccentricity (m={}, P={}, ecc={}) probability ({}) to the prob list ({})".format( - options["M_1"], - options["P"], - options["ecc"], - prob_dict["ecc"], - prob_dict, - ), - verbosity, - _MOE2017_VERBOSITY_LEVEL, - ) - # if prob_dict['ecc'] == 0: # If the probability is 0 then we don't have to calculate more - # calc_total_probdens(prob_dict) - # return prob_dict - - # Calculations for when multiplicity is bigger than 3 - # BEWARE: binary_c does not evolve these systems actually and the code below should be revised for when binary_c actually evolves triples. - # For that reason, I would not advise to use things with multiplicity > 3 - if multiplicity >= 3: - ############################################################ - # orbital period 2 = - # orbital period of star 3 (multiplicity==3) or - # the star3+star4 binary (multiplicity==4) + # mass ratio (0 < q = M2/M1 < qmax) # - # we assume the same period distribution for star 3 - # (or stars 3 and 4) but with a separation that is >10*a*(1+e) - # where 10*a*(1+e) is the maximum apastron separation of - # stars 1 and 2 - - # TODO: Is this a correct assumption? - max_sep = 10.0 * options["sep"] * (1.0 + options["ecc"]) - min_P2 = calc_period_from_sep(options["M_1+M_2"], options["mmin"], max_sep) - min_logP2 = math.log10(min_P2) - # max_logP2 = 10.0 - # min_logP = Moecache['logperiods'][0] - max_logP2 = float(Moecache["logperiods"][-1]) - - if options["P2"] < min_P2: - # period is too short : system is not hierarchical - prob_dict["P2"] = 0 + # we need to construct the q table for the given M1 + # subject to qmin = Mmin/M1 + + if options.get("M_2", None): + # Build the table for q + primary_mass = options["M_1"] + secondary_mass = options["M_2"] + m_label = "M_1" + p_label = "P" + + # Construct the q table + self.build_q_table(options, m_label, p_label, verbosity=verbosity) verbose_print( - "\tMoe_di_Stefano_2017_pdf: period2 is too short: {} < {}, system is not hierarchical. 
Added 0 to probability list".format( - options["P1"], min_P2 + "\tMoe_di_Stefano_2017_pdf: Created q_table ({}) for m={} p={}".format( + Moecache[ + "rinterpolator_q_given_{}_log10{}".format(m_label, p_label) + ], + options[m_label], + options[p_label], ), verbosity, _MOE2017_VERBOSITY_LEVEL, ) - # if prob_dict['P2'] == 0: # If the probability is 0 then we don't have to calculate more - # calc_total_probdens(prob_dict) - # return prob_dict - - else: - # period is long enough that the system is hierarchical - # hence the separation between the outer star - # and inner binary - options["sep2"] = calc_sep_from_period( - options["M_3"], options["M_1+M_2"], options["P2"] - ) - - # Check for cached value of P integral or calculate - calc_P_integral( - options, - min_logP2, - max_logP2, - "P2_integrals", - "rinterpolator_log10P", - "M_1+M_2", - verbosity, - ) - # Add the probability - p_val = Moecache["rinterpolator_log10P"].interpolate( - [np.log10(options["M_1+M_2"]), np.log10(options["P2"])] - )[0] - p_val = p_val / Moecache["P2_integrals"][options["M_1+M_2"]] - prob_dict["P2"] = p_val + # Add probability for the mass ratio + q_prob = Moecache[ + "rinterpolator_q_given_{}_log10{}".format(m_label, p_label) + ].interpolate([secondary_mass / primary_mass])[0] + prob_dict["q"] = q_prob verbose_print( - "\tMoe_di_Stefano_2017_pdf: Appended period2 (m1={} m2={}, P2={}) probability ({}) to the prob list ({})".format( + "\tMoe_di_Stefano_2017_pdf: appended mass ratio (M={} P={} q={}) probability ({}) to the prob list ({}) ".format( options["M_1"], - options["M_2"], - options["P2"], - prob_dict["P2"], + options["P"], + options["M_2"] / options["M_1"], + prob_dict["q"], prob_dict, ), verbosity, _MOE2017_VERBOSITY_LEVEL, ) - # if prob_dict['P2'] == 0: # If the probability is 0 then we don't have to calculate more + # if prob_dict['q'] == 0: # If the probability is 0 then we don't have to calculate more # calc_total_probdens(prob_dict) # return prob_dict - ############################################################ - # mass ratio 2 = q2 = M3 / (M1+M2) - # - # we need to construct the q table for the given M1 - # subject to qmin = Mmin/(M1+M2) + ############################################################ + # Eccentricity + # TODO: ask rob if the eccentricity requires an extrapolation as well. 
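Each of the period, q and eccentricity probabilities follows the same pattern: interpolate the tabulated distribution at the sampled point, then divide by the integral of that table over the allowed range, with calc_P_integral and calc_e_integral caching the denominator (keyed on M_1, or on M_1 and P). A minimal sketch of that normalisation, using numpy in place of the py_rinterpolate tables and a plain dict in place of Moecache (all names here are illustrative):

    import numpy as np

    _integral_cache = {}

    def normalised_pdf_value(grid_x, pdf_on_grid, x, key):
        # interpolate the tabulated pdf at x, then divide by the cached
        # integral of the table so the pdf integrates to 1 over the range
        if key not in _integral_cache:
            _integral_cache[key] = np.trapz(pdf_on_grid, grid_x)
        return np.interp(x, grid_x, pdf_on_grid) / _integral_cache[key]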
+ + # Only do this if the eccentricity is sampled + if "ecc" in options: + if options["ecc"] is not None: + # Calculate ecc integral or use cached value + self.calc_e_integral( + options, "ecc_integrals", "rinterpolator_e", "M_1", "P", verbosity + ) + mass_period_string = "{}_{}".format(options["M_1"], options["P"]) - # Set the variables for the masses and their names - primary_mass = options["M_1+M_2"] - secondary_mass = options["M_3"] - m_label = "M_1+M_2" - p_label = "P2" + # Set probability for ecc + ecc_val = Moecache["rinterpolator_e"].interpolate( + [np.log10(options["M_1"]), np.log10(options["P"]), options["ecc"]] + )[0] + ecc_val = ecc_val / Moecache["ecc_integrals"][mass_period_string] + prob_dict["ecc"] = ecc_val + verbose_print( + "\tMoe_di_Stefano_2017_pdf: Appended eccentricity (m={}, P={}, ecc={}) probability ({}) to the prob list ({})".format( + options["M_1"], + options["P"], + options["ecc"], + prob_dict["ecc"], + prob_dict, + ), + verbosity, + _MOE2017_VERBOSITY_LEVEL, + ) + # if prob_dict['ecc'] == 0: # If the probability is 0 then we don't have to calculate more + # calc_total_probdens(prob_dict) + # return prob_dict - # Build q table - build_q_table(options, m_label, p_label, verbosity=verbosity) - verbose_print( - "\tMoe_di_Stefano_2017_pdf: Called build_q_table", - verbosity, - _MOE2017_VERBOSITY_LEVEL, - ) + # Calculations for when the multiplicity is 3 or higher + # BEWARE: binary_c does not actually evolve these systems, and the code below should be revised once binary_c evolves triples. + # For that reason, I would not advise using systems with multiplicity > 3 + if multiplicity >= 3: - # Add the probability - q2_val = Moecache[ - "rinterpolator_q_given_{}_log10{}".format(m_label, p_label) - ].interpolate([secondary_mass / primary_mass])[0] - prob_dict["q2"] = q2_val - verbose_print( - "\tMoe_di_Stefano_2017_pdf: appended mass ratio (M_1+M_2={} M_3={} P={} q={}) probability ({}) to the prob list ({}) ".format( - options["M_1+M_2"], - options["M_3"], - options["P"], - secondary_mass / primary_mass, - prob_dict["q2"], - prob_dict, - ), - verbosity, - _MOE2017_VERBOSITY_LEVEL, - ) - # if prob_dict['q2'] == 0: # If the probability is 0 then we don't have to calculate more - # calc_total_probdens(prob_dict) - # return prob_dict - - # TODO: Implement ecc2 calculation - if multiplicity == 4: - # quadruple system. - # TODO: Ask Rob about the structure of the quadruple. Is this only double binary quadruples? + ############################################################ + # orbital period 2 = + # orbital period of star 3 (multiplicity==3) or + # the star3+star4 binary (multiplicity==4) + # + # we assume the same period distribution for star 3 + # (or stars 3 and 4) but with a separation that is >10*a*(1+e) + # where 10*a*(1+e) is the maximum apastron separation of + # stars 1 and 2 + + # TODO: Is this a correct assumption? + max_sep = 10.0 * options["sep"] * (1.0 + options["ecc"]) + min_P2 = calc_period_from_sep(options["M_1+M_2"], options["mmin"], max_sep) + min_logP2 = math.log10(min_P2) + # max_logP2 = 10.0 + # min_logP = Moecache['logperiods'][0] + max_logP2 = float(Moecache["logperiods"][-1]) + + if options["P2"] < min_P2: + # period is too short: system is not hierarchical + prob_dict["P2"] = 0 + verbose_print( + "\tMoe_di_Stefano_2017_pdf: period2 is too short: {} < {}, system is not hierarchical.
Added 0 to probability list".format( + options["P2"], min_P2 + ), + verbosity, + _MOE2017_VERBOSITY_LEVEL, + ) + # if prob_dict['P2'] == 0: # If the probability is 0 then we don't have to calculate more + # calc_total_probdens(prob_dict) + # return prob_dict - ############################################################ - # orbital period 3 - # - # we assume the same period distribution for star 4 - # as for any other stars but Pmax must be such that - # sep3 < sep2 * 0.2 - - # TODO: fix this here - max_sep3 = 0.2 * options["sep2"] * (1.0 + options["ecc2"]) - max_per3 = calc_period_from_sep( - options["M_1+M_2"], options["mmin"], max_sep3 + else: + # period is long enough that the system is hierarchical + # hence the separation between the outer star + # and inner binary + options["sep2"] = calc_sep_from_period( + options["M_3"], options["M_1+M_2"], options["P2"] ) - # Calculate P integral or use the cached value - # TODO: Make sure we use the correct period idea here. - calc_P_integral( + # Check for cached value of P integral or calculate + self.calc_P_integral( options, min_logP2, max_logP2, @@ -2267,145 +2058,237 @@ def Moe_di_Stefano_2017_pdf(options, verbosity=0): verbosity, ) - # Set probability + # Add the probability p_val = Moecache["rinterpolator_log10P"].interpolate( [np.log10(options["M_1+M_2"]), np.log10(options["P2"])] )[0] p_val = p_val / Moecache["P2_integrals"][options["M_1+M_2"]] - prob_dict["P3"] = p_val + prob_dict["P2"] = p_val verbose_print( - "\tMoe_di_Stefano_2017_pdf: Appended period2 (M=4) (M_1={} M_2={}, P2={}) probability ({}) to the prob list ({})".format( + "\tMoe_di_Stefano_2017_pdf: Appended period2 (m1={} m2={}, P2={}) probability ({}) to the prob list ({})".format( options["M_1"], options["M_2"], options["P2"], - prob_dict["P3"], + prob_dict["P2"], prob_dict, ), verbosity, _MOE2017_VERBOSITY_LEVEL, ) - # if prob_dict['P3'] == 0: # If the probability is 0 then we don't have to calculate more + # if prob_dict['P2'] == 0: # If the probability is 0 then we don't have to calculate more # calc_total_probdens(prob_dict) # return prob_dict ############################################################ - # mass ratio 2 + # mass ratio 2 = q2 = M3 / (M1+M2) # # we need to construct the q table for the given M1 # subject to qmin = Mmin/(M1+M2) - # Make a table storing Moe's data for q distributions - # Build the table for q2 + # Set the variables for the masses and their names primary_mass = options["M_1+M_2"] secondary_mass = options["M_3"] m_label = "M_1+M_2" p_label = "P2" - # Calculate new q table - build_q_table(options, m_label, p_label, verbosity=verbosity) + # Build q table + self.build_q_table(options, m_label, p_label, verbosity=verbosity) verbose_print( - "\tMoe_di_Stefano_2017_pdf: Created q_table ".format(), + "\tMoe_di_Stefano_2017_pdf: Called build_q_table", verbosity, _MOE2017_VERBOSITY_LEVEL, ) # Add the probability - q3_prob = Moecache[ + q2_val = Moecache[ "rinterpolator_q_given_{}_log10{}".format(m_label, p_label) ].interpolate([secondary_mass / primary_mass])[0] - prob_dict["q3"] = q3_prob + prob_dict["q2"] = q2_val verbose_print( "\tMoe_di_Stefano_2017_pdf: appended mass ratio (M_1+M_2={} M_3={} P={} q={}) probability ({}) to the prob list ({}) ".format( options["M_1+M_2"], options["M_3"], options["P"], secondary_mass / primary_mass, - prob_dict["q3"], + prob_dict["q2"], prob_dict, ), verbosity, _MOE2017_VERBOSITY_LEVEL, ) - # if prob_dict['q3'] == 0: # If the probability is 0 then we don't have to calculate more + # if prob_dict['q2'] == 0: # If
the probability is 0 then we don't have to calculate more # calc_total_probdens(prob_dict) # return prob_dict - # TODO ecc 3 + # TODO: Implement ecc2 calculation + if multiplicity == 4: + # quadruple system. + # TODO: Ask Rob about the structure of the quadruple. Is this only double binary quadruples? + + ############################################################ + # orbital period 3 + # + # we assume the same period distribution for star 4 + # as for any other stars but Pmax must be such that + # sep3 < sep2 * 0.2 + + # TODO: fix this here + max_sep3 = 0.2 * options["sep2"] * (1.0 + options["ecc2"]) + max_per3 = calc_period_from_sep( + options["M_1+M_2"], options["mmin"], max_sep3 + ) - # check for input of multiplicity - elif multiplicity not in range(1, 5): - msg = "\tMoe_di_Stefano_2017_pdf: Unknown multiplicity {}".format(multiplicity) - verbose_print( - msg, - verbosity, - _MOE2017_VERBOSITY_LEVEL, - ) - raise ValueError(msg) + # Calculate P integral or use the cached value + # TODO: Make sure we use the correct period idea here. + self.calc_P_integral( + options, + min_logP2, + max_logP2, + "P2_integrals", + "rinterpolator_log10P", + "M_1+M_2", + verbosity, + ) - # Calculate total probdens: - prob_dict = calc_total_probdens(prob_dict) + # Set probability + p_val = Moecache["rinterpolator_log10P"].interpolate( + [np.log10(options["M_1+M_2"]), np.log10(options["P2"])] + )[0] + p_val = p_val / Moecache["P2_integrals"][options["M_1+M_2"]] + prob_dict["P3"] = p_val + verbose_print( + "\tMoe_di_Stefano_2017_pdf: Appended period2 (M=4) (M_1={} M_2={}, P2={}) probability ({}) to the prob list ({})".format( + options["M_1"], + options["M_2"], + options["P2"], + prob_dict["P3"], + prob_dict, + ), + verbosity, + _MOE2017_VERBOSITY_LEVEL, + ) + # if prob_dict['P3'] == 0: # If the probability is 0 then we don't have to calculate more + # calc_total_probdens(prob_dict) + # return prob_dict + + ############################################################ + # mass ratio 2 + # + # we need to construct the q table for the given M1 + # subject to qmin = Mmin/(M1+M2) + # Make a table storing Moe's data for q distributions + + # Build the table for q2 + primary_mass = options["M_1+M_2"] + secondary_mass = options["M_3"] + m_label = "M_1+M_2" + p_label = "P2" + + # Calculate new q table + self.build_q_table(options, m_label, p_label, verbosity=verbosity) + verbose_print( + "\tMoe_di_Stefano_2017_pdf: Created q_table ".format(), + verbosity, + _MOE2017_VERBOSITY_LEVEL, + ) - # Some info - if multiplicity == 1: - verbose_print( - "\tMoe_di_Stefano_2017_pdf: M_1={} q=N/A log10P=N/A ({}): {} -> {}\n".format( - options["M_1"], - len(prob_dict), - str(prob_dict), - prob_dict["total_probdens"], - ), - verbosity, - _MOE2017_VERBOSITY_LEVEL, - ) - elif multiplicity == 2: - verbose_print( - "\tMoe_di_Stefano_2017_pdf: M_1={} q={} log10P={} ecc={} ({}): {} -> {}\n".format( - options["M_1"], - options["M_2"] / options["M_1"] if options.get("M_2", None) else "N/A", - np.log10(options["P"]), - options["ecc"] if options.get("ecc", None) else "N/A", - len(prob_dict), - str(prob_dict), - prob_dict["total_probdens"], - ), - verbosity, - _MOE2017_VERBOSITY_LEVEL, - ) - elif multiplicity == 3: - verbose_print( - "\tMoe_di_Stefano_2017_pdf: M_1={} q={} log10P={} ecc={} M_3={} log10P2={} ecc2={} ({}): {} -> {}".format( - options["M_1"], - options["M_2"] / options["M_1"] if options.get("M_2", None) else "N/A", - np.log10(options["P"]), - options["ecc"] if options.get("ecc", None) else "N/A", - options["M_3"], - 
np.log10(options["P2"]), - options["ecc2"] if options.get("ecc2", None) else "N/A", - len(prob_dict), - str(prob_dict), - prob_dict["total_probdens"], - ), - verbosity, - _MOE2017_VERBOSITY_LEVEL, - ) - elif multiplicity == 4: - verbose_print( - "Moe_di_Stefano_2017_pdf: M_1={} q={} log10P={} ecc={} M_3={} log10P2={} ecc2={} M_4={} log10P3={} ecc3={} ({}) : {} -> {}".format( - options["M_1"], - options["M_2"] / options["M_1"] if options.get("M_2", None) else "N/A", - np.log10(options["P"]), - options["ecc"] if options.get("ecc", None) else "N/A", - options["M_3"], - np.log10(options["P2"]), - options["ecc2"] if options.get("ecc2", None) else "N/A", - options["M_4"], - np.log10(options["P3"]), - options["ecc3"] if options.get("ecc3", None) else "N/A", - len(prob_dict), - str(prob_dict), - prob_dict["total_probdens"], - ), - verbosity, - _MOE2017_VERBOSITY_LEVEL, - ) - return prob_dict + # Add the probability + q3_prob = Moecache[ + "rinterpolator_q_given_{}_log10{}".format(m_label, p_label) + ].interpolate([secondary_mass / primary_mass])[0] + prob_dict["q3"] = q3_prob + verbose_print( + "\tMoe_di_Stefano_2017_pdf: appended mass ratio (M_1+M_2={} M_3={} P={} q={}) probability ({}) to the prob list ({}) ".format( + options["M_1+M_2"], + options["M_3"], + options["P"], + secondary_mass / primary_mass, + prob_dict["q3"], + prob_dict, + ), + verbosity, + _MOE2017_VERBOSITY_LEVEL, + ) + # if prob_dict['q3'] == 0: # If the probability is 0 then we don't have to calculate more + # calc_total_probdens(prob_dict) + # return prob_dict + + # TODO ecc 3 + + # check for input of multiplicity + elif multiplicity not in range(1, 5): + msg = "\tMoe_di_Stefano_2017_pdf: Unknown multiplicity {}".format(multiplicity) + verbose_print( + msg, + verbosity, + _MOE2017_VERBOSITY_LEVEL, + ) + raise ValueError(msg) + + # Calculate total probdens: + prob_dict = self.calc_total_probdens(prob_dict) + + # Some info + if multiplicity == 1: + verbose_print( + "\tMoe_di_Stefano_2017_pdf: M_1={} q=N/A log10P=N/A ({}): {} -> {}\n".format( + options["M_1"], + len(prob_dict), + str(prob_dict), + prob_dict["total_probdens"], + ), + verbosity, + _MOE2017_VERBOSITY_LEVEL, + ) + elif multiplicity == 2: + verbose_print( + "\tMoe_di_Stefano_2017_pdf: M_1={} q={} log10P={} ecc={} ({}): {} -> {}\n".format( + options["M_1"], + options["M_2"] / options["M_1"] if options.get("M_2", None) else "N/A", + np.log10(options["P"]), + options["ecc"] if options.get("ecc", None) else "N/A", + len(prob_dict), + str(prob_dict), + prob_dict["total_probdens"], + ), + verbosity, + _MOE2017_VERBOSITY_LEVEL, + ) + elif multiplicity == 3: + verbose_print( + "\tMoe_di_Stefano_2017_pdf: M_1={} q={} log10P={} ecc={} M_3={} log10P2={} ecc2={} ({}): {} -> {}".format( + options["M_1"], + options["M_2"] / options["M_1"] if options.get("M_2", None) else "N/A", + np.log10(options["P"]), + options["ecc"] if options.get("ecc", None) else "N/A", + options["M_3"], + np.log10(options["P2"]), + options["ecc2"] if options.get("ecc2", None) else "N/A", + len(prob_dict), + str(prob_dict), + prob_dict["total_probdens"], + ), + verbosity, + _MOE2017_VERBOSITY_LEVEL, + ) + elif multiplicity == 4: + verbose_print( + "Moe_di_Stefano_2017_pdf: M_1={} q={} log10P={} ecc={} M_3={} log10P2={} ecc2={} M_4={} log10P3={} ecc3={} ({}) : {} -> {}".format( + options["M_1"], + options["M_2"] / options["M_1"] if options.get("M_2", None) else "N/A", + np.log10(options["P"]), + options["ecc"] if options.get("ecc", None) else "N/A", + options["M_3"], + np.log10(options["P2"]), + 
options["ecc2"] if options.get("ecc2", None) else "N/A", + options["M_4"], + np.log10(options["P3"]), + options["ecc3"] if options.get("ecc3", None) else "N/A", + len(prob_dict), + str(prob_dict), + prob_dict["total_probdens"], + ), + verbosity, + _MOE2017_VERBOSITY_LEVEL, + ) + return prob_dict diff --git a/binarycpython/utils/grid.py b/binarycpython/utils/grid.py index 0eeb544eb6a4f66033ac3da9d685860068fee580..8e0e0ddccf71a2aa9b2c1d63312358d43a4bcd40 100644 --- a/binarycpython/utils/grid.py +++ b/binarycpython/utils/grid.py @@ -82,23 +82,29 @@ from binarycpython.utils.dicts import ( from binarycpython import _binary_c_bindings from binarycpython.utils.analytics import analytics +from binarycpython.utils.cache import cache from binarycpython.utils.dataIO import dataIO +from binarycpython.utils.distribution_functions import distribution_functions from binarycpython.utils.grid_logging import grid_logging from binarycpython.utils.grid_options_defaults import grid_options_defaults from binarycpython.utils.gridcode import gridcode from binarycpython.utils.HPC import HPC from binarycpython.utils.metadata import metadata from binarycpython.utils.Moe_di_Stefano_2017 import Moe_di_Stefano_2017 +from binarycpython.utils.spacing_functions import spacing_functions from binarycpython.utils.version import version class Population(analytics, + cache, dataIO, + distribution_functions, grid_logging, grid_options_defaults, gridcode, HPC, metadata, Moe_di_Stefano_2017, + spacing_functions, version): """ Population Object. Contains all the necessary functions to set up, run and process a @@ -109,9 +115,12 @@ class Population(analytics, """ Initialisation function of the population class """ + # caches + self.caches = {} + self.cached_function_cache = {} + self.original_function_cache = {} # Different sections of options - # get binary_c defaults and create a cleaned up dict # Setting stuff will check against the defaults to see if the input is correct. self.defaults = get_defaults() @@ -134,6 +143,7 @@ class Population(analytics, 'save_snapshot' : False, } + # grid code generation self.indent_depth = 0 self.indent_string = " " @@ -404,7 +414,7 @@ class Population(analytics, e=e )) self.exit(code=1) - + except ValueError: # might be able to eval the parameter, e.g. 
@@ -765,6 +775,9 @@ class Population(analytics, # restore from existing HPC files self.HPC_restore() + # set up LRU cache + self.setup_LRU_cache() + return def clean(self) -> None: @@ -1123,18 +1136,18 @@ class Population(analytics, "results": keys_to_floats(self.preloaded_population.grid_results) } - + for x in self._metadata_keylist(): try: combined_output_dict[x] = self.preloaded_population.grid_options[x] except Exception as e: print("Tried to set combined_output_dict key",x,"from preloaded_popuation, but this failed:",e) print("Pre-loaded data from {} stars".format(combined_output_dict["_count"])) - + # do not propagate _killed #combined_output_dict['results']['_killed'] = False #combined_output_dict['_killed'] = False - + self.preloaded_population = None gc.collect() else: @@ -1911,23 +1924,32 @@ class Population(analytics, ## Check which type of population generation # grid type if self.grid_options["evolution_type"] == "grid": - ####################### - # Dry run and getting starcount + ################################################## + # Grid run + ############################################################ + # Set up LRU cache + self.setup_LRU_cache() + ############################################################ + # Dry run and getting starcount + ############################################################ # Put in check if len(self.grid_options["_grid_variables"]) == 0: print("Error: you haven't defined any grid variables! Aborting") raise ValueError # Set up the grid code with a dry run option to see total probability + print("do_dry_run = {}".format(self.grid_options["do_dry_run"])) if self.grid_options["do_dry_run"]: print("Doing dry run to calculate total starcount and probability") self._generate_grid_code(dry_run=True) # Load the grid code + print("Loading grid code") self._load_grid_function() # Do a dry run + print("Doing dry run") self._dry_run() self.verbose_print( @@ -2253,14 +2275,14 @@ class Population(analytics, self.grid_options["verbosity"], 3, ) - + def now(self,style=None,specifier=None): """ - convenience function to return a string of the current time, + convenience function to return a string of the current time, using the format "%m/%d/%Y %H:%M:%S" - Args: - style : if "nospace" then return the date/time with the format + Args: + style : if "nospace" then return the date/time with the format "%Y%m%d_%H%M%S" specifier: if set, uses this as a specifier rather than whatever is set by default or in the style variable @@ -2273,5 +2295,5 @@ class Population(analytics, else: # our default specifier = "%m/%d/%Y %H:%M:%S" - + return datetime.datetime.strftime(now,specifier) diff --git a/binarycpython/utils/grid_options_defaults.py b/binarycpython/utils/grid_options_defaults.py index 0cab159bb7354b62cb86a0df67773b0103584599..c15280a179aa3f5cd243fa4b37804c6895cf8a42 100644 --- a/binarycpython/utils/grid_options_defaults.py +++ b/binarycpython/utils/grid_options_defaults.py @@ -165,6 +165,34 @@ class grid_options_defaults(): ## Evolution from source file # TODO: make run from sourcefile options.
+ ######################################## + # function caching options + ######################################## + "function_cache" : True, + "function_cache_default_maxsize" : 256, + "function_cache_default_type" : "NullCache", # one of LRUCache, LFUCache, FIFOCache, MRUCache, RRCache, TTLCache, NullCache, NoCache + "function_cache_TTL" : 30, + "function_cache_functions" : { + # key=function_name : value=(cache_size, cache_type, test_args) + # + # if cache_size is 0, use function_cache_default_maxsize + # set above + # + # if cache_type is None, use function_cache_default_type + # set above + # + # if test_args is None, no cache is set up + "distribution_functions.powerlaw_constant" : (0, None, (1,100,-2)), + "distribution_functions.calculate_constants_three_part_powerlaw" : (4, None, (0.1,0.5,1,100,-1.3,-2.3,-2.3)), + "distribution_functions.gaussian_normalizing_const" : (0, None, (1.0,1.0,-10.0,+10.0)), + #"spacing_functions.const_linear" : (0, None, None), + #"spacing_functions.const_int" : (0, None, None), + #"spacing_functions.const_ranges" : (0, None, None), + #"spacing_functions.gaussian_zoom" : (0, None, None), + #"spacing_functions.const_dt" : (0, None, None), + #"spacing_functions.peak_normalized_gaussian_func" : (0, None, None), + }, + ######################################## # HPC variables ######################################## @@ -216,7 +244,7 @@ class grid_options_defaults(): 'condor_stream_error':True, # stream stderr 'condor_should_transfer_files' : 'YES', 'condor_when_to_transfer_output' : 'ON_EXIT_OR_EVICT', - + # (useful for debugging, otherwise a lot of work) 'condor_requirements':'', # job requirements 'condor_env' : shutil.which("env"), # /usr/bin/env location @@ -233,7 +261,7 @@ class grid_options_defaults(): # ######################################## # control flow 'rungrid' : 1, # usually run the grid, but can be 0 to skip it (e.g. for condor/slurm admin) - + } @@ -305,7 +333,7 @@ class grid_options_defaults(): "condor_initial_dir" : "String. Directory from which condor scripts are run. If set to the default, None, this is the directory from which your script is run.", "condor_submit" : "String. The Condor_submit command, usually \"/usr/bin/condor_submit\" but will depend on your HTCondor installation.", "condor_getenv" : "Boolean. If True, the default, condor takes the environment at submission and copies it to the jobs. You almost certainly want this to be True.", - "condor_batchname" : "String. Condor batchname option: this is what appears in condor_q. Defaults to \"binary_c-condor\"", + "condor_batchname" : "String. Condor batchname option: this is what appears in condor_q. Defaults to \"binary_c-condor\"", "condor_kill_sig" : "String. Signal Condor should use to stop a process. Note that grid.py expects this to be \"SIGINT\" which is the default.", ############################################################ # Slurm options @@ -321,10 +349,10 @@ "slurm_postpone_join": "Integer, default 0. If 1 do not join job results with Slurm, instead you have to do it later manually.", "slurm_jobname": "String which names the Slurm jobs, default \"binary_c-python\".", "slurm_partition": "String containing the Slurm partition name. You should check your local Slurm installation to find out partition information, e.g. using the sview command.", - "slurm_time": "String. The time a Slurm job is allowed to take. Default is 0 which means no limit. Please check the Slurm documentation for required format of this option.", + "slurm_time": "String. 
The time a Slurm job is allowed to take. Default is 0 which means no limit. Please check the Slurm documentation for required format of this option.", "slurm_postpone_sbatch": "Integer, default 0. If set to 1, do not launch Slurm jobs with sbatch, just make the scripts that would have.", "slurm_array": "String. Override for Slurm's --array option, useful for rerunning jobs manually. Default None.", - "slurm_array_max_jobs" : "Integer. Override for the max number of concurrent Slurm array jobs. Default None.", + "slurm_array_max_jobs" : "Integer. Override for the max number of concurrent Slurm array jobs. Default None.", "slurm_extra_settings": "Dictionary of extra settings for Slurm to put in its launch script. Please see the Slurm documentation for the many options that are available to you.", "slurm_sbatch": "String. The Slurm \"sbatch\" submission command, usually \"/usr/bin/sbatch\" but will depend on your Slurm installation. By default is set automatically.", 'slurm_env' : "String. Points the location of the \"env\" command, e.g. /usr/bin/env or /bin/env, that is used in Slurm scripts. This is set automatically on the submit machine, so if it is different on the nodes, you should set it manually.", @@ -337,6 +365,14 @@ "HPC_force_join" : "Integer, default 0. If 1, and the HPC variable (\"slurm\" or \"condor\") is 3, skip checking our own job and force the join.", "HPC_rebuild_joinlist": "Integer, default 0. If 1, ignore the joinlist we would usually use and rebuild it automatically", + ############################################################ + # Caching + ############################################################ + "function_cache" : "Boolean, default True. If True, we use a cache for certain function calls.", + "function_cache_default_maxsize" : "Integer, default 256. The default maxsize of the cache. Should be a power of 2.", + "function_cache_default_type": "String. One of the following types: LRUCache, LFUCache, FIFOCache, MRUCache, RRCache, TTLCache, NullCache, NoCache. You can find details of what these mean in the Python cachetools manual, except for NoCache which means no cache is used at all, and NullCache which is a dummy cache that never matches, used for testing overheads.", + "function_cache_functions" : "Dict. Keys are the function names that should be in the cache. The value is a tuple of (cache_size, cache_type, test_args), where cache_size is used as the size of the cache, or if it is 0 the function_cache_default_maxsize is used. If cache_type is None the function_cache_default_type is used, otherwise cache_type is the cache type (see the list defined at function_cache_default_type). The test_args are constant arguments used to call the function when testing the cache, see cache.cache_test() for details.", + ############################################################ "weight": "Weight factor for each system. The calculated probability is multiplied by this. If the user wants each system to be repeated several times, then this variable should not be changed, rather change the _repeat variable instead, as that handles the reduction in probability per system. This is useful for systems that have a process with some random element in it.", # TODO: add more info here, regarding the evolution splitting. "repeat": "Factor of how many times a system should be repeated. Consider the evolution splitting binary_c argument for supernovae kick repeating.", # TODO: make sure this is used.
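The (cache_size, cache_type, test_args) tuples documented above presumably get resolved into cachetools instances when the caches are set up (see setup_LRU_cache and cache.cache_test()). A hedged sketch of how such a resolver could map the option strings onto the cachetools constructors (make_cache is illustrative, not the package's code; NullCache and NoCache are the package's own additions, so they are stubbed out here):

    import cachetools

    def make_cache(cache_type, maxsize=256, ttl=30):
        # TTLCache is the only cachetools type here that takes an extra argument
        if cache_type == "TTLCache":
            return cachetools.TTLCache(maxsize=maxsize, ttl=ttl)
        if cache_type in ("NullCache", "NoCache"):
            return None  # stand-in for the package's dummy/no-op caches
        # LRUCache, LFUCache, FIFOCache, MRUCache and RRCache all take maxsize
        return getattr(cachetools, cache_type)(maxsize=maxsize)

    caches = {
        "distribution_functions.powerlaw_constant": make_cache("LRUCache"),
    }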
diff --git a/binarycpython/utils/gridcode.py b/binarycpython/utils/gridcode.py index 03c9e9ff8510112d73b4e6709b39c92baf031ae8..09671fa549fc62033160e45cdb4710fb0cbfb5d3 100644 --- a/binarycpython/utils/gridcode.py +++ b/binarycpython/utils/gridcode.py @@ -673,12 +673,12 @@ class gridcode(): # Take into account the multiplicity fraction: "\n", "# Factor the multiplicity fraction into the probability\n", - "probability = probability * self._calculate_multiplicity_fraction(parameter_dict)" + "probability *= self._calculate_multiplicity_fraction(parameter_dict)" + "\n", - # Add division by number of repeats + # Divide by number of repeats "\n", "# Divide the probability by the number of repeats\n", - 'probability = probability / self.grid_options["repeat"]' + "\n", + 'probability /= self.grid_options["repeat"]' + "\n", # Now we yield the system self.grid_options["repeat"] times. "\n", "# Loop over the repeats\n", @@ -905,7 +905,7 @@ class gridcode(): You can either use a real function, or a string representation of a function call. Examples: - samplerfunc = "const(math.log(m_min), math.log(m_max), {})".format(resolution['M_1']) + samplerfunc = "self.const_linear(math.log(m_min), math.log(m_max), {})".format(resolution['M_1']) precode: Extra room for some code. This code will be evaluated within the loop of the diff --git a/binarycpython/utils/spacing_functions.py b/binarycpython/utils/spacing_functions.py index ef2fa06a10dc3dfc481183208a5c44f44cbf50bd..2f7708accd74e7c0ac882cb7468ddfcef29a9eb0 100644 --- a/binarycpython/utils/spacing_functions.py +++ b/binarycpython/utils/spacing_functions.py @@ -7,6 +7,7 @@ Tasks: from typing import Union from diskcache import Cache +import cachetools import datetime import functools import json @@ -14,456 +15,481 @@ import math import numpy as np import py_rinterpolate import sys -from binarycpython.utils.grid import Population - - -@functools.lru_cache(maxsize=16) -def const( - min_bound: Union[int, float], max_bound: Union[int, float], steps: int -) -> list: - """ - Samples a range linearly. Uses numpy linspace, and returns an array of floats. Do NOT use this for integers. - - Args: - min_bound: lower bound of range - max_bound: upper bound of range - steps: number of segments between min_bound and max_bound - - Returns: - np.linspace(min_bound, max_bound, steps) - """ - return np.linspace(min_bound, max_bound, steps) - -@functools.lru_cache(maxsize=16) -def const_int( - min_bound: Union[int, float], max_bound: Union[int, float], steps: int -) -> list: - """ - Samples an integer range linearly. Returns a list of ints. - - Args: - min_bound: lower bound of range, must be an integer (is converted to int) - max_bound: upper bound of range, must be an integer (is converted to int) - steps: number of segments between min_bound and max_bound - - Returns: - range(min_bound,max_bound,step) - - where step is int((int(max_bound)-int(min_bound))/steps) - """ - - step = int((int(max_bound)-int(min_bound))/(steps-1)) - if steps <= 1: - return int(min_bound) - else: - return range(int(min_bound),int(max_bound+step),step) - - -############################################################ -@functools.lru_cache(maxsize=16) -def const_ranges(ranges) -> list: - """ - Samples a series of ranges linearly. - - Args: - ranges: a tuple of tuples passed to the const() spacing function. - - Returns: - numpy array of masses - - Example: - The following allocates 10 stars between 0.1 and 0.65, 20 stars between 0.65 - and 0.85, and 10 stars between 0.85 and 10.0 Msun. 
- - samplerfunc="const_ranges((({},{},{}),({},{},{}),({},{},{})))".format( - 0.1,0.65,10, - 0.65,0.85,20, - 0.85,10.0,10 - ), - - """ - - masses = np.empty(0) - for range in ranges: - masses = np.append(masses, const(*range)) - return np.unique(masses) - - -############################################################ -def peak_normalized_gaussian_func( - x: Union[int, float], mean: Union[int, float], sigma: Union[int, float] -) -> Union[int, float]: - """ - Function to evaluate a Gaussian at a given point, note - that the normalization is such that the peak is always 1.0, - not that the integral is 1.0 - - Args: - x: location at which to evaluate the distribution - mean: mean of the Gaussian - sigma: standard deviation of the Gaussian - - Returns: - value of the Gaussian at x - """ - gaussian_prefactor = 1.0 # / math.sqrt(2.0 * math.pi) - - r = 1.0 / sigma - y = (x - mean) * r - return math.exp(-0.5 * y ** 2) - - -############################################################ -@functools.lru_cache(maxsize=16) -def gaussian_zoom( - min_bound: Union[int, float], - max_bound: Union[int, float], - zoom_mean: Union[int, float], - zoom_dispersion: Union[int, float], - zoom_magnitude: Union[int, float], - steps: int, -) -> list: - """ - Samples such that a region is zoomed in according to a 1-Gaussian function - - Args: - min_bound: lower bound of range - max_bound: upper bound of range - zoom_mean: mean of the Gaussian zoom location - zoom_dispersion: dispersion of the Gaussian - zoom_magnitude: depth of the Gaussian (should be 0<= zoom_magntiude <1) - steps: number of segments between min_bound and max_bound assuming a linear step - this is what you'd normally call "resolution" - - Returns: - Numpy array of sample values - """ - - # linear spacing: this is what we'd have - # in the absence of a Gaussian zoom - linear_spacing = (max_bound - min_bound) / (steps - 1) - - # make the list of values - x = min_bound - array = np.array([]) - while x <= max_bound: - array = np.append(array, x) - g = peak_normalized_gaussian_func(x, zoom_mean, zoom_dispersion) - f = 1.0 - zoom_magnitude * g - dx = linear_spacing * f - x = x + dx - - # force the last array member to be max_bound if it's not - if array[-1] != max_bound: - array[-1] = max_bound - - return np.unique(array) - - -@functools.lru_cache(maxsize=16) -def const_dt(self,cachedir=None,usecache=True,**kwargs): - """ - const_dt returns a list of masses spaced at a constant age difference - - Args: - dt: the time difference between the masses (1000.0 Myr, used when logspacing==False) - dlogt : the delta log10(time) difference between masses (0.1 dex, used when logspacing==True) - mmin: the minimum mass to be considered in the stellar lifetime interpolation table (0.07 Msun) - mmax: the maximum mass to be considered in the stellar lifetime interpolation table (100.0 Msun) - nres: the resolution of the stellar lifetime interpolation table (100) - logspacing: whether to use log-spaced time, in which case dt is actually d(log10(t)) - tmin: the minimum time to consider (Myr, default 3.0 Myr) - tmax: the maximum time to consider (Myr, default None which means we use the grid option 'max_evolution_time') - mindm: a tuple of tuples containing a mass range and minimum mass spacing in that range. The default is ((0.07,1.0,0.1),(1.0,300.0,1.0)) allocated a minimum dm of 0.1Msun in the mass range 0.07 to 1.0 Msun and 1.0Msun in the range 1.0 to 300.0 Msun. Anything you set overrides this. 
Note, if you use only one tuple, you must set it with a trailing comma, thus, e.g. ((0.07,1.0,0.1),). (default None) - maxdm: a list of tuples similar to mindm but specifying a maximum mass spacing. In the case of maxdm, if the third option in each tuple is negative it is treated as a log step (its absolute value is used as the step). (default None) - fsample: a global sampling (Shannon-like) factor (<1) to improve resolution (default 1.0, set to smaller to improve resolution) - factor: all masses generated are multiplied by this after generation - showtable: if True, the mass list and times are shown to stdout after generation - showlist: if True, show the mass list once generated - logmasses: if True, the masses are logged with math.log() - log10masses: if True, the masses are logged with math.log10() - usecache: if True (the default) uses cached results if they are saved (in cachedir) - cachedir: if None, defaults to grid_options['tmp_dir']/const_dt_cache - - Returns: - Array of masses. - - Example: - # these are lines set as options to Population.add_grid_value(...) - - # linear time bins of 1Gyr - samplerfunc="const_dt(self,dt=1000,nres=100,mmin=0.07,mmax=2.0,showtable=True)" - - # logarithmic spacing in time, generally suitable for Galactic - # chemical evolution yield grids. - samplerfunc="const_dt(self,dlogt=0.1,nres=100,mmin=0.07,mmax=80.0,maxdm=((0.07,1.0,0.1),(1.0,10.0,1.0),(10.0,80.0,2.0)),showtable=True,logspacing=True,fsample=1.0/4.0)" - - """ - if cachedir == None: - cachedir = self.grid_options['cache_dir'] + '/const_dt_cache' - cache = Cache(cachedir) - @cache.memoize() - def _const_dt(cachedir, - num_cores, - bse_options, - dt=1000.0, - dlogt=0.1, - mmin=0.07, - mmax=100.0, - nres=1000, - logspacing=False, - tmin=3.0, # start at 3Myr - tmax=None, # use max_evolution_time by default - mindm=None, # tuple of tuples - maxdm=((0.07, 1.0, 0.1), (1.0, 300.0, 1.0)), # tuple of tuples - fsample=1.0, - factor=1.0, - logmasses=False, - log10masses=False, - showlist=False, - showtable=False, - usecache=True, - ): - - # first thing to do is make a stellar lifetime table - # - # we should use the bse_options passed in - # so our lifetime_population uses the same physics - # as the main grid - - # convert bse_options to dict - bse_options = json.loads(bse_options) - - # make new population object - lifetime_population = Population() - lifetime_population.bse_options = bse_options - - # we only want to evolve the star during nuclear burning, - # we don't want a dry run of the grid - # we want to use the right number of CPU cores - lifetime_population.set( - do_dry_run=False, - num_cores=num_cores, - max_stellar_type_1=10, - save_ensemble_chunks=False, - symlink_latest_gridcode=False, - modulo=1, - start_at=0, - slurm=0, - condor=0, - ) - # make a grid in M1 - lifetime_population.add_grid_variable( - name="lnM_1", - parameter_name="M_1", - longname="log Primary mass", # == single-star mass - valuerange=[math.log(mmin), math.log(mmax)], - samplerfunc="const(math.log({mmin}),math.log({mmax}),{nres})".format( - mmin=mmin, mmax=mmax, nres=nres - ), - probdist="1", # dprob/dm1 : we don't care, so just set it to 1 - dphasevol="dlnM_1", - precode="M_1=math.exp(lnM_1)", - condition="", # Impose a condition on this grid variable. 
Mostly for a check for yourself - gridtype="edge", - ) +class spacing_functions(): + + def __init__(self, **kwargs): + # don't do anything: we just inherit from this class + return + + @cachetools.cachedmethod( + lambda self: self.caches["spacing_functions.const_linear"] + ) + def const_linear( + self, + min_bound: Union[int, float], max_bound: Union[int, float], steps: int + ) -> list: + """ + Samples a range linearly. Uses numpy linspace, and returns an array of floats. Do NOT use this for integers. + + Args: + min_bound: lower bound of range + max_bound: upper bound of range + steps: number of segments between min_bound and max_bound + + Returns: + np.linspace(min_bound, max_bound, steps) + """ + return np.linspace(min_bound, max_bound, steps) + + @cachetools.cachedmethod( + lambda self: self.caches["spacing_functions.const_int"] + ) + def const_int( + self, + min_bound: Union[int, float], max_bound: Union[int, float], steps: int + ) -> list: + """ + Samples an integer range linearly. Returns a list of ints. + + Args: + min_bound: lower bound of range, must be an integer (is converted to int) + max_bound: upper bound of range, must be an integer (is converted to int) + steps: number of segments between min_bound and max_bound + + Returns: + range(min_bound,max_bound,step) + + where step is int((int(max_bound)-int(min_bound))/steps) + """ + + step = int((int(max_bound)-int(min_bound))/(steps-1)) + if steps <= 1: + return int(min_bound) + else: + return range(int(min_bound),int(max_bound+step),step) - # set up the parse function - def _parse_function(self, output): - if output: - for line in output.splitlines(): - data = line.split() - if data[0] == "SINGLE_STAR_LIFETIME": - # append (log10(mass), log10(lifetime)) tuples - logm = math.log10(float(data[1])) - logt = math.log10(float(data[2])) - # print(line) - # print("logM=",logm,"M=",10.0**logm," -> logt=",logt) - self.grid_results["interpolation table m->t"][logm] = logt - self.grid_results["interpolation table t->m"][logt] = logm - - lifetime_population.set( - parse_function=_parse_function, - ) - # run to build the interpolation table - print("Running population to make lifetime interpolation table, please wait") - lifetime_population.evolve() - # print("Data table",lifetime_population.grid_results['interpolation table t->m']) + ############################################################ + @cachetools.cachedmethod( + lambda self: self.caches["spacing_functions.const_ranges"] + ) + def const_ranges(self,ranges) -> list: + """ + Samples a series of ranges linearly. - if not "interpolation table t->m" in lifetime_population.grid_results or len(lifetime_population.grid_results["interpolation table t->m"].keys()) == 0: - print("\n\n\nError: The t->m lifetime table is empty. One usual cause for this is that the tmax or max_evolution_time option (currently passed in to const_dt as {tmax}) is too short for there to be any entries in the table before the first timestep. Try increasing tmax and max_evolution_time, shorten the timestep or, if using log times, set tstart to be closer to 0.\n".format(tmax=tmax)) - exit() + Args: + ranges: a tuple of tuples passed to the self.const_linear() spacing function. 
+ Returns: + numpy array of masses - # convert to nested lists for the interpolator - # - # make time -> mass table - data_table_time_mass = [] - times = sorted(lifetime_population.grid_results["interpolation table t->m"].keys()) - for time in times: - mass = lifetime_population.grid_results["interpolation table t->m"][time] - # we have to make sure the time is monotonic (not guaranteed at high mass) - if len(data_table_time_mass) == 0: - data_table_time_mass.append([time, mass]) - elif mass < data_table_time_mass[-1][1]: - data_table_time_mass.append([time, mass]) - - # make mass -> time table - data_table_mass_time = [] - masses = sorted(lifetime_population.grid_results["interpolation table m->t"].keys()) - for mass in masses: - time = lifetime_population.grid_results["interpolation table m->t"][mass] - data_table_mass_time.append([mass, time]) - - # set up interpolators - interpolator_time_mass = py_rinterpolate.Rinterpolate( - table=data_table_time_mass, nparams=1, ndata=1, verbosity=0 # mass # lifetime - ) - interpolator_mass_time = py_rinterpolate.Rinterpolate( - table=data_table_mass_time, nparams=1, ndata=1, verbosity=0 # lifetime # mass - ) + Example: + The following allocates 10 stars between 0.1 and 0.65, 20 stars between 0.65 + and 0.85, and 10 stars between 0.85 and 10.0 Msun. - # function to get a mass given a time (Myr) - def _mass_from_time(linear_time): - return 10.0 ** interpolator_time_mass.interpolate([math.log10(linear_time)])[0] - - # function to get a time given a mass (Msun) - def _time_from_mass(mass): - return 10.0 ** interpolator_mass_time.interpolate([math.log10(mass)])[0] - - # return a unique list - def _uniq(_list): - return sorted(list(set(_list))) - - # format a whole list like %g - def _format(_list): - return [float("{x:g}".format(x=x)) for x in _list] - - # construct mass list, always include the min and max - mass_list = [mmin, mmax] - - # first, make sure the stars are separated by only - # maxdm - if maxdm: - for x in maxdm: - range_min = x[0] - range_max = x[1] - dm = x[2] - if dm < 0.0: - # use log scale - dlogm = -dm - logm = math.log(mmin) - logmmax = math.log(mmax) - logrange_min = math.log(range_min) - logrange_max = math.log(range_max) - while logm <= logmmax: - if logm >= logrange_min and logm <= logrange_max: - mass_list.append(math.exp(logm)) - logm += dlogm - else: - # use linear scale - m = mmin - while m <= mmax: - if m >= range_min and m <= range_max: - mass_list.append(m) - m += dm - - # start time loop at tmax or max_evolution_time - t = tmax if tmax else bse_options['max_evolution_time'] - - # set default mass list - if logspacing: - logt = math.log10(t) - logtmin = math.log10(tmin) - while logt > logtmin: - m = _mass_from_time(10.0 ** logt) - mass_list.append(m) - logt = max(logtmin, logt - dlogt * fsample) - else: - while t > tmin: - m = _mass_from_time(t) - mass_list.append(m) - t = max(tmin, t - dt * fsample) - - # make mass list unique - mass_list = _uniq(mass_list) - - if mindm: - for x in mindm: - range_min = x[0] - range_max = x[1] - mindm = x[2] - # impose a minimum dm: if two masses in the list - # are separated by < this, remove the second - for index, mass in enumerate(mass_list): - if index > 0 and mass >= range_min and mass <= range_max: - dm = mass_list[index] - mass_list[index - 1] - if dm < mindm: - mass_list[index - 1] = 0.0 - mass_list = _uniq(mass_list) - if mass_list[0] == 0.0: - mass_list.remove(0.0) - - # apply multiplication factor if given - if factor and factor != 1.0: - mass_list = [m * factor for m in 
mass_list] - - # reformat numbers - mass_list = _format(mass_list) - - # show the mass<>time table? - if showtable: - twas = 0.0 - logtwas = 0.0 - for i, m in enumerate(mass_list): - t = _time_from_mass(m) + samplerfunc="const_ranges((({},{},{}),({},{},{}),({},{},{})))".format( + 0.1,0.65,10, + 0.65,0.85,20, + 0.85,10.0,10 + ), + + """ + + masses = np.empty(0) + for mass_range in ranges: + masses = np.append(masses, self.const_linear(*mass_range)) + return np.unique(masses) + + + ############################################################ + @cachetools.cachedmethod( + lambda self: self.caches["spacing_functions.peak_normalized_gaussian_func"] + ) + def peak_normalized_gaussian_func( + self, + x: Union[int, float], mean: Union[int, float], sigma: Union[int, float] + ) -> Union[int, float]: + """ + Function to evaluate a Gaussian at a given point, note + that the normalization is such that the peak is always 1.0, + not that the integral is 1.0 + + Args: + x: location at which to evaluate the distribution + mean: mean of the Gaussian + sigma: standard deviation of the Gaussian + + Returns: + value of the Gaussian at x + """ + gaussian_prefactor = 1.0 # / math.sqrt(2.0 * math.pi) + + r = 1.0 / sigma + y = (x - mean) * r + return math.exp(-0.5 * y ** 2) + + + ############################################################ + @cachetools.cachedmethod( + lambda self: self.caches["spacing_functions.gaussian_zoom"] + ) + def gaussian_zoom( + self, + min_bound: Union[int, float], + max_bound: Union[int, float], + zoom_mean: Union[int, float], + zoom_dispersion: Union[int, float], + zoom_magnitude: Union[int, float], + steps: int, + ) -> list: + """ + Samples such that a region is zoomed in according to a 1-Gaussian function + + Args: + min_bound: lower bound of range + max_bound: upper bound of range + zoom_mean: mean of the Gaussian zoom location + zoom_dispersion: dispersion of the Gaussian + zoom_magnitude: depth of the Gaussian (should be 0 <= zoom_magnitude < 1) + steps: number of segments between min_bound and max_bound assuming a linear step + this is what you'd normally call "resolution" + + Returns: + Numpy array of sample values + """ + + # linear spacing: this is what we'd have + # in the absence of a Gaussian zoom + linear_spacing = (max_bound - min_bound) / (steps - 1) + + # make the list of values + x = min_bound + array = np.array([]) + while x <= max_bound: + array = np.append(array, x) + g = self.peak_normalized_gaussian_func(x, zoom_mean, zoom_dispersion) + f = 1.0 - zoom_magnitude * g + dx = linear_spacing * f + x = x + dx + + # force the last array member to be max_bound if it's not + if array[-1] != max_bound: + array[-1] = max_bound + + return np.unique(array) + + + @cachetools.cachedmethod( + lambda self: self.caches["spacing_functions.const_dt"] + ) + def const_dt(self,cachedir=None,usecache=True,**kwargs): + """ + const_dt returns a list of masses spaced at a constant age difference + + Args: + dt: the time difference between the masses (1000.0 Myr, used when logspacing==False) + dlogt : the delta log10(time) difference between masses (0.1 dex, used when logspacing==True) + mmin: the minimum mass to be considered in the stellar lifetime interpolation table (0.07 Msun) + mmax: the maximum mass to be considered in the stellar lifetime interpolation table (100.0 Msun) + nres: the resolution of the stellar lifetime interpolation table (100) + logspacing: whether to use log-spaced time, in which case dt is actually d(log10(t)) + tmin: the minimum time to consider (Myr, default 3.0 Myr) + tmax: the
+        """
+
+        # linear spacing: this is what we'd have
+        # in the absence of a Gaussian zoom
+        linear_spacing = (max_bound - min_bound) / (steps - 1)
+
+        # make the list of values
+        x = min_bound
+        array = np.array([])
+        while x <= max_bound:
+            array = np.append(array, x)
+            # shrink the local step by 1 - zoom_magnitude * Gaussian
+            g = self.peak_normalized_gaussian_func(x, zoom_mean, zoom_dispersion)
+            f = 1.0 - zoom_magnitude * g
+            dx = linear_spacing * f
+            x = x + dx
+
+        # force the last array member to be max_bound if it's not
+        if array[-1] != max_bound:
+            array[-1] = max_bound
+
+        return np.unique(array)
+
+
+    @cachetools.cachedmethod(
+        lambda self: self.caches["spacing_functions.const_dt"]
+    )
+    def const_dt(self, cachedir=None, usecache=True, **kwargs):
+        """
+        const_dt returns a list of masses spaced such that the stellar lifetimes (ages) differ by a constant time interval
+
+        Args:
+            dt: the time difference between the masses (1000.0 Myr, used when logspacing==False)
+            dlogt: the delta log10(time) difference between masses (0.1 dex, used when logspacing==True)
+            mmin: the minimum mass to be considered in the stellar lifetime interpolation table (0.07 Msun)
+            mmax: the maximum mass to be considered in the stellar lifetime interpolation table (100.0 Msun)
+            nres: the resolution of the stellar lifetime interpolation table (1000)
+            logspacing: whether to use log-spaced time, in which case dt is actually d(log10(t))
+            tmin: the minimum time to consider (Myr, default 3.0 Myr)
+            tmax: the maximum time to consider (Myr; default None, which means we use the grid option 'max_evolution_time')
+            mindm: a tuple of tuples, each containing a mass range and the minimum mass spacing to enforce in that range, e.g. ((0.07,1.0,0.1),) enforces a minimum dm of 0.1 Msun in the mass range 0.07 to 1.0 Msun. Note: a single range must still be given as a tuple of tuples, hence the trailing comma. (default None)
+            maxdm: a tuple of tuples similar to mindm but specifying a maximum mass spacing; if the third element of a tuple is negative, it is treated as a logarithmic step (its absolute value is used as the step in log mass). The default, ((0.07,1.0,0.1),(1.0,300.0,1.0)), allocates a maximum dm of 0.1 Msun in the mass range 0.07 to 1.0 Msun and 1.0 Msun in the range 1.0 to 300.0 Msun; anything you set overrides this.
+            fsample: a global sampling (Shannon-like) factor; set it below 1.0 to improve the resolution (default 1.0)
+            factor: all masses generated are multiplied by this after generation
+            showtable: if True, the mass list and times are shown to stdout after generation
+            showlist: if True, show the mass list once generated (default True)
+            logmasses: if True, the masses are logged with math.log()
+            log10masses: if True, the masses are logged with math.log10()
+            usecache: if True (the default) uses cached results if they are saved (in cachedir)
+            cachedir: if None, defaults to grid_options['cache_dir']/const_dt_cache
+
+        Returns:
+            Array of masses.
+
+        Example:
+            # these are lines set as options to Population.add_grid_variable(...)
+
+            # linear time bins of 1 Gyr
+            samplerfunc="self.const_dt(dt=1000,nres=100,mmin=0.07,mmax=2.0,showtable=True)"
+
+            # logarithmic spacing in time, generally suitable for Galactic
+            # chemical evolution yield grids.
+            samplerfunc="self.const_dt(dlogt=0.1,nres=100,mmin=0.07,mmax=80.0,maxdm=((0.07,1.0,0.1),(1.0,10.0,1.0),(10.0,80.0,2.0)),showtable=True,logspacing=True,fsample=1.0/4.0)"
+        """
+        if cachedir is None:
+            cachedir = self.grid_options['cache_dir'] + '/const_dt_cache'
+        print("const_dt cache directory: {}".format(cachedir))
+        cache = Cache(cachedir)
+
+        @cache.memoize()  # memoize to disk
+        def _const_dt(
+            cachedir,
+            num_cores,
+            bse_options,
+            dt=1000.0,
+            dlogt=0.1,
+            mmin=0.07,
+            mmax=100.0,
+            nres=1000,
+            logspacing=False,
+            tmin=3.0,  # start at 3 Myr
+            tmax=None,  # use max_evolution_time by default
+            mindm=None,  # tuple of tuples
+            maxdm=((0.07, 1.0, 0.1), (1.0, 300.0, 1.0)),  # tuple of tuples
+            fsample=1.0,
+            factor=1.0,
+            logmasses=False,
+            log10masses=False,
+            showlist=False,
+            showtable=False,
+            usecache=True,
+        ):
+            print(
+                "_const_dt num_cores={} dt={} dlogt={} mmin={} mmax={} nres={} logspacing={} tmin={} mindm={} maxdm={} fsample={} factor={} logmasses={} log10masses={}".format(
+                    num_cores, dt, dlogt, mmin, mmax, nres, logspacing, tmin, mindm, maxdm, fsample, factor, logmasses, log10masses
+                )
+            )
+
+            # first thing to do is make a stellar lifetime table
+            #
+            # we should use the bse_options passed in
+            # so our lifetime_population uses the same physics
+            # as the main grid
+
+            # convert the JSON string back to a bse_options dict
+            bse_options = json.loads(bse_options)
+
+            # make a new population object for the lifetime grid
+            from binarycpython.utils.grid import Population
+            lifetime_population = Population()
+            lifetime_population.bse_options = bse_options
+
+            # we only want to evolve the star during nuclear burning
+            # (max_stellar_type_1=10), we don't want a dry run of the grid,
+            # and we want to use the right number of CPU cores
+            lifetime_population.set(
+                do_dry_run=False,
+                num_cores=num_cores,
+                max_stellar_type_1=10,
+                save_ensemble_chunks=False,
+                symlink_latest_gridcode=False,
+                modulo=1,
+                start_at=0,
+                slurm=0,
+                condor=0,
+            )
+
+            # make a grid in M1
+            lifetime_population.add_grid_variable(
+                name="lnM_1",
+                parameter_name="M_1",
+                longname="log Primary mass",  # == single-star mass
+                valuerange=[math.log(mmin), math.log(mmax)],
+                samplerfunc="self.const_linear(math.log({mmin}),math.log({mmax}),{nres})".format(
+                    mmin=mmin, mmax=mmax, nres=nres
+                ),
+                probdist="1",  # dprob/dm1 : we don't care, so just set it to 1
+                dphasevol="dlnM_1",
+                precode="M_1=math.exp(lnM_1)",
+                condition="",  # optional condition on this grid variable; none is needed here
+                gridtype="edge",
+            )
+
+            # set up the parse function
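+            # (the parser below assumes binary_c emits log lines of the form
+            #  "SINGLE_STAR_LIFETIME <mass/Msun> <lifetime/Myr>": data[1] is
+            #  the mass, data[2] the lifetime)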
+            def _parse_function(self, output):
+                if output:
+                    for line in output.splitlines():
+                        data = line.split()
+                        if data and data[0] == "SINGLE_STAR_LIFETIME":
+                            # store log10(mass) <-> log10(lifetime) pairs
+                            # in the interpolation tables
+                            logm = math.log10(float(data[1]))
+                            logt = math.log10(float(data[2]))
+                            # print(line)
+                            # print("logM=",logm,"M=",10.0**logm," -> logt=",logt)
+                            self.grid_results["interpolation table m->t"][logm] = logt
+                            self.grid_results["interpolation table t->m"][logt] = logm
+
+            lifetime_population.set(
+                parse_function=_parse_function,
+            )
+
+            # run to build the interpolation table
+            print("Running population to make lifetime interpolation table, please wait")
+            lifetime_population.evolve()
+            # print("Data table",lifetime_population.grid_results['interpolation table t->m'])
+
+            if (
+                "interpolation table t->m" not in lifetime_population.grid_results
+                or len(lifetime_population.grid_results["interpolation table t->m"]) == 0
+            ):
+                print(
+                    "\n\n\nError: The t->m lifetime table is empty. One common cause is that the tmax or max_evolution_time option (currently passed in to const_dt as {tmax}) is too short for there to be any entries in the table before the first timestep. Try increasing tmax and max_evolution_time, shortening the timestep or, if using log times, setting tmin closer to 0.\n".format(
+                        tmax=tmax
+                    )
+                )
+                exit()
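+
+            # note: the interpolation tables are built in log10(mass) and
+            # log10(time/Myr); e.g. (illustrative) a 1 Msun star with a
+            # lifetime of ~12000 Myr is stored as
+            # (log10(1.0), log10(12000)) ~= (0.0, 4.08)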
+
+            # convert to nested lists for the interpolator
+            #
+            # make time -> mass table
+            data_table_time_mass = []
+            times = sorted(lifetime_population.grid_results["interpolation table t->m"].keys())
+            for time in times:
+                mass = lifetime_population.grid_results["interpolation table t->m"][time]
+                # make sure the mass decreases monotonically with time
+                # (not guaranteed at high mass), so that the t->m mapping
+                # is single-valued
+                if len(data_table_time_mass) == 0:
+                    data_table_time_mass.append([time, mass])
+                elif mass < data_table_time_mass[-1][1]:
+                    data_table_time_mass.append([time, mass])
+
+            # make mass -> time table
+            data_table_mass_time = []
+            masses = sorted(lifetime_population.grid_results["interpolation table m->t"].keys())
+            for mass in masses:
+                time = lifetime_population.grid_results["interpolation table m->t"][mass]
+                data_table_mass_time.append([mass, time])
+
+            # set up interpolators
+            interpolator_time_mass = py_rinterpolate.Rinterpolate(
+                table=data_table_time_mass, nparams=1, ndata=1, verbosity=0  # time (lifetime) -> mass
+            )
+            interpolator_mass_time = py_rinterpolate.Rinterpolate(
+                table=data_table_mass_time, nparams=1, ndata=1, verbosity=0  # mass -> time (lifetime)
+            )
+
+            # function to get a mass given a time (Myr)
+            def _mass_from_time(linear_time):
+                return 10.0 ** interpolator_time_mass.interpolate([math.log10(linear_time)])[0]
+
+            # function to get a time given a mass (Msun)
+            def _time_from_mass(mass):
+                return 10.0 ** interpolator_mass_time.interpolate([math.log10(mass)])[0]
+
+            # return a sorted, unique list
+            def _uniq(_list):
+                return sorted(set(_list))
+
+            # format a whole list like %g
+            def _format(_list):
+                return [float("{x:g}".format(x=x)) for x in _list]
+
+            # construct the mass list, always including the min and max
+            mass_list = [mmin, mmax]
+
+            # first, make sure the stars are separated by at most
+            # the maximum mass spacing, maxdm
+            if maxdm:
+                for x in maxdm:
+                    range_min = x[0]
+                    range_max = x[1]
+                    dm = x[2]
+                    if dm < 0.0:
+                        # negative dm: use a log scale with step |dm|
+                        dlogm = -dm
+                        logm = math.log(mmin)
+                        logmmax = math.log(mmax)
+                        logrange_min = math.log(range_min)
+                        logrange_max = math.log(range_max)
+                        while logm <= logmmax:
+                            if logrange_min <= logm <= logrange_max:
+                                mass_list.append(math.exp(logm))
+                            logm += dlogm
+                    else:
+                        # positive dm: use a linear scale
+                        m = mmin
+                        while m <= mmax:
+                            if range_min <= m <= range_max:
+                                mass_list.append(m)
+                            m += dm
+
+            # start the time loop at tmax or max_evolution_time
+            t = tmax if tmax else bse_options['max_evolution_time']
+
+            # set the default mass list
+            if logspacing:
                 logt = math.log10(t)
-            if twas > 0.0:
-                print(
-                    "{i:4d} m={m:13g} t={t:13g} log10(t)={logt:13g} dt={dt:13g} dlog10(t)={dlogt:13g}".format(
-                        i=i, m=m, t=t, logt=logt, dt=twas - t, dlogt=logtwas - logt
+                logtmin = math.log10(tmin)
+                while logt > logtmin:
+                    m = _mass_from_time(10.0 ** logt)
+                    mass_list.append(m)
+                    logt = max(logtmin, logt - dlogt * fsample)
+            else:
+                while t > tmin:
+                    m = _mass_from_time(t)
+                    mass_list.append(m)
+                    t = max(tmin, t - dt * fsample)
+
+            # make the mass list unique
+            mass_list = _uniq(mass_list)
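+
+            # now impose the minimum mass spacing, mindm, if given:
+            # e.g. (illustrative) mindm=((0.07,1.0,0.1),) merges any two
+            # masses closer than 0.1 Msun in the range 0.07 to 1.0 Msun,
+            # dropping the lower of each pair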
+            if mindm:
+                for x in mindm:
+                    range_min = x[0]
+                    range_max = x[1]
+                    dm_min = x[2]  # renamed so we don't clobber the mindm argument
+                    # impose a minimum dm: if two masses in the list are
+                    # separated by less than dm_min, remove the lower of
+                    # the pair
+                    for index, mass in enumerate(mass_list):
+                        if index > 0 and range_min <= mass <= range_max:
+                            dm = mass_list[index] - mass_list[index - 1]
+                            if dm < dm_min:
+                                mass_list[index - 1] = 0.0
+                    mass_list = _uniq(mass_list)
+                    if mass_list[0] == 0.0:
+                        mass_list.remove(0.0)
+
+            # apply the multiplication factor if given
+            if factor and factor != 1.0:
+                mass_list = [m * factor for m in mass_list]
+
+            # reformat the numbers like %g
+            mass_list = _format(mass_list)
+
+            # show the mass<>time table?
+            if showtable:
+                twas = 0.0
+                logtwas = 0.0
+                for i, m in enumerate(mass_list):
+                    t = _time_from_mass(m)
+                    logt = math.log10(t)
+                    if twas > 0.0:
+                        print(
+                            "{i:4d} m={m:13g} t={t:13g} log10(t)={logt:13g} dt={dt:13g} dlog10(t)={dlogt:13g}".format(
+                                i=i, m=m, t=t, logt=logt, dt=twas - t, dlogt=logtwas - logt
+                            )
                         )
-                )
-            else:
-                print(
-                    "{i:4d} m={m:13g} t={t:13g} log10(t)={logt:13g}".format(
-                        i=i, m=m, t=t, logt=logt
+                    else:
+                        print(
+                            "{i:4d} m={m:13g} t={t:13g} log10(t)={logt:13g}".format(
+                                i=i, m=m, t=t, logt=logt
+                            )
                         )
-                )
-            twas = t
-            logtwas = logt
-        exit()
+                    twas = t
+                    logtwas = logt
+                # quit after showing the table
+                exit()
+
+            # return the mass list as a numpy array
+            mass_list = np.unique(np.array(mass_list))
-    # return the mass list as a numpy array
-    mass_list = np.unique(np.array(mass_list))
+
+            # perhaps log the masses
+            if logmasses:
+                mass_list = np.log(mass_list)
+            if log10masses:
+                mass_list = np.log10(mass_list)
-    # perhaps log the masses
-    if logmasses:
-        mass_list = np.log(mass_list)
-    if log10masses:
-        mass_list = np.log10(mass_list)
+
+            return mass_list
+
+        # call _const_dt and return the mass_list
+        #
+        # Note: because _const_dt is cached to disk, calling it may
+        # use the cached result.
+        #
+        # Note: we send a sorted JSON string instead of the
+        # bse_options dict to make sure the order is preserved
+        mass_list = _const_dt(cachedir,
+            self.grid_options['num_cores'],
+            json.dumps(self.bse_options,
+                sort_keys=True,
+                ensure_ascii=False),
+            **kwargs
+        )
+        cache.close()
+
+        if kwargs.get('showlist', True):
+            print("const_dt mass list ({} masses)\n".format(len(mass_list)), mass_list)
 
         return mass_list
-
-    # call _const_dt and return the mass_list
-    #
-    # Note: because _const_dt is cached to disk, calling it may
-    # use the cached result.
-    #
-    # Note: we send a sorted JSON string instead of the
-    # bse_options dict to make sure the order is preserved
-    mass_list = _const_dt(cachedir,
-        self.grid_options['num_cores'],
-        json.dumps(self.bse_options,sort_keys=True,ensure_ascii=False),
-        **kwargs
-    )
-    cache.close()
-
-    if kwargs.get('showlist',True):
-        print("const_dt mass list ({} masses)\n".format(len(mass_list)), mass_list)
-
-    return mass_list
diff --git a/setup.py b/setup.py
index e0e1534559826125278230f22ee9dcb0ae73fef7..15fd6b9deb432cf724166f420237457afd5b1193 100644
--- a/setup.py
+++ b/setup.py
@@ -266,6 +266,7 @@ setup(
         "compress_pickle",
         "datasize",
         "diskcache",
+        "flufl.lock",
         "h5py",
         "halo",
         "humanize",