diff --git a/binarycpython/utils/distribution_functions.py b/binarycpython/utils/distribution_functions.py index 4cb4f17fde4d7647cf4fdd4f6ad5690f09ba07fa..7180778a2d005acad6983a1c62b89ea8f9c71a56 100644 --- a/binarycpython/utils/distribution_functions.py +++ b/binarycpython/utils/distribution_functions.py @@ -30,7 +30,6 @@ from binarycpython.utils.useful_funcs import calc_period_from_sep LOG_LN_CONVERTER = 1.0 / math.log(10.0) distribution_constants = {} # To store the constants in - def prepare_dict(global_dict: dict, list_of_sub_keys: list) -> None: """ Function that makes sure that the global dict is prepared to have a value set there. This dictionary will store values and factors for the distribution functions, so that they dont have to be calculated each time. @@ -405,7 +404,9 @@ def gaussian( def Kroupa2001(m: Union[int, float], newopts: dict = None) -> Union[int, float]: """ - Probability distribution function for kroupa 2001 IMF, where the default values to the three_part_powerlaw are: default = {"m0": 0.1, "m1": 0.5, "m2": 1, "mmax": 100, "p1": -1.3, "p2": -2.3,"p3": -2.3} + Probability distribution function for kroupa 2001 IMF, where the default values to the + three_part_powerlaw are: + default = {"m0": 0.1, "m1": 0.5, "m2": 1, "mmax": 100, "p1": -1.3, "p2": -2.3,"p3": -2.3} Args: m: mass to evaluate the distribution at @@ -888,8 +889,6 @@ def flatsections(x: float, opts: dict) -> Union[float, int]: # print("flatsections gives C={}: y={}\n",c,y) return y - - # print(flatsections(1, [{'min': 0, 'max': 2, 'height': 3}])) ######################################################################## @@ -918,3 +917,832 @@ def cosmic_SFH_madau_dickinson2014(z): ######################################################################## # Metallicity distributions ######################################################################## + + +######################################################################## +# Moe & DiStefano 2017 functions +######################################################################## + +import py_rinterpolate + +# Tasks +# TODO: check all the raise ValueErrors to make them more appropriate +# TODO: Put the json checking stuff in a different function +# TODO: make function to normalize dictionary of 1 layer deep +# TODO: make use of the @cached_property decorators to make use of cached calls +# TODO: Fix the automatic freeing functions in py_rinterpolate +# TODO: check very well which functions actually need to be part of the population object + +# Global dictionary to store values in +Moecache = {} + +def poisson(lambda_val, n, nmax=None): + """ + Function that calculates the poisson value and normalizes TODO: improve the description + """ + + cachekey = "{} {} {}".format(lambda_val, n, nmax) + + if distribution_constants.get('poisson_cache', None): + if distribution_constants['poisson_cache'].get(cachekey, None): + p_val = distribution_constants['poisson_cache'][cachekey] + print("Found cached value: Poisson ({}, {}, {}) = {}\n".format(lambda_val, n, nmax, p_val)) + return p_val + + # Poisson distribution : note, n can be zero + # + # nmax is the truncation : if set, we normalize + # correctly. 
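# Illustrative sketch (not part of the patch): the truncated, renormalised Poisson
# distribution that poisson()/_poisson() below implement, written as a single
# self-contained helper. The function name is hypothetical.
import math

def truncated_poisson_sketch(lam, n, nmax=None):
    """P(n | lam), optionally renormalised over n = 0..nmax."""
    def p(k):
        return (lam ** k) * math.exp(-lam) / math.factorial(k)
    value = p(n)
    if nmax is not None:
        norm = sum(p(k) for k in range(nmax + 1))
        value /= norm
    return value

# e.g. truncated_poisson_sketch(1.5, 2, nmax=3) gives the companion-count
# probability renormalised so that multiplicities 0..3 sum to 1.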
+ p_val = _poisson(lambda_val, n) + + if nmax: + I_poisson = 0 + for i in range(nmax+1): + I_poisson += _poisson(lambda_val, i) + p_val = p_val/I_poisson + + # Add to cache + if not distribution_constants.get('poisson_cache', None): + distribution_constants['poisson_cache'] = {} + distribution_constants['poisson_cache'][cachekey] = p_val + + # print("Poisson ({}, {}, {}) = {}\n".format(lambda_val, n, nmax, p_val)) + return p_val + +def _poisson(lambda_val, n): + """ + Function to return the poisson value + """ + + return (lambda_val ** n) * np.exp(-lambda_val) / (1.0 * math.factorial(n)) + + + +def Moe_de_Stefano_2017_multiplicity_fractions(options): + # Returns a list of multiplicity fractions for a given input list of masses + global Moecache + + # TODO: make an extrapolation functionality in this. log10(1.6e1) is low, we can probably go a bit further + + result = {} + + # TODO: decide what to do with this. + # if(0) + # { + # # we have these in a table, so could interpolate, but... + # $Moecache{'rinterpolator_fractions_M1_table'} //= + # rinterpolate->new( + # table => $Moecache{'fractions_M1_table'}, + # nparams => 1, # logM1 + # ndata => 4, # single/binary/triple/quadruple fractions + # ); + + # $r = $Moecache{'rinterpolator_fractions_M1_table'}->interpolate([log10($opts->{'M1'})]); + # } + + # ... it's better to interpolate the multiplicity and then + # use a Poisson distribution to calculate the fractions + # (this is more accurate) + # Set up the multiplicity interpolator + if not Moecache.get('rinterpolator_multiplicity', None): + Moecache['rinterpolator_multiplicity'] = py_rinterpolate.Rinterpolate( + table=Moecache['multiplicity_table'], # Contains the table of data + nparams=1, # logM1 + ndata=4 # The amount of datapoints (the parameters that we want to interpolate) + ) + + if options['multiplicity_model'] == 'Poisson': + multiplicity = Moecache['rinterpolator_multiplicity'].interpolate([np.log10(options['M1'])])[0] + + for n in range(4): + result[n] = options['multiplicity_modulator'][n] * poisson(multiplicity, n, 3) if options['multiplicity_modulator'][n] > 0 else 0 + + elif options['multiplicity_model'] == 'data': + # use the fractions calculated from Moe's data directly + # + # note that in this case, there are no quadruples: these + # are combined with triples + + for n in range(3): + result[n] = Moecache['rinterpolator_multiplicity'].interpolate([np.log10(options['M1'])])[n+1] * options['multiplicity_modulator'][n] + result[3] = 0.0 # no quadruples + + + # Normalisation: + if options['normalize_multiplicities'] == 'raw': + # do nothing : use raw Poisson predictions + pass + elif options['normalize_multiplicities'] == 'norm': + # simply normalize so all multiplicities add to 1 + sum_result = sum([result[key] for key in result.keys()]) + for key in result.keys(): + result[key] = result[key]/sum_result + elif options['normalize_multiplicities'] == 'merge': + # if multiplicity == 1, merge binaries, triples and quads into singles + # (de facto this is the same as "norm") + # if multiplicity == 2, merge triples and quads into binaries + # if multiplicity == 3, merge quads into triples + # if multiplicity == 4, do nothing, equivalent to 'raw'. + + # TODO: ask rob about this part. Not sure if i understand it. 
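# Illustrative sketch (not part of the patch) of the 'merge' normalisation described
# above: fractions of multiplicities above the highest switched-on one are folded
# into that highest multiplicity, then everything is renormalised to 1.
# Function and variable names here are hypothetical.
def merge_multiplicity_fractions_sketch(fractions, modulator):
    """fractions: dict {0: single, 1: binary, 2: triple, 3: quadruple}."""
    # highest multiplicity that is switched on (1-based)
    max_mult = max(i + 1 for i in range(4) if modulator[i] > 0)
    merged = dict(fractions)
    for i in range(max_mult, 4):           # indices above the allowed maximum
        merged[max_mult - 1] += merged[i]  # fold into the highest allowed bin
        merged[i] = 0.0
    total = sum(merged.values())
    return {key: value / total for key, value in merged.items()}

# e.g. with modulator [1, 1, 0, 0] the triple and quadruple fractions are added
# to the binary fraction before normalisation, which is canonical binary
# population synthesis.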
+ + multiplicitty = 0 + for n in range(4): + if options['multiplicity_modulator'][n] > 0: + multiplicity = n + 1 + print("Multiplicity: {}: {}".format(multiplicity, str(result))) + + if multiplicity == 1: + result[0] = 1.0 + for n in range(1, 4): + result[0] += result[n] + result[n] = 0 + + elif multiplicity == 2: + # we have only singles and binaries: + # triples and quads are treated as binaries + + for n in [2, 3]: + result[1] += result[n] + result[n] = 0 + + elif multiplicity == 3: + # we have singles, binaries and triples: + # quads are treated as triples + result[2] += result[3] + result[3] = 0 + + elif multiplicity == 4: + # we have singles, binaries, triples and quads, + # so do nothing + pass + else: + print("Error: in the Moe distribution, we seem to want no stars at all (multiplicity == {}).\n".format(multiplicity)) + + sum_result = sum([result[key] for key in result.keys()]) + for key in result.keys(): + result[key] = result[key]/sum_result + + print("Multiplicity array: {}".format(str(result))) + + # return array reference + return result + +def build_q_table(options, m, p): + ############################################################ + # + # Build an interpolation table for q, given a mass and + # orbital period. + # + # $m and $p are labels which determine which system(s) + # to look up from Moe's data: + # + # $m can be M1, M2, M3, M4, or if set M1+M2 etc. + # $p can be P, P2, P3 + # + # The actual values are in $opts: + # + # mass is in $opts->{$m} + # period is $opts->{$p} + # + ############################################################ + + # Since the information from the table for M&S is independent of any choice we make, + # we need to take into account that for example our choice of minimum mass leads to a minimum q_min that is not the same as in the table + # We should ignore those parts of the table and renormalize. If we are below the lowest value of qmin in the table we need to extrapolate the data + + # We can check if we have a cached value for this already: + # TODO: fix this cache check + incache = False + if Moecache.get('rinterpolator_q_metadata', None): + if Moecache['rinterpolator_q_metadata'][m] == options[m]: + if Moecache['rinterpolator_q_metadata'] == options[p]: + incache = True + + # + if not incache: + # trim and/or expand the table to the range $qmin to $qmax. + + # qmin is set by the minimum stellar mass : below this + # the companions are planets + qmin = options['ranges']['M'][0] # TODO: this lower range must not be lower than Mmin. However, since the q_min is used as a sample range for the M2, it should use this value I think. discuss + + # TODO: fix that this works. Should depend on metallicity too I think. iirc this function does not work now. + # qmax = maximum_mass_ratio_for_RLOF(options[m], options[p]) + # TODO: change this to the above + qmax = 1 + + # qdata contains the table that we modify: we get + # the original data by interpolating Moe's table + qdata = [] + can_renormalize = 1 + + qeps = 1e-8 # small number but such that qeps+1 != 1 + if (qeps+1==1.0): + printf("qeps (= {}) +1 == 1. 
Make qeps larger".format(qeps)) + + if (qmin >= qmax): + # there may be NO binaries in this part of the parameter space: + # in which case, set up a table with lots of zero in it + + qdata = {0:0, 1:0} + can_renormalize = 0 + + else: + # qmin and qmax mean we'll get something non-zero + can_renormalize = 1 + + # require extrapolation sets whether we need to extrapolate + # at the low and high ends + require_extrapolation = {} + + + if (qmin >= 0.15): + # qmin is inside Moe's table : this is easy, + # we just keep points from qmin at the low + # end to qmax at the high end. + require_extrapolation['low'] = 0 + require_extrapolation['high'] = 1 # TODO: shouldnt the extrapolation need to happen if qmax > 0.95 + qdata[qmin] = Moecache['rinterpolator_q'].interpolate( + [ + np.log10(options[m]), + np.log10(options[p]) + ] + )[0] + + for q in np.arange(0.15, 0.950001, 0.1): + if (q >= qmin) and (q <= qmax): + qdata[q] = Moecache['rinterpolator_q'].interpolate( + [ + np.log10(options[m]), + np.log10(options[p]), + q + ] + )[0] + else: + require_extrapolation['low'] = 1 + require_extrapolation['high'] = 1 + if (qmax < 0.15): + # qmax < 0.15 which is off the edge + # of the table. In this case, choose + # two points at q=0.15 and 0.16 and interpolate + # at these in case we want to extrapolate. + for q in [0.15, 0.16]: + qdata[q] = Moecache['rinterpolator_q'].interpolate( + [ + np.log10(options[m]), + np.log10(options[p]), + q + ] + )[0] + else: + # qmin < 0.15 and qmax > 0.15, so we + # have to generate Moe's table for + # q = 0.15 (i.e. 0.1 to 0.2) to 0.95 (0.9 to 1) + # as a function of M1 and orbital period, + # to obtain the q distribution data. + + for q in np.arange(0.15, np.min(0.950001, qmax + 0.0001), 0.1): + qdata[q] = Moecache['rinterpolator_q'].interpolate( + [ + np.log10(options[m]), + np.log10(options[p]), + q + ] + )[0] + + # just below qmin, if qmin>qeps, we want nothing + if (qmin - 0.15 > qeps): + q = qmin - qeps + qdata[q] = 0 + require_extrapolation['low'] = 0 + + # just above qmax, if qmax<1, we want nothing + if (qmax < 0.95): + q = qmax + qeps + qdata[q] = 0 + require_extrapolation['high'] = 0 + + # sorted list of qs + qs = sorted(qdata.keys()) + + if len(qs) == 0: + print("No qs found error") + raise ValueError + + elif len(qs) == 1: + # only one q value : pretend there are two + # with a flat distribution up to 1.0. 
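# Illustrative sketch (not part of the patch): the single-q-value case described in
# the comment above, extended to a two-point flat distribution. The helper name is
# hypothetical; the in-place handling follows below.
def extend_single_q_point_sketch(qdata):
    """qdata: dict mapping q -> dn/dq with exactly one entry."""
    (q0,) = qdata.keys()
    if q0 == 1.0:
        # move the existing point just below 1 and add a second point at q = 1
        value = qdata.pop(q0)
        qdata[1.0 - 1e-6] = value
        qdata[1.0] = value
    else:
        # add a second point at q = 1 with the same height (flat distribution)
        qdata[1.0] = qdata[q0]
    return qdata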
+ if qs[0] == 1.0: + qs[0] = 1.0 - 1e-6 + qs[1] = 1 + qdata[qs[0]] = 1 + qdata[qs[1]] = 1 + else: + qs[1] = 1.0 + qdata[qs[1]] = qs[0] + + + else: + for pre in ['low', 'high']: + if (require_extrapolation[pre]==0): + continue + else: + sign = -1 if pre == 'low' else 1 + end_index = 0 if pre == 'low' else len(qs)-1 + indices = [0, 1] if pre == 'low' else [len(qs)-1, len(qs)-2] + method = options.get('q_{}_extrapolation_method', None) + qlimit = qmin if pre == 'log' else qmax + + print("Q: {} method: {}".format(pre, method)) + print("indices: {}".format(indices)) + print("End index: {}".format(end_index)) + print("QS: {}".format(str(indices))) + + # truncate the distribution + qdata[max(0.0, min(1.0, qlimit + sign * qeps))] = 0 + + if method==None: + # no extrapolation : just interpolate between 0.10 and 0.95 + continue + elif method=='flat': + # use the end value value and extrapolate it + # with zero slope + qdata[qlimit] = qdata[qs[end_index]] + elif method=='linear': + # linear extrapolation + print("Linear 2 {}".format(pre)) + dq = qs[indices[1]] - qs[indices[0]] + + if dq==0: + # No change + print("dq = 0") + qdata[qlimit] = qs[end_index] + else: + slope = (qdata[qs[indices[1]]] - qdata[qs[indices[0]]])/dq + intercept = qdata[qs[indices[0]]] - slope * qs[indices[0]] + qdata[qlimit] = max(0.0, slope * qlimit + intercept) + print("Slope: {} intercept: {} dn/dq({}) = {}".format(slope, intercept, qlimit, qdata[qlimit])) + elif method=='plaw2': + newq = 0.05; + # use a power-law extrapolation down to q=0.05, if possible + if (qdata[qs[indices[0]]] == 0) and (qdata[qs[indices[1]]] == 0.0): + # not possible + qdata[newq] = 0 + else: + slope = (np.log10(qdata[qs[indices[1]]]) - np.log10(qdata[qs[indices[0]]]))/(np.log10(qs[indices[1]])-np.log10(qs[indices[0]])) + intercept = np.log10(qdata[qs[indices[0]]]) - slope * log10(qs[indices[0]]) + qdata[newq] = slope * newq + intercept + + elif method=='nolowq': + newq = 0.05 + qdata[newq] = 0 + else: + print("No other methods available") + raise ValueError + # TODO: consider implementing this + # elsif($method =~ /^(log)?poly(\d+)/) + # { + # ############################################################ + # # NOT WORKING / TESTED + # ############################################################ + + # # fit a polynomial of degree n in q or log10(q) + # my $dolog = defined $1 ? 
1 : 0; + # my $n = $2; + + # # make xdata : list of qs + # my @xdata; + # if($dolog) + # { + # @xdata = @qs; # linear + # } + # else + # { + # @xdata = map{log10(MAX(1e-20,$_))}@qs; # log + # } + + # # make ydata : from JSON + # my @ydata = map{$qdata->{$_}}@qs; # y data + + # # make parameters: these are 1/nn + # my @parameters; # parameters + # $#parameters = $n - 1; # set length of parameters array + # { + # my $nn = $n; + # @parameters = map{1.0/$nn--}@parameters; # set all to 1.0/parameter number + # } + + # # make polynomial with parameters + # my $formula = Math::Polynomial->new(@parameters); # use a polynomial fit + # my $variable ='x'; + # my $max_iter = 100; + # my $square_residual = Algorithm::CurveFit->curve_fit( + # $formula, + # \@parameters, + # $variable, + # \@xdata, + # \@ydata, + # $max_iter); + + # # evaluate at q=0.05 + # my $newq = 0.05; + # $qdata->{$newq} = $formula->evaluate($newq); + + # print Data::Dumper::Dumper(\@parameters); + # exit; + # } + + # regenerate qs in new table + tmp_table = [] + for q in sorted(qdata.keys()): + tmp_table.append([q, qdata[q]]) + + # Make an interpolation table to contain our modified data + q_interpolator = py_rinterpolate.Rinterpolate( + table=tmp_table, # Contains the table of data + nparams=1, # q + ndata=1 # + ) + # TODO: build a check in here to see if the interpolator build was successful + + print("Can renormalize?: {}".format(can_renormalize)) + if can_renormalize: + # now we integrate and renormalize (if the table is not all zero) + # + dq = 1e-3 # resolution of the integration/renormalization + I = 0 + + # integrate: note that the value of the integral is + # meaningless to within a factor (which depends on $dq) + for q in np.arange(0, 1+ 2e-6, dq): + x = q_interpolator.interpolate( + [q] + ) + if len(x)==0: + print("Q interpolator table interpolation failed") + print("tmp_table = {}".format(str(tmp_table))) + print("q_data = {}".format(str(qdata))) + raise ValueError + else: + I += x[0]*dq + print("dn/dq ({}) = {}".format()) + + if I > 0: + # normalize to 1.0 by dividing the data by 1.0/$I + q_interpolator.multiply_table_column(1, 1.0/I) + + # test this + I = 0 + for q in np.arange(0, 1+ 2e-6, dq): + I += q_interpolator.interpolate( + [q] + )[0] * dq + print("Q integral: {}, {}".format(I, q_interpolator)) + + # fail if error in integral > 1e-6 (should be ~ machine precision) + if (abs(1.0 - I) > 1e-6): + print("Error: > 1e-6 in q probability integral: {}".format(I)) + + # set this new table in the cache + Moecache['rinterpolator_q_given_{}_log10{}'.format(m, p)] = q_interpolator + Moecache['rinterpolator_q_metadata'][m] = options[m] + Moecache['rinterpolator_q_metadata'][p] = options[p] + + +def Moe_de_Stefano_2017_pdf(options): + # Moe and de Stefano probability density function + # + # takes a dctionary as input (in options) with options: + # + # M1, M2, M3, M4 => masses (Msun) [M1 required, rest optional] + # P, P2, P3 => periods (days) [number: none=binary, 2=triple, 3=quadruple] + # ecc, ecc2, ecc3 => eccentricities [numbering as for P above] + # + # mmin => minimum allowed stellar mass (default 0.07) + # mmax => maximum allowed stellar mass (default 80.0) + # + + prob = [] # Value that we will return + + # Separation of the inner binary + options['sep'] = calc_sep_from_period(options['M1'], options['M2'], options['P']) + + # Total mass inner binary: + options['M1+M2'] = options['M1'] + options['M2'] + + multiplicity = options['multiplicity'] + if not options.get('multiplicity', None): + multiplicity = 1 + for n in 
range(2, 5): + multiplicity += 1 if options.get('M{}'.format(n), None) else 0 + else: + multiplicity = options['multiplicity'] + + # immediately return 0 if the multiplicity modulator is 0 + if options['multiplicity_modulator'][multiplicity - 1] == 0: + print("_pdf ret 0 because of mult mod\n") + return 0 + + ############################################################ + # multiplicity fraction + prob.append(Moe_de_Stefano_2017_multiplicity_fractions(opts)[multiplicity-1]) + + ############################################################ + # always require an IMF for the primary star + # + # NB multiply by M1 to convert dN/dM to dN/dlnM + # (dlnM = dM/M, so 1/dlnM = M/dM) + + # TODO: Create an n-part-powerlaw method that can have breakpoints and slopes. I'm using a three-part powerlaw now. + prob.append(Kroupa2001(options['M1']) * options['M1']) + + if(multiplicity >= 2): + # binary, triple or quadruple system + if not Moecache.get("rinterpolator_log10P", None): + Moecache['rinterpolator_log10P'] = py_rinterpolate.Rinterpolate( + table=Moecache['period_distributions'], # Contains the table of data + nparams=2, # log10M, log10P + ndata=2 # binary, triple + ) + + # TODO: fix this function + # test the period distribution integrates to 1.0 + # if(0){ + # if(!defined $Moecache{'P_integrals'}->{$opts->{'M1'}}) + # { + # my $I = 0.0; + # my $dlogP = 1e-3; + # for(my $logP = 0.0; $logP < 10.0; $logP += $dlogP) + # { + # my $dp_dlogP = # dp / dlogP + # $Moecache{'rinterpolator_log10P'}->interpolate( + # [ + # log10($opts->{'M1'}), + # $logP + # ])->[0]; + # $I += $dp_dlogP * $dlogP; + # #printf "logM1 = %g, logP = %g -> dp/logP = %g\n", + # # log10($opts->{'M1'}), + # # $logP, + # # $dp_dlogP; + # } + # $Moecache{'P_integrals'}->{$opts->{'M1'}} = $I; + + # #printf "M1=%g : P integral %g\n", + # # $opts->{'M1'}, + # # $Moecache{'P_integrals'}->{$opts->{'M1'}}; + # } + # } + + prob.append( + Moecache['rinterpolator_log10P'].interpolate( + [ + np.log10(options['M1']), + np.log10(options['P']) + ] + )[0] + ) + + ############################################################ + # mass ratio (0 < q = M2/M1 < qmax) + # + # we need to construct the q table for the given M1 + # subject to qmin = Mmin/M1 + + # Make a table storing Moe's data for q distributions + + if not Moecache.get("rinterpolator_q", None): + Moecache['rinterpolator_q'] = py_rinterpolate.Rinterpolate( + table=Moecache['q_distributions'], # Contains the table of data + nparams=3, # log10M, log10P, q + ndata=1 # + ) + + # Build the table for q + primary_mass = options['M1'] + secondary_mass = options['M2'] + m_label = "M1" + p_label = "P" + + build_q_table(options, m_label, p_label) + prob.append( + Moecache['rinterpolator_q_given_{}_log10{}'.format(m_label, p_label)].interpolate( + [secondary_mass/primary_mass] + )[0] + ) + + # TODO add eccentricity calculations + + if (multiplicity >= 3): + # triple or quadruple system + + ############################################################ + # orbital period 2 = + # orbital period of star 3 (multiplicity==3) or + # the star3+star4 binary (multiplicity==4) + # + # we assume the same period distribution for star 3 + # (or stars 3 and 4) but with a separation that is >10*a*(1+e) + # where 10*a*(1+e) is the maximum apastron separation of + # stars 1 and 2 + + # TODO: Is this a correct assumption? + max_sep = 10.0 * options['sep'] * (1.0 + options['ecc']) # TODO: isnt this the minimal separation? 
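# Illustrative sketch (not part of the patch): the hierarchy condition used just
# below, written out with Kepler's third law so the unit handling is explicit.
# All names and the Rsun/Msun/day unit convention are assumptions; the real code
# uses the module's calc_period_from_sep() with its own constants.
import math

def min_outer_period_sketch(m_inner_total, m_outer, sep_inner, ecc_inner):
    """Minimum outer period (days) so that sep_outer > 10 * a_inner * (1 + e_inner)."""
    min_sep = 10.0 * sep_inner * (1.0 + ecc_inner)  # outer orbit must be wider than this
    # Kepler III: P = 2 pi sqrt(a^3 / (G Mtot)); G ~ 2942 Rsun^3 Msun^-1 day^-2 (approximate)
    G = 2942.2
    mtot = m_inner_total + m_outer
    return 2.0 * math.pi * math.sqrt(min_sep ** 3 / (G * mtot))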
+ min_P2 = calc_period_from_sep(options['M1+M2'], options['mmin'], max_sep) + + if (options['P2'] < min_P2): + # period is too short : system is not hierarchical + prob.append(0) + else: + # period is long enough that the system is hierarchical + + # hence the separation between the outer star + # and inner binary + options['sep2'] = calc_sep_from_period(options['M3'], options['M1+M2'], options['P2']) + + if not Moecache.get("rinterpolator_log10P2", None): + Moecache['rinterpolator_log10P2'] = py_rinterpolate.Rinterpolate( + table=Moecache['period_distributions'], # Contains the table of data + nparams=2, # log10(M1+M2), log10P # TODO: Really? Is the first column of that table M1+M2? + ndata=2 # binary, triple + ) + + if not Moecache.get('P2_integrals', None): + Moecache['P2_integrals'] = {} + + if not Moecache['P2_integrals'].get(options['M1+M2'], None): + # normalize because $min_per > 0 so not all the periods + # should be included + I_p2 = 0 + dlogP2 = 1e-3 + for logP2 in np.arange(np.log10(min_P2), 10, dlogP2): + # dp_dlogP2 = dp / dlogP + dp_dlogP2 = Moecache['rinterpolator_log10P2'].interpolate( + [ + np.log10(options['M1']), + logP2 + ] + )[0] + I_p2 += dp_dlogP2 * dlogP2 + Moecache['P2_integrals'][options['M1+M2']] = I_p2 + + p_val = Moecache['rinterpolator_log10P'].interpolate( + [ + np.log10(options['M1+M2']), + np.log10(options['P2']) + ] + )[0] + p_val = p_val / Moecache['P2_integrals'] + prob.append(p_val) + + ############################################################ + # mass ratio 2 = q2 = M3 / (M1+M2) + # + # we need to construct the q table for the given M1 + # subject to qmin = Mmin/(M1+M2) + # + # Make a table storing Moe's data for q distributions + # TODO: Check if this is correct. + if not Moecache.get("rinterpolator_q", None): + Moecache['rinterpolator_q'] = py_rinterpolate.Rinterpolate( + table=Moecache['q_distributions'], # Contains the table of data + nparams=3, # log10(M1+M2), log10P2, q + ndata=1 # + ) + + # Build the table for q2 + primary_mass = options['M1+M2'] + secondary_mass = options['M3'] + m_label = "M1+M2" + p_label = "P2" + + build_q_table(options, m_label, p_label) + prob.append( + Moecache['rinterpolator_q_given_{}_log10{}'.format(m_label, p_label)].interpolate( + [secondary_mass/primary_mass] + )[0] + ) + + # TODO: ecc2 + + if(multiplicity == 4): + # quadruple system. + # TODO: Ask Rob about the strructure of the quadruple. Is htis only double binary quadrupples? 
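# Illustrative sketch (not part of the patch): the outer-period distributions in this
# and the previous branch are renormalised over the allowed range [log10(Pmin), 10]
# because the hierarchy condition excludes short periods. A hypothetical stand-alone
# version of that renormalisation, with dp_dlogP as any callable:
import numpy as np

def renormalised_period_pdf_sketch(dp_dlogP, logP, logP_min, logP_max=10.0, dlogP=1e-3):
    """Evaluate dp/dlogP at logP, renormalised over [logP_min, logP_max]."""
    grid = np.arange(logP_min, logP_max, dlogP)
    norm = np.sum([dp_dlogP(x) for x in grid]) * dlogP
    return 0.0 if logP < logP_min else dp_dlogP(logP) / norm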
+ + ############################################################ + # orbital period 3 + # + # we assume the same period distribution for star 4 + # as for any other stars but Pmax must be such that + # sep3 < sep2 * 0.2 + + max_sep3 = 0.2 * options['sep2'] * (1.0 + opts['ecc2']) + max_per3 = calc_period_from_sep(options['M1+M2'], options['mmin'], max_sep3) + + if not Moecache.get("rinterpolator_log10P2", None): + Moecache['rinterpolator_log10P2'] = py_rinterpolate.Rinterpolate( + table=Moecache['period_distributions'], # Contains the table of data + nparams=2, # log10(M1+M2), log10P3 + ndata=2 # binary, triple + ) + + if not Moecache.get('P2_integrals', None): + Moecache['P2_integrals'] = {} + + if not Moecache['P2_integrals'].get(options['M1+M2'], None): + # normalize because $min_per > 0 so not all the periods + # should be included + I_p2 = 0 + dlogP2 = 1e-3 + + for logP2 in np.arange(np.log10(min_P2), 10, dlogP2): + # dp_dlogP2 = dp / dlogP + dp_dlogP2 = Moecache['rinterpolator_log10P2'].interpolate( + [ + np.log10(options['M1']), + logP2 + ] + )[0] + I_p2 += dp_dlogP2 * dlogP2 + Moecache['P2_integrals'][options['M1+M2']] = I_p2 + + # TODO: should this really be the log10P interpolator? + p_val = Moecache['rinterpolator_log10P'].interpolate( + [ + np.log10(options['M1+M2']), + np.log10(options['P2']) + ] + )[0] + + p_val = p_val / Moecache['P2_integrals'] + prob.append(p_val) + + ############################################################ + # mass ratio 2 + # + # we need to construct the q table for the given M1 + # subject to qmin = Mmin/(M1+M2) + # Make a table storing Moe's data for q distributions + # TODO: Check if this is correct. + if not Moecache.get("rinterpolator_q", None): + Moecache['rinterpolator_q'] = py_rinterpolate.Rinterpolate( + table=Moecache['q_distributions'], # Contains the table of data + nparams=3, # log10(M1+M2), log10P2, q + ndata=1 # + ) + + # Build the table for q2 + primary_mass = options['M1+M2'] + secondary_mass = options['M3'] + m_label = "M1+M2" + p_label = "P2" + + build_q_table(options, m_label, p_label) + prob.append( + Moecache['rinterpolator_q_given_{}_log10{}'.format(m_label, p_label)].interpolate( + [secondary_mass/primary_mass] + )[0] + ) + + # todo ecc 3 + + elif (multiplicity not in range(1, 5)): + print("Unknown multiplicity {}\n".format(multiplicity)) + + # TODO: Translate this to get some better output + #printf "PDFPROBS (M1=%g, logP=%g q=%g) : @prob\n", + # $opts->{'M1'}, + # $multiplicity == 2 ? ($opts->{'M2'}/$opts->{'M1'}) : -999, + # $multiplicity == 2 ? log10($opts->{'P'}) : -999; + + # the final probability density is the product of all the + # probability density functions + # TODO: Where do we take into account the stepsize? 
probdens is not a probability if we dont take a stepsize + prob_dens = 1 * np.prod(prob) + + print_info = 1 + if print_info: + print("Probability density") + if multiplicity==1: + print("M1={} q=N/A log10P=N/A ({}): {} -> {}\n".format( + options['M1'], len(prob), str(prob), prob_dens + ) + ) + elif multipliticty==2: + print("M1={} q={} log10P={} ({}): {} -> {}\n".format( + options['M1'], options['M2']/options['M1'], np.log10(options['P']), len(prob), str(prob), prob_dens + ) + ) + elif multiplicity==3: + print("M1={} q={} log10P={} ({}): M3={}} P2={} ecc2={} : {} - {}".format( + options['M1'], options['M2']/options['M1'], np.log10(options['P']), len(prob), options['M3'], np.log10('P2'), np.log10('ecc2'), str(prob), prob_dens + ) + ) + elif multipliticty==4: + print("M1={} q={} log10P={} ({}) : M3={} P2={}} ecc2={} : M4={} P3={} ecc3={} : {} -> {}".format( + options['M1'], options['M2']/options['M1'], np.log10(options['P']), len(prob), options['M3'], np.log10('P2'), np.log10(options['ecc2']), options['M4'], np.log10('P3'), np.log10(options['ecc3']), str(prob), prob_dens + ) + ) + return prob_dens + diff --git a/binarycpython/utils/functions.py b/binarycpython/utils/functions.py index adad1a08ea636b3edd296ef8fc093b05839bb623..e96fc6b36ff7a2eb5fa2cc8e67d52b4c929a4311 100644 --- a/binarycpython/utils/functions.py +++ b/binarycpython/utils/functions.py @@ -25,11 +25,25 @@ from collections import defaultdict from binarycpython import _binary_c_bindings +######################################################## +# Unsorted +######################################################## + +class catchtime(object): + def __enter__(self): + self.t = time.clock() + return self + + def __exit__(self, type, value, traceback): + self.t = time.clock() - self.t + print("Took {}s".format(self.t)) + def is_capsule(o): t = type(o) return t.__module__ == 'builtins' and t.__name__ == 'PyCapsule' + class Capturing(list): def __enter__(self): self._stdout = sys.stdout @@ -40,8 +54,6 @@ class Capturing(list): del self._stringio # free up some memory sys.stdout = self._stdout - - ######################################################## # utility functions ######################################################## diff --git a/binarycpython/utils/grid.py b/binarycpython/utils/grid.py index bc73f755beb8a7106be9b2f467a0fb8995bd6ffa..9e8836f02c0b972a6d6671dc22f33c37dc249495 100644 --- a/binarycpython/utils/grid.py +++ b/binarycpython/utils/grid.py @@ -72,7 +72,6 @@ from binarycpython.utils.functions import ( from binarycpython import _binary_c_bindings import copy - class Population: """ Population Object. Contains all the necessary functions to set up, run and process a @@ -311,6 +310,15 @@ class Population: argline = argline.strip() return argline + def last_grid_variable(self): + """ + Functon that returns the last grid variable (i.e. the one with the highest grid_variable_number) + """ + + number = len(self.grid_options["_grid_variables"]) + for grid_variable in self.grid_options["_grid_variables"]: + print(self.grid_options["_grid_variables"][grid_variable]['grid_variable_number']) + def add_grid_variable( self, name: str, @@ -2653,3 +2661,682 @@ class Population: "binary_c had 0 output. 
Which is strange", self.grid_options['verbosity'], 2) ################################################################################################ + def Moe_de_Stefano_2017(self, options=None): + """ + Class method of the population class + + Takes a dictionary as its only argument + + The dictionary should contain a file to + """ + + # Checks on the input + if not options: + print("Please provide a options dictionary.") + raise ValueError + + if not options.get('file', None): + print("Please set option 'file' to point to the Moe and de Stefano JSON file") + raise ValueError + else: + if not os.path.isfile(options['file']): + print("The provided 'file' Moe and de Stefano JSON file does not seem to exist at {}".format(options['file'])) + raise ValueError + if not options['file'].endswith('.json'): + print("Provided filename does not end with .json") + + # Set default values + default_options = { + 'resolutions': + { + 'M': [ + 100, # M1 (irrelevant if time adaptive) + 40, # M2 (i.e. q) + 10, # M3 currently unused + 10 # M4 currently unused + ], + 'logP': [ + 40, # P2 (binary period) + 2, # P3 (triple period) currently unused + 2 # P4 (quadruple period) currently unused + ], + 'ecc': [ + 10, # e2 (binary eccentricity) currently unused + 10, # e3 (triple eccentricity) currently unused + 10 # e4 (quadruple eccentricity) currently unused + ], + }, + + 'ranges': + { + # stellar masses (Msun) + 'M': [ + 0.07, # maximum brown dwarf mass == minimum stellar mass + 80.0 # (rather arbitrary) upper mass cutoff + ], + + 'q': [ + None, # artificial qmin : set to None to use default + None # artificial qmax : set to None to use default + ], + + 'logP': [ + 0.0, # 0 = log10(1 day) + 8.0 # 8 = log10(10^8 days) + ], + + 'ecc': [ + 0.0, + 1.0 + ] + }, + + # minimum stellar mass + 'Mmin':0.07, + + # multiplicity model (as a function of log10M1) + # + # You can use 'Poisson' which uses the system multiplicty + # given by Moe and maps this to single/binary/triple/quad + # fractions. + # + # Alternatively, 'data' takes the fractions directly + # from the data, but then triples and quadruples are + # combined (and there are NO quadruples). + 'multiplicity_model': 'Poisson', + + # multiplicity modulator: + # [single, binary, triple, quadruple] + # + # e.g. [1,0,0,0] for single stars only + # [0,1,0,0] for binary stars only + # + # defaults to [1,1,1,1] i.e. all types + # + 'multiplicity_modulator': [ + 1, # single + 1, # binary + 1, # triple + 1, # quadruple + ], + + # given a mix of multiplities, you can either (noting that + # here (S,B,T,Q) = appropriate modulator * model(S,B,T,Q) ) + # + # 'norm' : normalize so the whole population is 1.0 + # after implementing the appropriate fractions + # S/(S+B+T+Q), B/(S+B+T+Q), T/(S+B+T+Q), Q/(S+B+T+Q) + # + # 'raw' : stick to what is predicted, i.e. + # S/(S+B+T+Q), B/(S+B+T+Q), T/(S+B+T+Q), Q/(S+B+T+Q) + # without normalization + # (in which case the total probability < 1.0 unless + # all you use single, binary, triple and quadruple) + # + # 'merge' : e.g. if you only have single and binary, + # add the triples and quadruples to the binaries, so + # binaries represent all multiple systems + # ... + # *** this is canonical binary population synthesis *** + # + # Note: if multiplicity_modulator == [1,1,1,1] this + # option does nothing (equivalent to 'raw'). 
+ # + # + # note: if you only set one multiplicity_modulator + # to 1, and all the others to 0, then normalizing + # will mean that you effectively have the same number + # of stars as single, binary, triple or quad (whichever + # is non-zero) i.e. the multiplicity fraction is ignored. + # This is probably not useful except for + # testing purposes or comparing to old grids. + 'normalize_multiplicities': 'merge', + + # q extrapolation (below 0.15) method + # none + # flat + # linear2 + # plaw2 + # nolowq + 'q_low_extrapolation_method': 'linear2', + 'q_high_extrapolation_method': 'linear2', + } + + # Take the option dictionary that was given and override. + options = {**default_options, **options} + + # Write to a file + with open('/tmp/moeopts.dat', 'w') as f: + f.write(json.dumps(options, indent=4)) + + # Read input data and Clean up the data if there are whitespaces around the keys + with open(options['file'], 'r') as data_filehandle: + datafile_data = data_filehandle.read() + datafile_data = datafile_data.replace("\" ", "\"") + datafile_data = datafile_data.replace(" \"", "\"") + datafile_data = datafile_data.replace(" \"", "\"") + json_data = json.loads(datafile_data) + + # entry of log10M1 is a list containing 1 dict. We can take the dict out of the list + json_data['log10M1'] = json_data['log10M1'][0] + + # Get all the masses + logmasses = sorted(json_data['log10M1'].keys()) + if not logmasses: + print("The table does not contain masses") + raise ValueError + with open("/tmp/moe.log", 'w') as logfile: + logfile.write("logâ‚â‚€Masses(M☉) {}\n".format(logmasses)) + + # Get all the periods and see if they are all consistently present + logperiods = [] + for logmass in logmasses: + if not logperiods: + logperiods = sorted(json_data['log10M1'][logmass]['logP']) + dlog10P = float(logperiods[1]) - float(logperiods[0]) + + current_logperiods = sorted(json_data['log10M1'][logmass]['logP']) + + if not (logperiods == current_logperiods): + print("Period values are not consistent throughout the dataset") + raise ValueError + + + ############################################################ + # log10period binwidth : of course this assumes a fixed + # binwidth, so we check for this too. 
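# Illustrative sketch (not part of the patch): the fixed log10P bin-width assumption
# checked element-by-element in the loop below can also be verified in one step with
# numpy. The helper name is hypothetical.
import numpy as np

def has_uniform_spacing_sketch(logperiods, atol=1e-12):
    """True if the log10(P) grid has a constant bin width."""
    values = np.array(sorted(float(p) for p in logperiods))
    steps = np.diff(values)
    if steps.size == 0:
        return True
    return bool(np.allclose(steps, steps[0], atol=atol))

# e.g. has_uniform_spacing_sketch(current_logperiods) should be True for the
# Moe & de Stefano table; if it is not, the dataset is inconsistent.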
+ + for i in range(len(current_logperiods)-1): + if not dlog10P == (float(current_logperiods[i+1]) - float(current_logperiods[i])): + print("Period spacing is not consistent throughout the dataset") + raise ValueError + + with open("/tmp/moe.log", 'a') as logfile: + logfile.write("logâ‚â‚€Periods(days) {}\n".format(logperiods)) + + # Fill the global dict + for logmass in logmasses: + # print("Logmass: {}".format(logmass)) + + # Create the multiplicity table + if not Moecache.get('multiplicity_table', None): + Moecache['multiplicity_table'] = [] + + # multiplicity as a function of primary mass + Moecache['multiplicity_table'].append( + [ + float(logmass), + json_data['log10M1'][logmass]['f_multi'], + json_data['log10M1'][logmass]['single star fraction'], + json_data['log10M1'][logmass]['binary star fraction'], + json_data['log10M1'][logmass]['triple/quad star fraction'], + ] + ) + + ############################################################ + # a small log10period which we can shift just outside the + # table to force integration out there to zero + epslog10P = 1e-8 * dlog10P + + ############################################################ + # loop over either binary or triple-outer periods + first = 1; + + # Go over the periods + for logperiod in logperiods: + # print("logperiod: {}".format(logperiod)) + ############################################################ + # distributions of binary and triple star fractions + # as a function of mass, period. + # + # Note: these should be per unit log10P, hence we + # divide by $dlog10P + + if first: + first = 0 + + # Create the multiplicity table + if not Moecache.get('period_distributions', None): + Moecache['period_distributions'] = [] + + ############################################################ + # lower bound the period distributions to zero probability + Moecache['period_distributions'].append( + [ + float(logmass), + float(logperiod) - 0.5 * dlog10P - epslog10P, + 0.0, + 0.0 + ] + ) + Moecache['period_distributions'].append( + [ + float(logmass), + float(logperiod) - 0.5 * dlog10P, + json_data['log10M1'][logmass]['logP'][logperiod]['normed_bin_frac_p_dist']/dlog10P, + json_data['log10M1'][logmass]['logP'][logperiod]['normed_tripquad_frac_p_dist']/dlog10P + ] + ) + Moecache['period_distributions'].append( + [ + float(logmass), + float(logperiod), + json_data['log10M1'][logmass]['logP'][logperiod]['normed_bin_frac_p_dist']/dlog10P, + json_data['log10M1'][logmass]['logP'][logperiod]['normed_tripquad_frac_p_dist']/dlog10P + ] + ) + + ############################################################ + # distributions as a function of mass, period, q + # + # First, get a list of the qs given by Moe + # + qs = sorted(json_data['log10M1'][logmass]['logP'][logperiod]['q']) + qdata = {} + I_q = 0.0 + + # get the q distribution from M&S, which ranges 0.15 to 0.95 + for q in qs: + val = json_data['log10M1'][logmass]['logP'][logperiod]['q'][q] + qdata[q] = val + I_q += val + + # hence normalize to 1.0 + for q in qs: + qdata[q] = qdata[q]/I_q + + # Create the multiplicity table + if not Moecache.get('q_distributions', None): + Moecache['q_distributions'] = [] + + for q in qs: + Moecache['q_distributions'].append( + [ + float(logmass), + float(logperiod), + float(q), + qdata[q] + ] + ) + + ############################################################ + # eccentricity distributions as a function of mass, period, ecc + eccs = sorted(json_data['log10M1'][logmass]['logP'][logperiod]['e']) + ecc_data = {} + I_e = 0 + + # Read out the data + for ecc in eccs: + 
val = json_data['log10M1'][logmass]['logP'][logperiod]['e'][ecc] + ecc_data[ecc] = val + I_e += val + + # Normalize the data + for ecc in eccs: + ecc_data[ecc] = ecc_data[ecc]/I_e + + # Create the multiplicity table + if not Moecache.get('ecc_distributions', None): + Moecache['ecc_distributions'] = [] + + for ecc in eccs: + Moecache['ecc_distributions'].append( + [ + float(logmass), + float(logperiod), + float(ecc), + ecc_data[ecc] + ] + ) + + ############################################################ + # upper bound the period distributions to zero probability + Moecache['period_distributions'].append( + [ + float(logmass), + float(logperiods[-1]) + 0.5 * dlog10P, + json_data['log10M1'][logmass]['logP'][logperiods[-1]]['normed_bin_frac_p_dist']/dlog10P, + json_data['log10M1'][logmass]['logP'][logperiods[-1]]['normed_tripquad_frac_p_dist']/dlog10P + ] + ) + Moecache['period_distributions'].append( + [ + float(logmass), + float(logperiods[-1]) + 0.5 * dlog10P + epslog10P, + 0.0, + 0.0 + ] + ) + + # Write to logfile + with open("/tmp/moecache.json", 'w') as cache_filehandle: + cache_filehandle.write(json.dumps(Moecache, indent=4)) + + # TODO: remove this and make the plotting of the multiplicity fractions more modular + # options['M1'] = 10 + + # # multiplicity_fractions = Moe_de_Stefano_2017_multiplicity_fractions(options) + + # import matplotlib.pyplot as plt + + # mass_range = range(1, 80) + + # multiplicity_dict = {} + # for mass in mass_range: + # options['M1'] = mass + # multiplicity_fractions = Moe_de_Stefano_2017_multiplicity_fractions(options) + # multiplicity_dict[mass] = multiplicity_fractions + + # single_values = [multiplicity_dict[key][0] for key in multiplicity_dict.keys()] + # binary_values = [multiplicity_dict[key][1] for key in multiplicity_dict.keys()] + # triple_values = [multiplicity_dict[key][2] for key in multiplicity_dict.keys()] + # quad_values = [multiplicity_dict[key][3] for key in multiplicity_dict.keys()] + + # plt.plot(mass_range, single_values, label='single') + # plt.plot(mass_range, binary_values, label='binary') + # plt.plot(mass_range, triple_values, label='triple') + # plt.plot(mass_range, quad_values, label='Quadruple') + # plt.legend() + # plt.xscale('log') + # plt.show() + + ############################################################ + # construct the grid here + # + + ############################################################ + # make a version of the options which can be passed to + # the probability density function in the gridcode + # TODO: check if that works, if we just pass the options as a dict + + + ############################################################ + # first, the multiplicity, this is 1,2,3,4, ... + # for singles, binaries, triples, quadruples, ... 
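# Illustrative sketch (not part of the patch): the highest multiplicity switched on
# by the modulator, which the loop below computes imperatively. Hypothetical
# functional equivalent:
def max_multiplicity_sketch(multiplicity_modulator):
    """Return 1-4 for [single, binary, triple, quadruple] flags, 0 if all are off."""
    switched_on = [i + 1 for i, mod in enumerate(multiplicity_modulator) if mod > 0]
    return max(switched_on) if switched_on else 0

# e.g. max_multiplicity_sketch([1, 1, 0, 0]) == 2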
+ max_multiplicity = 0 + for i in range(1, 5): + mod = options['multiplicity_modulator'][i-1] + print("Check mod {} = {}".format(i, mod)) + if mod > 0: + max_multiplicity = i + print("Max multiplicity = {}".format(max_multiplicity)) + + ###### + # Setting up the grid variables + + # Multiplicity + self.add_grid_variable( + name='multiplicity', + longname='multiplicity', + range=[1, max_multiplicity], + resolution=1, + spacingfunc='number(1.0)', + precode='self.grid_options["multiplicity"] = multiplicity', + condition='if({}[{}-1] > 0'.format(str(options['multiplicity_modulator'])), + gridtype='edge', + noprobdist=1, + nophasevol=1 + ) + + ############################################################ + # always require M1, for all systems + # + + # TODO: put in option for the time-adaptive grid. + + # log-spaced m1 with given resolution + self.add_grid_variable( + name='lnm1', + longname='Primary mass', + resolution="options['resolutions']['M'][0]", + spacingsfunc="const(np.log({}), np.log({}),0 {})".format(options['ranges']['M'][0], options['ranges']['M'][1], options['resolutions']['M'][0]), + range=[ + 'np.log({})'.format(options['ranges']['M'][0]), + 'np.log({})'.format(options['ranges']['M'][1]) + ], + gridtype='centred', + dphasevol='dlnm1', + precode='m1 = np.exp(lnm1); options["M1"]=m1', + probdist='self.Moe_de_Stefano_2017_pdf(options) if multiplicity == 1 else 1' + ) + + # Go to higher multiplicities + if (max_multiplicity >= 2): + # binaries: period + self.add_grid_variable( + name='log10per', + longname='log10(Orbital_Period)', + resolution=options['resolutions']['logP'][0], + probdist=1.0, + condition='self.grid_options["grid_options"]["multiplicity"] >= 2', + branchpoint=1, + gridtype='centred', + dphasevol='({} * dlog10per)'.format(LOG_LN_CONVERTER), + valuerange=[ + options['ranges']['logP'][0], + options['ranges']['logP'][1] + ], + spacingfunc="const({}, {}, {})".format(options['ranges']['logP'][0], options['ranges']['logP'][1], options['resolutions']['logP'][0]), + precode="""per = 10.0**log10per + qmin={}/m1 + qmax=self.maximum_mass_ratio_for_RLOF(m1, per) + """.format(options.get('Mmin', 0.07)) + ) + + # binaries: mass ratio + self.add_grid_variable( + name='q', + longname='Mass ratio', + valuerange=[ + options['ranges']['q'][0] if options.get('ranges', {}).get('q', None) else "options.get('Mmin', 0.07)/m1", + options['ranges']['q'][1] if options.get('ranges', {}).get('q', None) else "qmax" + ], + resolution=options['resolutions']['M'][1], + probdist=1, + gridtype='centred', + dphasevol='dq', + precode=""" + m2 = q * m1 + sep = calc_sep_from_period(m1, m2, per) + """, + spacingfunc="const({}, {}, {})".format( + options['ranges']['q'][0] if options.get('ranges', {}).get('q', None) else "options.get('Mmin', 0.07)/m1", + options['ranges']['q'][1] if options.get('ranges', {}).get('q', None) else "qmax", + options['resolutions']['M'][1] + ) + ) + + # (optional) binaries: eccentricity + if options['resolutions']['ecc'][0] > 0: + self.add_grid_variable( + name='ecc', + longname='Eccentricity', + resolution=options['resolutions']['ecc'][0], + probdist=1, + gridtype='centred', + dphasevol='decc', + precode='eccentricity=ecc', + valuerange=[ + options['ranges']['ecc'][0], # Just fail if not defined. + options['ranges']['ecc'][1] + ], + spacingfunc="const({}, {}, {})".format( + options['ranges']['ecc'][0], # Just fail if not defined. 
+ options['ranges']['ecc'][1], + options['resolutions']['ecc'][0] + ) + ) + + # Now for triples and quadruples + if (max_multiplicity >= 3): + # Triple: period + self.add_grid_variable( + name='log10per2', + longname='log10(Orbital_Period2)', + resolution=options['resolutions']['logP'][1], + probdist=1.0, + condition='self.grid_options["grid_options"]["multiplicity"] >= 3', + branchpoint=1, + gridtype='centred', + dphasevol='({} * dlog10per2)'.format(LOG_LN_CONVERTER), + valuerange=[ + options['ranges']['logP'][0], + options['ranges']['logP'][1] + ], + spacingfunc="const({}, {}, {})".format(options['ranges']['logP'][0], options['ranges']['logP'][1], options['resolutions']['logP'][1]), + precode="""per2 = 10.0**log10per2 + q2min={}/(m1+m2) + q2max=self.maximum_mass_ratio_for_RLOF(m1+m2, per2) + """.format(options.get('Mmin', 0.07)) + ) + + # Triples: mass ratio + # Note, the mass ratio is M_outer/M_inner + print("M2 resolution {}".format(options['resolutions']['M'][2])) + self.add_grid_variable( + name='q2', + longname='Mass ratio outer/inner', + valuerange=[ + options['ranges']['q'][0] if options.get('ranges', {}).get('q', None) else "options.get('Mmin', 0.07)/(m1+m2)", + options['ranges']['q'][1] if options.get('ranges', {}).get('q', None) else "q2max" + ], + resolution=options['resolutions']['M'][2], + probdist=1, + gridtype='centred', + dphasevol='dq2', + precode=""" + m3 = q2 * (m1 + m2) + sep2 = calc_sep_from_period((m1+m2), m3, per2) + eccentricity2=0 + """, + spacingfunc="const({}, {}, {})".format( + options['ranges']['q'][0] if options.get('ranges', {}).get('q', None) else "options.get('Mmin', 0.07)/(m1+m2)", + options['ranges']['q'][1] if options.get('ranges', {}).get('q', None) else "q2max", + options['resolutions']['M'][2] + ) + ) + + # (optional) triples: eccentricity + if options['resolutions']['ecc'][1] > 0: + self.add_grid_variable( + name='ecc2', + longname='Eccentricity of the triple', + resolution=options['resolutions']['ecc'][1], + probdist=1, + gridtype='centred', + dphasevol='decc2', + precode='eccentricity2=ecc2', + valuerange=[ + options['ranges']['ecc'][0], # Just fail if not defined. + options['ranges']['ecc'][1] + ], + spacingfunc="const({}, {}, {})".format( + options['ranges']['ecc'][0], # Just fail if not defined. 
+                    options['ranges']['ecc'][1],
+                    options['resolutions']['ecc'][2]
+                )
+            )
+
+        # Now we are at the last part.
+        # Here we combine all the information that we calculated and update the options dictionary.
+        # This will then be passed to Moe_de_Stefano_2017_pdf to calculate the real probability.
+        # The trick we use is to dump the options dict as a string, strip the outer braces and add some keys to it:
+
+        # and finally the probability calculator
+        # NOTE: this requires last_grid_variable() to return the last grid-variable dict
+        # (at the moment it only prints the grid variable numbers).
+        self.last_grid_variable()['probdist'] = """
+        self.Moe_de_Stefano_2017_pdf({{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}}})
+        """.format(
+            str(options)[1:-1],
+            '"multiplicity": multiplicity',
+
+            '"M1": m1',
+            '"M2": m2',
+            '"M3": m3',
+            '"M4": m4',
+
+            '"P": per',
+            '"P2": per2',
+            '"P3": per3',
+
+            '"ecc": eccentricity',
+            '"ecc2": eccentricity2',
+            '"ecc3": eccentricity3',
+        )
+
+        print(json.dumps(self.grid_options, indent=4))
+
+# Moe_de_Stefano_2017(options={'file': '/home/david/projects/binary_c_root/binary_c/data/MdS_data.json.v2', 'testing': '1', 'resolutions': {'M': [1]}})
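# Illustrative usage sketch (not part of the patch), assuming the patch is applied:
# set up a Population and let Moe_de_Stefano_2017() build the grid variables from the
# JSON data file. The file path is the machine-specific one from the commented-out
# example above; the multiplicity_modulator value is just an example setting.
from binarycpython.utils.grid import Population

pop = Population()
pop.Moe_de_Stefano_2017(
    options={
        "file": "/home/david/projects/binary_c_root/binary_c/data/MdS_data.json.v2",
        "multiplicity_modulator": [1, 1, 0, 0],  # singles and binaries only
    }
)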