120 changes: 68 additions & 52 deletions bilby/core/prior/analytical.py
@@ -169,8 +169,10 @@ def cdf(self, val):
_cdf = (np.log(val / self.minimum) /
np.log(self.maximum / self.minimum))
else:
_cdf = np.atleast_1d(val ** (self.alpha + 1) - self.minimum ** (self.alpha + 1)) / \
(self.maximum ** (self.alpha + 1) - self.minimum ** (self.alpha + 1))
_cdf = (
(val ** (self.alpha + 1) - self.minimum ** (self.alpha + 1))
/ (self.maximum ** (self.alpha + 1) - self.minimum ** (self.alpha + 1))
)
_cdf = np.minimum(_cdf, 1)
_cdf = np.maximum(_cdf, 0)
return _cdf
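As a sanity check, the refactored closed form can be compared against direct numerical integration of the power-law PDF. A standalone sketch (parameter values and helper names are mine, not bilby API):

```python
import numpy as np
from scipy.integrate import quad

alpha, minimum, maximum = -2.5, 1.0, 100.0

def powerlaw_pdf(x):
    # PDF proportional to x**alpha, normalised on [minimum, maximum]
    norm = (alpha + 1) / (maximum ** (alpha + 1) - minimum ** (alpha + 1))
    return norm * x ** alpha

def powerlaw_cdf(x):
    # the closed form from the diff above, clipped to [0, 1]
    _cdf = (
        (x ** (alpha + 1) - minimum ** (alpha + 1))
        / (maximum ** (alpha + 1) - minimum ** (alpha + 1))
    )
    return np.clip(_cdf, 0, 1)

for x in (1.5, 10.0, 50.0):
    numeric, _ = quad(powerlaw_pdf, minimum, x)
    assert np.isclose(powerlaw_cdf(x), numeric)
```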
@@ -367,16 +369,16 @@ def ln_prob(self, val):
return np.nan_to_num(- np.log(2 * np.abs(val)) - np.log(np.log(self.maximum / self.minimum)))

def cdf(self, val):
val = np.atleast_1d(val)
norm = 0.5 / np.log(self.maximum / self.minimum)
cdf = np.zeros((len(val)))
lower_indices = np.where(np.logical_and(-self.maximum <= val, val <= -self.minimum))[0]
upper_indices = np.where(np.logical_and(self.minimum <= val, val <= self.maximum))[0]
cdf[lower_indices] = -norm * np.log(-val[lower_indices] / self.maximum)
cdf[np.where(np.logical_and(-self.minimum < val, val < self.minimum))] = 0.5
cdf[upper_indices] = 0.5 + norm * np.log(val[upper_indices] / self.minimum)
cdf[np.where(self.maximum < val)] = 1
return cdf
_cdf = (
-norm * np.log(abs(val) / self.maximum)
* (val <= -self.minimum) * (val >= -self.maximum)
+ (0.5 + norm * np.log(abs(val) / self.minimum))
* (val >= self.minimum) * (val <= self.maximum)
+ 0.5 * (val > -self.minimum) * (val < self.minimum)
+ 1 * (val > self.maximum)
)
return _cdf
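The rewrite trades boolean indexing into a preallocated array for masked arithmetic, which also keeps scalar inputs scalar. A standalone sketch of the idiom (helper name and bounds are mine); one caveat is that every branch is evaluated at every input, so a singular point such as `val = 0`, where `np.log(0)` appears, propagates `inf * 0 = nan`:

```python
import numpy as np

def symmetric_log_uniform_cdf(val, minimum=1.0, maximum=10.0):
    # hypothetical standalone version of the method above
    norm = 0.5 / np.log(maximum / minimum)
    return (
        -norm * np.log(np.abs(val) / maximum)
        * (val <= -minimum) * (val >= -maximum)
        + (0.5 + norm * np.log(np.abs(val) / minimum))
        * (val >= minimum) * (val <= maximum)
        + 0.5 * (val > -minimum) * (val < minimum)
        + 1 * (val > maximum)
    )

print(symmetric_log_uniform_cdf(5.0))        # scalar in, scalar out
print(symmetric_log_uniform_cdf(np.array([-5.0, 0.5, 5.0, 20.0])))
# caveat: val = 0.0 hits np.log(0) in both masked log branches, and
# inf * 0 is nan, whereas the old indexed assignment returned 0.5
```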


class Cosine(Prior):
@@ -426,10 +428,12 @@ def prob(self, val):
return np.cos(val) / 2 * self.is_in_prior_range(val)

def cdf(self, val):
_cdf = np.atleast_1d((np.sin(val) - np.sin(self.minimum)) /
(np.sin(self.maximum) - np.sin(self.minimum)))
_cdf[val > self.maximum] = 1
_cdf[val < self.minimum] = 0
_cdf = (
(np.sin(val) - np.sin(self.minimum))
/ (np.sin(self.maximum) - np.sin(self.minimum))
* (val >= self.minimum) * (val <= self.maximum)
+ 1 * (val > self.maximum)
)
return _cdf


@@ -480,10 +484,12 @@ def prob(self, val):
return np.sin(val) / 2 * self.is_in_prior_range(val)

def cdf(self, val):
_cdf = np.atleast_1d((np.cos(val) - np.cos(self.minimum)) /
(np.cos(self.maximum) - np.cos(self.minimum)))
_cdf[val > self.maximum] = 1
_cdf[val < self.minimum] = 0
_cdf = (
(np.cos(val) - np.cos(self.minimum))
/ (np.cos(self.maximum) - np.cos(self.minimum))
* (val >= self.minimum) * (val <= self.maximum)
+ 1 * (val > self.maximum)
)
return _cdf


@@ -625,11 +631,13 @@ def prob(self, val):
/ self.sigma / self.normalisation * self.is_in_prior_range(val)

def cdf(self, val):
val = np.atleast_1d(val)
_cdf = (erf((val - self.mu) / 2 ** 0.5 / self.sigma) - erf(
(self.minimum - self.mu) / 2 ** 0.5 / self.sigma)) / 2 / self.normalisation
_cdf[val > self.maximum] = 1
_cdf[val < self.minimum] = 0
_cdf = (
(
erf((val - self.mu) / 2 ** 0.5 / self.sigma)
- erf((self.minimum - self.mu) / 2 ** 0.5 / self.sigma)
) / 2 / self.normalisation * (val >= self.minimum) * (val <= self.maximum)
+ 1 * (val > self.maximum)
)
return _cdf


@@ -1367,6 +1375,8 @@ def __init__(self, sigma, mu=None, r=None, name=None, latex_label=None,
raise ValueError("For the Fermi-Dirac prior the values of sigma and r "
"must be positive.")

self.expr = np.exp(self.r)

def rescale(self, val):
"""
'Rescale' a sample from the unit line element to the appropriate Fermi-Dirac prior.
@@ -1384,21 +1394,8 @@ def rescale(self, val):
.. [1] M. Pitkin, M. Isi, J. Veitch & G. Woan, `arXiv:1705.08978v1
<https://arxiv.org/abs/1705.08978v1>`_, 2017.
"""
inv = (-np.exp(-1. * self.r) + (1. + np.exp(self.r)) ** -val +
np.exp(-1. * self.r) * (1. + np.exp(self.r)) ** -val)

# if val is 1 this will cause inv to be negative (due to numerical
# issues), so return np.inf
if isinstance(val, (float, int)):
if inv < 0:
return np.inf
else:
return -self.sigma * np.log(inv)
else:
idx = inv >= 0.
tmpinv = np.inf * np.ones(len(np.atleast_1d(val)))
tmpinv[idx] = -self.sigma * np.log(inv[idx])
return tmpinv
inv = -1 / self.expr + (1 + self.expr)**-val + (1 + self.expr)**-val / self.expr
return -self.sigma * np.log(np.maximum(inv, 0))
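The `np.maximum(inv, 0)` clamp replaces the explicit negative-`inv` branching that was removed above. A small sketch of why (parameter values are mine):

```python
import numpy as np

sigma, r = 1.0, 5.0
expr = np.exp(r)

val = 1.0   # upper edge of the unit interval
inv = -1 / expr + (1 + expr) ** -val + (1 + expr) ** -val / expr
print(inv)  # analytically 0; numerically a tiny value of either sign

with np.errstate(divide="ignore"):
    # a slightly negative inv is clamped to 0, log(0) = -inf, and the
    # rescaled sample becomes -sigma * (-inf) = +inf, matching the
    # branch the old code handled explicitly
    print(-sigma * np.log(np.maximum(-1e-18, 0)))   # inf
```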

def prob(self, val):
"""Return the prior probability of val.
@@ -1411,7 +1408,11 @@ def prob(self, val):
=======
float: Prior probability of val
"""
return np.exp(self.ln_prob(val))
return (
(np.exp((val - self.mu) / self.sigma) + 1)**-1
/ (self.sigma * np.log1p(self.expr))
* (val >= self.minimum)
)

def ln_prob(self, val):
"""Return the log prior probability of val.
@@ -1424,19 +1425,34 @@ def ln_prob(self, val):
=======
Union[float, array_like]: Log prior probability of val
"""
return np.log(self.prob(val))

norm = -np.log(self.sigma * np.log(1. + np.exp(self.r)))
if isinstance(val, (float, int)):
if val < self.minimum:
return -np.inf
else:
return norm - np.logaddexp((val / self.sigma) - self.r, 0.)
else:
val = np.atleast_1d(val)
lnp = -np.inf * np.ones(len(val))
idx = val >= self.minimum
lnp[idx] = norm - np.logaddexp((val[idx] / self.sigma) - self.r, 0.)
return lnp
def cdf(self, val):
"""
Evaluate the CDF of the Fermi-Dirac distribution using a slightly
modified form of Equation 23 of [1]_.

Parameters
==========
val: Union[float, int, array_like]
The value(s) to evaluate the CDF at

Returns
=======
Union[float, array_like]:
The CDF value(s)

References
==========

.. [1] M. Pitkin, M. Isi, J. Veitch & G. Woan, `arXiv:1705.08978v1
<https://arxiv.org/abs/1705.08978v1>`_, 2017.
"""
result = (
(np.logaddexp(0, -self.r) - np.logaddexp(-val / self.sigma, -self.r))
/ np.logaddexp(0, self.r)
)
return np.clip(result, 0, 1)
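The `logaddexp` form keeps the CDF numerically stable for large `r`. A quick cross-check against direct integration of the PDF (arbitrary parameters, helper names mine):

```python
import numpy as np
from scipy.integrate import quad

sigma, r = 2.0, 4.0
mu = sigma * r

def fd_prob(x):
    # the Fermi-Dirac PDF as written in the diff above
    return (np.exp((x - mu) / sigma) + 1) ** -1 / (sigma * np.log1p(np.exp(r)))

def fd_cdf(x):
    # the logaddexp expression from the new cdf method
    return (
        (np.logaddexp(0, -r) - np.logaddexp(-x / sigma, -r))
        / np.logaddexp(0, r)
    )

for x in (1.0, mu, 5 * sigma + mu):
    numeric, _ = quad(fd_prob, 0, x)
    assert np.isclose(fd_cdf(x), numeric)
```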


class Categorical(Prior):
7 changes: 5 additions & 2 deletions bilby/core/prior/base.py
@@ -5,14 +5,14 @@

import numpy as np
import scipy.stats
from scipy.interpolate import interp1d

from ..utils import (
infer_args_from_method,
BilbyJsonEncoder,
decode_bilby_json,
logger,
get_dict_with_properties,
WrappedInterp1d as interp1d,
)


@@ -178,7 +178,10 @@ def cdf(self, val):
cdf = cumulative_trapezoid(pdf, x, initial=0)
interp = interp1d(x, cdf, assume_sorted=True, bounds_error=False,
fill_value=(0, 1))
return interp(val)
output = interp(val)
if isinstance(val, (int, float)):
output = float(output)
return output
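For context, the surrounding method builds a generic numeric CDF by trapezoid-integrating the prior's PDF and interpolating; the new lines restore scalar-in/scalar-out behaviour. The pattern in isolation (a sketch with a toy PDF, not bilby's exact code):

```python
import numpy as np
from scipy.integrate import cumulative_trapezoid
from scipy.interpolate import interp1d

x = np.linspace(0.0, 1.0, 1001)
pdf = 2 * x                      # toy PDF on [0, 1]
cdf = cumulative_trapezoid(pdf, x, initial=0)
interp = interp1d(x, cdf, assume_sorted=True, bounds_error=False,
                  fill_value=(0, 1))

val = 0.5
output = interp(val)
if isinstance(val, (int, float)):
    output = float(output)       # scalar in, scalar out
print(output)                    # ~0.25
```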

def ln_prob(self, val):
"""Return the prior ln probability of val, this should be overwritten
54 changes: 42 additions & 12 deletions bilby/core/prior/dict.py
@@ -487,7 +487,9 @@ def check_efficiency(n_tested, n_valid):
def normalize_constraint_factor(
self, keys, min_accept=10000, sampling_chunk=50000, nrepeats=10
):
if keys in self._cached_normalizations.keys():
if len(self.constraint_keys) == 0:
return 1
elif keys in self._cached_normalizations.keys():
return self._cached_normalizations[keys]
else:
factor_estimates = [
@@ -549,8 +551,10 @@ def check_prob(self, sample, prob):
return 0.0
else:
constrained_prob = np.zeros_like(prob)
keep = np.array(self.evaluate_constraints(sample), dtype=bool)
constrained_prob[keep] = prob[keep] * ratio
in_bounds = np.isfinite(prob)
subsample = {key: sample[key][in_bounds] for key in sample}
keep = np.array(self.evaluate_constraints(subsample), dtype=bool)
constrained_prob[in_bounds] = prob[in_bounds] * keep * ratio
return constrained_prob
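The new masking evaluates constraints only where the unconstrained probability is finite, and applies them by multiplication rather than boolean indexing. The pattern standalone (the constraint and values are hypothetical):

```python
import numpy as np

def constraint(sample):
    # hypothetical constraint: require x + y < 1
    return sample["x"] + sample["y"] < 1

sample = {"x": np.array([0.2, 0.8, np.nan]), "y": np.array([0.3, 0.9, 0.1])}
prob = np.array([1.5, 2.0, np.nan])
ratio = 1.0

constrained_prob = np.zeros_like(prob)
in_bounds = np.isfinite(prob)
# constraints never see the out-of-bounds entries
subsample = {key: sample[key][in_bounds] for key in sample}
keep = np.asarray(constraint(subsample), dtype=bool)
constrained_prob[in_bounds] = prob[in_bounds] * keep * ratio
print(constrained_prob)          # [1.5 0.  0. ]
```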

def ln_prob(self, sample, axis=None, normalized=True):
@@ -591,8 +595,10 @@ def check_ln_prob(self, sample, ln_prob, normalized=True):
return -np.inf
else:
constrained_ln_prob = -np.inf * np.ones_like(ln_prob)
keep = np.array(self.evaluate_constraints(sample), dtype=bool)
constrained_ln_prob[keep] = ln_prob[keep] + np.log(ratio)
in_bounds = np.isfinite(ln_prob)
subsample = {key: sample[key][in_bounds] for key in sample}
keep = np.log(np.array(self.evaluate_constraints(subsample), dtype=bool))
constrained_ln_prob[in_bounds] = ln_prob[in_bounds] + keep + np.log(ratio)
return constrained_ln_prob

def cdf(self, sample):
@@ -653,9 +659,7 @@ def test_has_redundant_keys(self):
del temp[key]
if temp.test_redundancy(key, disable_logging=True):
logger.warning(
"{} is a redundant key in this {}.".format(
key, self.__class__.__name__
)
f"{key} is a redundant key in this {self.__class__.__name__}."
)
redundant = True
return redundant
@@ -863,17 +867,43 @@ def rescale(self, keys, theta):
self._check_resolved()
self._update_rescale_keys(keys)
result = dict()
joint = dict()
for key, index in zip(
self.sorted_keys_without_fixed_parameters, self._rescale_indexes
):
result[key] = self[key].rescale(
theta[index], **self.get_required_variables(key)
)
self[key].least_recently_sampled = result[key]
samples = []
for key in keys:
samples += list(np.asarray(result[key]).flatten())
return samples
if isinstance(self[key], JointPrior) and self[key].dist.distname not in joint:
joint[self[key].dist.distname] = [key]
elif isinstance(self[key], JointPrior):
joint[self[key].dist.distname].append(key)
for names in joint.values():
# this is needed to unpack how joint prior rescaling works.
# For example, for a joint prior over {a, b, c, d} we might
# get the following, based on the order within the joint prior:
# {a: [], b: [], c: [1, 2, 3, 4], d: []}
# -> [1, 2, 3, 4]
# -> {a: 1, b: 2, c: 3, d: 4}
values = list()
for key in names:
values = np.concatenate([values, result[key]])
for key, value in zip(names, values):
result[key] = value

def safe_flatten(value):
"""
this is gross but can be removed whenever we switch to returning
arrays; flatten converts 0-d arrays to 1-d, so scalars have to be
special cased
"""
if isinstance(value, (float, int)):
return value
else:
return value.flatten()

return [safe_flatten(result[key]) for key in keys]
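The concatenate-then-zip step redistributes the array a joint prior returns on one of its keys back across all of its keys. The comment's example, run standalone:

```python
import numpy as np

result = {"a": [], "b": [], "c": [1, 2, 3, 4], "d": []}
names = ["a", "b", "c", "d"]

values = list()
for key in names:
    # gather everything the joint prior produced, in key order
    values = np.concatenate([values, result[key]])
for key, value in zip(names, values):
    result[key] = value

print(result)   # {'a': 1.0, 'b': 2.0, 'c': 3.0, 'd': 4.0}
```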

def _update_rescale_keys(self, keys):
if not keys == self._least_recently_rescaled_keys:
8 changes: 2 additions & 6 deletions bilby/core/prior/interpolated.py
@@ -1,8 +1,7 @@
import numpy as np
from scipy.interpolate import interp1d

from .base import Prior
from ..utils import logger
from ..utils import logger, WrappedInterp1d as interp1d


class Interped(Prior):
@@ -86,10 +85,7 @@ def rescale(self, val):

This maps to the inverse CDF. This is done using interpolation.
"""
rescaled = self.inverse_cumulative_distribution(val)
if rescaled.shape == ():
rescaled = float(rescaled)
return rescaled
return self.inverse_cumulative_distribution(val)
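With the manual float cast gone from rescale, the scalar handling presumably lives in WrappedInterp1d itself. A guess at the idea (the real bilby class may well differ; this is only a sketch of a scalar-preserving wrapper):

```python
import numpy as np
from scipy.interpolate import interp1d as _interp1d

class WrappedInterp1d(_interp1d):
    """Sketch: interp1d that returns plain floats for float/int input."""

    def __call__(self, x):
        result = super().__call__(x)
        if isinstance(x, (int, float)):
            result = float(result)   # 0-d array -> python float
        return result

interp = WrappedInterp1d([0.0, 1.0], [0.0, 2.0])
print(interp(0.5), type(interp(0.5)))   # 1.0 <class 'float'>
```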

@property
def minimum(self):
16 changes: 7 additions & 9 deletions bilby/core/prior/slabspike.py
@@ -88,11 +88,11 @@ def rescale(self, val):
original_is_number = isinstance(val, Number)
val = np.atleast_1d(val)

lower_indices = np.where(val < self.inverse_cdf_below_spike)[0]
intermediate_indices = np.where(np.logical_and(
lower_indices = val < self.inverse_cdf_below_spike
intermediate_indices = np.logical_and(
self.inverse_cdf_below_spike <= val,
val <= self.inverse_cdf_below_spike + self.spike_height))[0]
higher_indices = np.where(val > self.inverse_cdf_below_spike + self.spike_height)[0]
val <= (self.inverse_cdf_below_spike + self.spike_height))
higher_indices = val > (self.inverse_cdf_below_spike + self.spike_height)

res = np.zeros(len(val))
res[lower_indices] = self._contracted_rescale(val[lower_indices])
@@ -137,7 +137,7 @@ def prob(self, val):
original_is_number = isinstance(val, Number)
res = self.slab.prob(val) * self.slab_fraction
res = np.atleast_1d(res)
res[np.where(val == self.spike_location)] = np.inf
res[val == self.spike_location] = np.inf
if original_is_number:
try:
res = res[0]
@@ -161,7 +161,7 @@ def ln_prob(self, val):
original_is_number = isinstance(val, Number)
res = self.slab.ln_prob(val) + np.log(self.slab_fraction)
res = np.atleast_1d(res)
res[np.where(val == self.spike_location)] = np.inf
res[val == self.spike_location] = np.inf
if original_is_number:
try:
res = res[0]
@@ -185,7 +185,5 @@ def cdf(self, val):

"""
res = self.slab.cdf(val) * self.slab_fraction
res = np.atleast_1d(res)
indices_above_spike = np.where(val > self.spike_location)[0]
res[indices_above_spike] += self.spike_height
res += self.spike_height * (val > self.spike_location)
return res
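The spike mass is now added with a boolean multiply instead of indexed assignment into an `atleast_1d` copy, so scalar inputs stay scalar. A minimal sketch (names and values are mine):

```python
import numpy as np

spike_location, spike_height = 0.0, 0.3

def add_spike(slab_cdf, val):
    # boolean multiply: 0/1 for scalars, broadcasts over arrays
    return slab_cdf + spike_height * (val > spike_location)

print(add_spike(0.5, -1.0))   # 0.5, still a scalar
print(add_spike(np.array([0.2, 0.5]), np.array([-1.0, 1.0])))   # [0.2 0.8]
```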