diff --git a/benches/distributions.rs b/benches/distributions.rs index 28e90a87892..ff4a781d5bc 100644 --- a/benches/distributions.rs +++ b/benches/distributions.rs @@ -197,15 +197,15 @@ distr_float!(distr_openclosed01_f32, f32, OpenClosed01); distr_float!(distr_openclosed01_f64, f64, OpenClosed01); // distributions -distr_float!(distr_exp, f64, Exp::new(1.23 * 4.56)); -distr_float!(distr_normal, f64, Normal::new(-1.23, 4.56)); -distr_float!(distr_log_normal, f64, LogNormal::new(-1.23, 4.56)); -distr_float!(distr_gamma_large_shape, f64, Gamma::new(10., 1.0)); -distr_float!(distr_gamma_small_shape, f64, Gamma::new(0.1, 1.0)); -distr_float!(distr_cauchy, f64, Cauchy::new(4.2, 6.9)); -distr_int!(distr_binomial, u64, Binomial::new(20, 0.7)); -distr_int!(distr_binomial_small, u64, Binomial::new(1000000, 1e-30)); -distr_int!(distr_poisson, u64, Poisson::new(4.0)); +distr_float!(distr_exp, f64, Exp::new(1.23 * 4.56).unwrap()); +distr_float!(distr_normal, f64, Normal::new(-1.23, 4.56).unwrap()); +distr_float!(distr_log_normal, f64, LogNormal::new(-1.23, 4.56).unwrap()); +distr_float!(distr_gamma_large_shape, f64, Gamma::new(10., 1.0).unwrap()); +distr_float!(distr_gamma_small_shape, f64, Gamma::new(0.1, 1.0).unwrap()); +distr_float!(distr_cauchy, f64, Cauchy::new(4.2, 6.9).unwrap()); +distr_int!(distr_binomial, u64, Binomial::new(20, 0.7).unwrap()); +distr_int!(distr_binomial_small, u64, Binomial::new(1000000, 1e-30).unwrap()); +distr_int!(distr_poisson, u64, Poisson::new(4.0).unwrap()); distr!(distr_bernoulli, bool, Bernoulli::new(0.18)); distr_arr!(distr_circle, [f64; 2], UnitCircle::new()); distr_arr!(distr_sphere_surface, [f64; 3], UnitSphereSurface::new()); @@ -279,7 +279,7 @@ gen_range_float!(gen_range_f64, f64, 123.456f64, 7890.12); #[bench] fn dist_iter(b: &mut Bencher) { let mut rng = SmallRng::from_entropy(); - let distr = Normal::new(-2.71828, 3.14159); + let distr = Normal::new(-2.71828, 3.14159).unwrap(); let mut iter = distr.sample_iter(&mut rng); b.iter(|| { 
diff --git a/rand_distr/src/binomial.rs b/rand_distr/src/binomial.rs index 2b6d1f1e4d6..c4c7ee1e1da 100644 --- a/rand_distr/src/binomial.rs +++ b/rand_distr/src/binomial.rs @@ -23,7 +23,7 @@ use crate::utils::log_gamma; /// ``` /// use rand_distr::{Binomial, Distribution}; /// -/// let bin = Binomial::new(20, 0.3); +/// let bin = Binomial::new(20, 0.3).unwrap(); /// let v = bin.sample(&mut rand::thread_rng()); /// println!("{} is from a binomial distribution", v); /// ``` @@ -35,15 +35,26 @@ pub struct Binomial { p: f64, } +/// Error type returned from `Binomial::new`. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum Error { + /// `p < 0` or `nan`. + ProbabilityTooSmall, + /// `p > 1`. + ProbabilityTooLarge, +} + impl Binomial { /// Construct a new `Binomial` with the given shape parameters `n` (number /// of trials) and `p` (probability of success). - /// - /// Panics if `p < 0` or `p > 1`. - pub fn new(n: u64, p: f64) -> Binomial { - assert!(p >= 0.0, "Binomial::new called with p < 0"); - assert!(p <= 1.0, "Binomial::new called with p > 1"); - Binomial { n, p } + pub fn new(n: u64, p: f64) -> Result { + if !(p >= 0.0) { + return Err(Error::ProbabilityTooSmall); + } + if !(p <= 1.0) { + return Err(Error::ProbabilityTooLarge); + } + Ok(Binomial { n, p }) } } @@ -101,7 +112,7 @@ impl Distribution for Binomial { // we use the Cauchy distribution as the comparison distribution // f(x) ~ 1/(1+x^2) - let cauchy = Cauchy::new(0.0, 1.0); + let cauchy = Cauchy::new(0.0, 1.0).unwrap(); loop { let mut comp_dev: f64; loop { @@ -148,7 +159,7 @@ mod test { use super::Binomial; fn test_binomial_mean_and_variance(n: u64, p: f64, rng: &mut R) { - let binomial = Binomial::new(n, p); + let binomial = Binomial::new(n, p).unwrap(); let expected_mean = n as f64 * p; let expected_variance = n as f64 * p * (1.0 - p); @@ -178,13 +189,13 @@ mod test { #[test] fn test_binomial_end_points() { let mut rng = crate::test::rng(352); - assert_eq!(rng.sample(Binomial::new(20, 0.0)), 0); - 
assert_eq!(rng.sample(Binomial::new(20, 1.0)), 20); + assert_eq!(rng.sample(Binomial::new(20, 0.0).unwrap()), 0); + assert_eq!(rng.sample(Binomial::new(20, 1.0).unwrap()), 20); } #[test] #[should_panic] fn test_binomial_invalid_lambda_neg() { - Binomial::new(20, -10.0); + Binomial::new(20, -10.0).unwrap(); } } diff --git a/rand_distr/src/cauchy.rs b/rand_distr/src/cauchy.rs index 49707a501f4..5ad36ae5f4b 100644 --- a/rand_distr/src/cauchy.rs +++ b/rand_distr/src/cauchy.rs @@ -23,7 +23,7 @@ use std::f64::consts::PI; /// ``` /// use rand_distr::{Cauchy, Distribution}; /// -/// let cau = Cauchy::new(2.0, 5.0); +/// let cau = Cauchy::new(2.0, 5.0).unwrap(); /// let v = cau.sample(&mut rand::thread_rng()); /// println!("{} is from a Cauchy(2, 5) distribution", v); /// ``` @@ -33,16 +33,24 @@ pub struct Cauchy { scale: f64 } +/// Error type returned from `Cauchy::new`. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum Error { + /// `scale <= 0` or `nan`. + ScaleTooSmall, +} + impl Cauchy { /// Construct a new `Cauchy` with the given shape parameters /// `median` the peak location and `scale` the scale factor. - /// Panics if `scale <= 0`. 
- pub fn new(median: f64, scale: f64) -> Cauchy { - assert!(scale > 0.0, "Cauchy::new called with scale factor <= 0"); - Cauchy { + pub fn new(median: f64, scale: f64) -> Result { + if !(scale > 0.0) { + return Err(Error::ScaleTooSmall); + } + Ok(Cauchy { median, scale - } + }) } } @@ -76,7 +84,7 @@ mod test { #[test] fn test_cauchy_median() { - let cauchy = Cauchy::new(10.0, 5.0); + let cauchy = Cauchy::new(10.0, 5.0).unwrap(); let mut rng = crate::test::rng(123); let mut numbers: [f64; 1000] = [0.0; 1000]; for i in 0..1000 { @@ -89,7 +97,7 @@ mod test { #[test] fn test_cauchy_mean() { - let cauchy = Cauchy::new(10.0, 5.0); + let cauchy = Cauchy::new(10.0, 5.0).unwrap(); let mut rng = crate::test::rng(123); let mut sum = 0.0; for _ in 0..1000 { @@ -104,12 +112,12 @@ mod test { #[test] #[should_panic] fn test_cauchy_invalid_scale_zero() { - Cauchy::new(0.0, 0.0); + Cauchy::new(0.0, 0.0).unwrap(); } #[test] #[should_panic] fn test_cauchy_invalid_scale_neg() { - Cauchy::new(0.0, -10.0); + Cauchy::new(0.0, -10.0).unwrap(); } } diff --git a/rand_distr/src/dirichlet.rs b/rand_distr/src/dirichlet.rs index 6a34cd088fe..b4b4acd83e5 100644 --- a/rand_distr/src/dirichlet.rs +++ b/rand_distr/src/dirichlet.rs @@ -25,47 +25,60 @@ use crate::gamma::Gamma; /// use rand::prelude::*; /// use rand_distr::Dirichlet; /// -/// let dirichlet = Dirichlet::new(vec![1.0, 2.0, 3.0]); +/// let dirichlet = Dirichlet::new(vec![1.0, 2.0, 3.0]).unwrap(); /// let samples = dirichlet.sample(&mut rand::thread_rng()); /// println!("{:?} is from a Dirichlet([1.0, 2.0, 3.0]) distribution", samples); /// ``` - #[derive(Clone, Debug)] pub struct Dirichlet { /// Concentration parameters (alpha) alpha: Vec, } +/// Error type returned from `Dirichlet::new`. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum Error { + /// `alpha.len() < 2`. + AlphaTooShort, + /// `alpha <= 0.0` or `nan`. + AlphaTooSmall, + /// `size < 2`.
+ SizeTooSmall, +} + impl Dirichlet { /// Construct a new `Dirichlet` with the given alpha parameter `alpha`. /// - /// # Panics - /// - if `alpha.len() < 2` - /// + /// Requires `alpha.len() >= 2`. #[inline] - pub fn new>>(alpha: V) -> Dirichlet { + pub fn new>>(alpha: V) -> Result { let a = alpha.into(); - assert!(a.len() > 1); + if a.len() < 2 { + return Err(Error::AlphaTooShort); + } for i in 0..a.len() { - assert!(a[i] > 0.0); + if !(a[i] > 0.0) { + return Err(Error::AlphaTooSmall); + } } - Dirichlet { alpha: a } + Ok(Dirichlet { alpha: a }) } /// Construct a new `Dirichlet` with the given shape parameter `alpha` and `size`. /// - /// # Panics - /// - if `alpha <= 0.0` - /// - if `size < 2` - /// + /// Requires `size >= 2`. #[inline] - pub fn new_with_param(alpha: f64, size: usize) -> Dirichlet { - assert!(alpha > 0.0); - assert!(size > 1); - Dirichlet { - alpha: vec![alpha; size], + pub fn new_with_param(alpha: f64, size: usize) -> Result { + if !(alpha > 0.0) { + return Err(Error::AlphaTooSmall); } + if size < 2 { + return Err(Error::SizeTooSmall); + } + Ok(Dirichlet { + alpha: vec![alpha; size], + }) } } @@ -76,7 +89,7 @@ impl Distribution> for Dirichlet { let mut sum = 0.0f64; for i in 0..n { - let g = Gamma::new(self.alpha[i], 1.0); + let g = Gamma::new(self.alpha[i], 1.0).unwrap(); samples[i] = g.sample(rng); sum += samples[i]; } @@ -95,7 +108,7 @@ mod test { #[test] fn test_dirichlet() { - let d = Dirichlet::new(vec![1.0, 2.0, 3.0]); + let d = Dirichlet::new(vec![1.0, 2.0, 3.0]).unwrap(); let mut rng = crate::test::rng(221); let samples = d.sample(&mut rng); let _: Vec = samples @@ -111,7 +124,7 @@ mod test { fn test_dirichlet_with_param() { let alpha = 0.5f64; let size = 2; - let d = Dirichlet::new_with_param(alpha, size); + let d = Dirichlet::new_with_param(alpha, size).unwrap(); let mut rng = crate::test::rng(221); let samples = d.sample(&mut rng); let _: Vec = samples @@ -126,12 +139,12 @@ mod test { #[test] #[should_panic] fn 
test_dirichlet_invalid_length() { - Dirichlet::new_with_param(0.5f64, 1); + Dirichlet::new_with_param(0.5f64, 1).unwrap(); } #[test] #[should_panic] fn test_dirichlet_invalid_alpha() { - Dirichlet::new_with_param(0.0f64, 2); + Dirichlet::new_with_param(0.0f64, 2).unwrap(); } } diff --git a/rand_distr/src/exponential.rs b/rand_distr/src/exponential.rs index 23cbd2e9d22..35dc0930b94 100644 --- a/rand_distr/src/exponential.rs +++ b/rand_distr/src/exponential.rs @@ -71,7 +71,7 @@ impl Distribution for Exp1 { /// ``` /// use rand_distr::{Exp, Distribution}; /// -/// let exp = Exp::new(2.0); +/// let exp = Exp::new(2.0).unwrap(); /// let v = exp.sample(&mut rand::thread_rng()); /// println!("{} is from a Exp(2) distribution", v); /// ``` @@ -81,13 +81,22 @@ pub struct Exp { lambda_inverse: f64 } +/// Error type returned from `Exp::new`. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum Error { + /// `lambda <= 0` or `nan`. + LambdaTooSmall, +} + impl Exp { /// Construct a new `Exp` with the given shape parameter - /// `lambda`. Panics if `lambda <= 0`. + /// `lambda`. 
#[inline] - pub fn new(lambda: f64) -> Exp { - assert!(lambda > 0.0, "Exp::new called with `lambda` <= 0"); - Exp { lambda_inverse: 1.0 / lambda } + pub fn new(lambda: f64) -> Result { + if !(lambda > 0.0) { + return Err(Error::LambdaTooSmall); + } + Ok(Exp { lambda_inverse: 1.0 / lambda }) } } @@ -105,7 +114,7 @@ mod test { #[test] fn test_exp() { - let exp = Exp::new(10.0); + let exp = Exp::new(10.0).unwrap(); let mut rng = crate::test::rng(221); for _ in 0..1000 { assert!(exp.sample(&mut rng) >= 0.0); @@ -114,11 +123,11 @@ mod test { #[test] #[should_panic] fn test_exp_invalid_lambda_zero() { - Exp::new(0.0); + Exp::new(0.0).unwrap(); } #[test] #[should_panic] fn test_exp_invalid_lambda_neg() { - Exp::new(-10.0); + Exp::new(-10.0).unwrap(); } } diff --git a/rand_distr/src/gamma.rs b/rand_distr/src/gamma.rs index b16cdb2ce6c..7ddc1fb13fa 100644 --- a/rand_distr/src/gamma.rs +++ b/rand_distr/src/gamma.rs @@ -37,7 +37,7 @@ use crate::{Distribution, Exp, Open01}; /// ``` /// use rand_distr::{Distribution, Gamma}; /// -/// let gamma = Gamma::new(2.0, 5.0); +/// let gamma = Gamma::new(2.0, 5.0).unwrap(); /// let v = gamma.sample(&mut rand::thread_rng()); /// println!("{} is from a Gamma(2, 5) distribution", v); /// ``` @@ -51,6 +51,17 @@ pub struct Gamma { repr: GammaRepr, } +/// Error type returned from `Gamma::new`. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum Error { + /// `shape <= 0` or `nan`. + ShapeTooSmall, + /// `scale <= 0` or `nan`. + ScaleTooSmall, + /// `1 / scale == 0`. + ScaleTooLarge, +} + #[derive(Clone, Copy, Debug)] enum GammaRepr { Large(GammaLargeShape), @@ -92,21 +103,23 @@ struct GammaLargeShape { impl Gamma { /// Construct an object representing the `Gamma(shape, scale)` /// distribution. - /// - /// Panics if `shape <= 0` or `scale <= 0`. 
#[inline] - pub fn new(shape: f64, scale: f64) -> Gamma { - assert!(shape > 0.0, "Gamma::new called with shape <= 0"); - assert!(scale > 0.0, "Gamma::new called with scale <= 0"); + pub fn new(shape: f64, scale: f64) -> Result { + if !(shape > 0.0) { + return Err(Error::ShapeTooSmall); + } + if !(scale > 0.0) { + return Err(Error::ScaleTooSmall); + } let repr = if shape == 1.0 { - One(Exp::new(1.0 / scale)) + One(Exp::new(1.0 / scale).map_err(|_| Error::ScaleTooLarge)?) } else if shape < 1.0 { Small(GammaSmallShape::new_raw(shape, scale)) } else { Large(GammaLargeShape::new_raw(shape, scale)) }; - Gamma { repr } + Ok(Gamma { repr }) } } @@ -180,7 +193,7 @@ impl Distribution for GammaLargeShape { /// ``` /// use rand_distr::{ChiSquared, Distribution}; /// -/// let chi = ChiSquared::new(11.0); +/// let chi = ChiSquared::new(11.0).unwrap(); /// let v = chi.sample(&mut rand::thread_rng()); /// println!("{} is from a χ²(11) distribution", v) /// ``` @@ -189,6 +202,13 @@ pub struct ChiSquared { repr: ChiSquaredRepr, } +/// Error type returned from `ChiSquared::new` and `StudentT::new`. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum ChiSquaredError { + /// `0.5 * k <= 0` or `nan`. + DoFTooSmall, +} + #[derive(Clone, Copy, Debug)] enum ChiSquaredRepr { // k == 1, Gamma(alpha, ..) is particularly slow for alpha < 1, @@ -200,15 +220,17 @@ enum ChiSquaredRepr { impl ChiSquared { /// Create a new chi-squared distribution with degrees-of-freedom - /// `k`. Panics if `k < 0`. - pub fn new(k: f64) -> ChiSquared { + /// `k`. 
+ pub fn new(k: f64) -> Result { let repr = if k == 1.0 { DoFExactlyOne } else { - assert!(k > 0.0, "ChiSquared::new called with `k` < 0"); - DoFAnythingElse(Gamma::new(0.5 * k, 2.0)) + if !(0.5 * k > 0.0) { + return Err(ChiSquaredError::DoFTooSmall); + } + DoFAnythingElse(Gamma::new(0.5 * k, 2.0).unwrap()) }; - ChiSquared { repr } + Ok(ChiSquared { repr }) } } impl Distribution for ChiSquared { @@ -235,7 +257,7 @@ impl Distribution for ChiSquared { /// ``` /// use rand_distr::{FisherF, Distribution}; /// -/// let f = FisherF::new(2.0, 32.0); +/// let f = FisherF::new(2.0, 32.0).unwrap(); /// let v = f.sample(&mut rand::thread_rng()); /// println!("{} is from an F(2, 32) distribution", v) /// ``` @@ -248,18 +270,30 @@ pub struct FisherF { dof_ratio: f64, } +/// Error type returned from `FisherF::new`. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum FisherFError { + /// `m <= 0` or `nan`. + MTooSmall, + /// `n <= 0` or `nan`. + NTooSmall, +} + impl FisherF { - /// Create a new `FisherF` distribution, with the given - /// parameter. Panics if either `m` or `n` are not positive. - pub fn new(m: f64, n: f64) -> FisherF { - assert!(m > 0.0, "FisherF::new called with `m < 0`"); - assert!(n > 0.0, "FisherF::new called with `n < 0`"); - - FisherF { - numer: ChiSquared::new(m), - denom: ChiSquared::new(n), - dof_ratio: n / m + /// Create a new `FisherF` distribution, with the given parameter. 
+ pub fn new(m: f64, n: f64) -> Result { + if !(m > 0.0) { + return Err(FisherFError::MTooSmall); + } + if !(n > 0.0) { + return Err(FisherFError::NTooSmall); } + + Ok(FisherF { + numer: ChiSquared::new(m).unwrap(), + denom: ChiSquared::new(n).unwrap(), + dof_ratio: n / m + }) } } impl Distribution for FisherF { @@ -276,7 +310,7 @@ impl Distribution for FisherF { /// ``` /// use rand_distr::{StudentT, Distribution}; /// -/// let t = StudentT::new(11.0); +/// let t = StudentT::new(11.0).unwrap(); /// let v = t.sample(&mut rand::thread_rng()); /// println!("{} is from a t(11) distribution", v) /// ``` @@ -288,13 +322,12 @@ pub struct StudentT { impl StudentT { /// Create a new Student t distribution with `n` degrees of - /// freedom. Panics if `n <= 0`. - pub fn new(n: f64) -> StudentT { - assert!(n > 0.0, "StudentT::new called with `n <= 0`"); - StudentT { - chi: ChiSquared::new(n), + /// freedom. + pub fn new(n: f64) -> Result { + Ok(StudentT { + chi: ChiSquared::new(n)?, dof: n - } + }) } } impl Distribution for StudentT { @@ -311,7 +344,7 @@ impl Distribution for StudentT { /// ``` /// use rand_distr::{Distribution, Beta}; /// -/// let beta = Beta::new(2.0, 5.0); +/// let beta = Beta::new(2.0, 5.0).unwrap(); /// let v = beta.sample(&mut rand::thread_rng()); /// println!("{} is from a Beta(2, 5) distribution", v); /// ``` @@ -321,17 +354,25 @@ pub struct Beta { gamma_b: Gamma, } +/// Error type returned from `Beta::new`. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum BetaError { + /// `alpha <= 0` or `nan`. + AlphaTooSmall, + /// `beta <= 0` or `nan`. + BetaTooSmall, +} + impl Beta { /// Construct an object representing the `Beta(alpha, beta)` /// distribution. - /// - /// Panics if `shape <= 0` or `scale <= 0`. - pub fn new(alpha: f64, beta: f64) -> Beta { - assert!((alpha > 0.) 
& (beta > 0.)); - Beta { - gamma_a: Gamma::new(alpha, 1.), - gamma_b: Gamma::new(beta, 1.), - } + pub fn new(alpha: f64, beta: f64) -> Result { + Ok(Beta { + gamma_a: Gamma::new(alpha, 1.) + .map_err(|_| BetaError::AlphaTooSmall)?, + gamma_b: Gamma::new(beta, 1.) + .map_err(|_| BetaError::BetaTooSmall)?, + }) } } @@ -350,7 +391,7 @@ mod test { #[test] fn test_chi_squared_one() { - let chi = ChiSquared::new(1.0); + let chi = ChiSquared::new(1.0).unwrap(); let mut rng = crate::test::rng(201); for _ in 0..1000 { chi.sample(&mut rng); @@ -358,7 +399,7 @@ mod test { } #[test] fn test_chi_squared_small() { - let chi = ChiSquared::new(0.5); + let chi = ChiSquared::new(0.5).unwrap(); let mut rng = crate::test::rng(202); for _ in 0..1000 { chi.sample(&mut rng); @@ -366,7 +407,7 @@ mod test { } #[test] fn test_chi_squared_large() { - let chi = ChiSquared::new(30.0); + let chi = ChiSquared::new(30.0).unwrap(); let mut rng = crate::test::rng(203); for _ in 0..1000 { chi.sample(&mut rng); @@ -375,12 +416,12 @@ mod test { #[test] #[should_panic] fn test_chi_squared_invalid_dof() { - ChiSquared::new(-1.0); + ChiSquared::new(-1.0).unwrap(); } #[test] fn test_f() { - let f = FisherF::new(2.0, 32.0); + let f = FisherF::new(2.0, 32.0).unwrap(); let mut rng = crate::test::rng(204); for _ in 0..1000 { f.sample(&mut rng); @@ -389,7 +430,7 @@ mod test { #[test] fn test_t() { - let t = StudentT::new(11.0); + let t = StudentT::new(11.0).unwrap(); let mut rng = crate::test::rng(205); for _ in 0..1000 { t.sample(&mut rng); @@ -398,7 +439,7 @@ mod test { #[test] fn test_beta() { - let beta = Beta::new(1.0, 2.0); + let beta = Beta::new(1.0, 2.0).unwrap(); let mut rng = crate::test::rng(201); for _ in 0..1000 { beta.sample(&mut rng); @@ -408,6 +449,6 @@ mod test { #[test] #[should_panic] fn test_beta_invalid_dof() { - Beta::new(0., 0.); + Beta::new(0., 0.).unwrap(); } } diff --git a/rand_distr/src/lib.rs b/rand_distr/src/lib.rs index b012cc33851..20104eb50cb 100644 --- a/rand_distr/src/lib.rs 
+++ b/rand_distr/src/lib.rs @@ -63,17 +63,17 @@ pub use rand::distributions::{Distribution, DistIter, Standard, pub use self::unit_sphere::UnitSphereSurface; pub use self::unit_circle::UnitCircle; -pub use self::gamma::{Gamma, ChiSquared, FisherF, - StudentT, Beta}; -pub use self::normal::{Normal, LogNormal, StandardNormal}; -pub use self::exponential::{Exp, Exp1}; -pub use self::pareto::Pareto; -pub use self::poisson::Poisson; -pub use self::binomial::Binomial; -pub use self::cauchy::Cauchy; -pub use self::dirichlet::Dirichlet; -pub use self::triangular::Triangular; -pub use self::weibull::Weibull; +pub use self::gamma::{Gamma, Error as GammaError, ChiSquared, ChiSquaredError, + FisherF, FisherFError, StudentT, Beta, BetaError}; +pub use self::normal::{Normal, Error as NormalError, LogNormal, StandardNormal}; +pub use self::exponential::{Exp, Error as ExpError, Exp1}; +pub use self::pareto::{Pareto, Error as ParetoError}; +pub use self::poisson::{Poisson, Error as PoissonError}; +pub use self::binomial::{Binomial, Error as BinomialError}; +pub use self::cauchy::{Cauchy, Error as CauchyError}; +pub use self::dirichlet::{Dirichlet, Error as DirichletError}; +pub use self::triangular::{Triangular, Error as TriangularError}; +pub use self::weibull::{Weibull, Error as WeibullError}; mod unit_sphere; mod unit_circle; diff --git a/rand_distr/src/normal.rs b/rand_distr/src/normal.rs index 27da152e14e..436e7dfd9ac 100644 --- a/rand_distr/src/normal.rs +++ b/rand_distr/src/normal.rs @@ -86,7 +86,7 @@ impl Distribution for StandardNormal { /// use rand_distr::{Normal, Distribution}; /// /// // mean 2, standard deviation 3 -/// let normal = Normal::new(2.0, 3.0); +/// let normal = Normal::new(2.0, 3.0).unwrap(); /// let v = normal.sample(&mut rand::thread_rng()); /// println!("{} is from a N(2, 9) distribution", v) /// ``` @@ -98,20 +98,25 @@ pub struct Normal { std_dev: f64, } +/// Error type returned from `Normal::new` and `LogNormal::new`. 
+#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum Error { + /// `std_dev < 0` or `nan`. + StdDevTooSmall, +} + impl Normal { /// Construct a new `Normal` distribution with the given mean and /// standard deviation. - /// - /// # Panics - /// - /// Panics if `std_dev < 0`. #[inline] - pub fn new(mean: f64, std_dev: f64) -> Normal { - assert!(std_dev >= 0.0, "Normal::new called with `std_dev` < 0"); - Normal { + pub fn new(mean: f64, std_dev: f64) -> Result { + if !(std_dev >= 0.0) { + return Err(Error::StdDevTooSmall); + } + Ok(Normal { mean, std_dev - } + }) } } impl Distribution for Normal { @@ -133,7 +138,7 @@ impl Distribution for Normal { /// use rand_distr::{LogNormal, Distribution}; /// /// // mean 2, standard deviation 3 -/// let log_normal = LogNormal::new(2.0, 3.0); +/// let log_normal = LogNormal::new(2.0, 3.0).unwrap(); /// let v = log_normal.sample(&mut rand::thread_rng()); /// println!("{} is from an ln N(2, 9) distribution", v) /// ``` @@ -144,15 +149,13 @@ pub struct LogNormal { impl LogNormal { /// Construct a new `LogNormal` distribution with the given mean - /// and standard deviation. - /// - /// # Panics - /// - /// Panics if `std_dev < 0`. + /// and standard deviation of the logarithm of the distribution. 
#[inline] - pub fn new(mean: f64, std_dev: f64) -> LogNormal { - assert!(std_dev >= 0.0, "LogNormal::new called with `std_dev` < 0"); - LogNormal { norm: Normal::new(mean, std_dev) } + pub fn new(mean: f64, std_dev: f64) -> Result { + if !(std_dev >= 0.0) { + return Err(Error::StdDevTooSmall); + } + Ok(LogNormal { norm: Normal::new(mean, std_dev).unwrap() }) } } impl Distribution for LogNormal { @@ -168,7 +171,7 @@ mod tests { #[test] fn test_normal() { - let norm = Normal::new(10.0, 10.0); + let norm = Normal::new(10.0, 10.0).unwrap(); let mut rng = crate::test::rng(210); for _ in 0..1000 { norm.sample(&mut rng); @@ -177,13 +180,13 @@ mod tests { #[test] #[should_panic] fn test_normal_invalid_sd() { - Normal::new(10.0, -1.0); + Normal::new(10.0, -1.0).unwrap(); } #[test] fn test_log_normal() { - let lnorm = LogNormal::new(10.0, 10.0); + let lnorm = LogNormal::new(10.0, 10.0).unwrap(); let mut rng = crate::test::rng(211); for _ in 0..1000 { lnorm.sample(&mut rng); @@ -192,6 +195,6 @@ mod tests { #[test] #[should_panic] fn test_log_normal_invalid_sd() { - LogNormal::new(10.0, -1.0); + LogNormal::new(10.0, -1.0).unwrap(); } } diff --git a/rand_distr/src/pareto.rs b/rand_distr/src/pareto.rs index b1f91b5378a..59977940915 100644 --- a/rand_distr/src/pareto.rs +++ b/rand_distr/src/pareto.rs @@ -18,7 +18,7 @@ use crate::{Distribution, OpenClosed01}; /// use rand::prelude::*; /// use rand_distr::Pareto; /// -/// let val: f64 = SmallRng::from_entropy().sample(Pareto::new(1., 2.)); +/// let val: f64 = SmallRng::from_entropy().sample(Pareto::new(1., 2.).unwrap()); /// println!("{}", val); /// ``` #[derive(Clone, Copy, Debug)] @@ -27,18 +27,28 @@ pub struct Pareto { inv_neg_shape: f64, } +/// Error type returned from `Pareto::new`. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum Error { + /// `scale <= 0` or `nan`. + ScaleTooSmall, + /// `shape <= 0` or `nan`. + ShapeTooSmall, +} + impl Pareto { /// Construct a new Pareto distribution with given `scale` and `shape`. 
/// /// In the literature, `scale` is commonly written as xm or k and /// `shape` is often written as α. - /// - /// # Panics - /// - /// `scale` and `shape` have to be non-zero and positive. - pub fn new(scale: f64, shape: f64) -> Pareto { - assert!((scale > 0.) & (shape > 0.)); - Pareto { scale, inv_neg_shape: -1.0 / shape } + pub fn new(scale: f64, shape: f64) -> Result { + if !(scale > 0.0) { + return Err(Error::ScaleTooSmall); + } + if !(shape > 0.0) { + return Err(Error::ShapeTooSmall); + } + Ok(Pareto { scale, inv_neg_shape: -1.0 / shape }) } } @@ -57,14 +67,14 @@ mod tests { #[test] #[should_panic] fn invalid() { - Pareto::new(0., 0.); + Pareto::new(0., 0.).unwrap(); } #[test] fn sample() { let scale = 1.0; let shape = 2.0; - let d = Pareto::new(scale, shape); + let d = Pareto::new(scale, shape).unwrap(); let mut rng = crate::test::rng(1); for _ in 0..1000 { let r = d.sample(&mut rng); diff --git a/rand_distr/src/poisson.rs b/rand_distr/src/poisson.rs index 3b4c9080459..96bc8b9b6b4 100644 --- a/rand_distr/src/poisson.rs +++ b/rand_distr/src/poisson.rs @@ -23,7 +23,7 @@ use crate::utils::log_gamma; /// ``` /// use rand_distr::{Poisson, Distribution}; /// -/// let poi = Poisson::new(2.0); +/// let poi = Poisson::new(2.0).unwrap(); /// let v = poi.sample(&mut rand::thread_rng()); /// println!("{} is from a Poisson(2) distribution", v); /// ``` @@ -37,19 +37,28 @@ pub struct Poisson { magic_val: f64, } +/// Error type returned from `Poisson::new`. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum Error { + /// `lambda <= 0` or `nan`. + ShapeTooSmall, +} + impl Poisson { /// Construct a new `Poisson` with the given shape parameter - /// `lambda`. Panics if `lambda <= 0`. - pub fn new(lambda: f64) -> Poisson { - assert!(lambda > 0.0, "Poisson::new called with lambda <= 0"); + /// `lambda`. 
+ pub fn new(lambda: f64) -> Result { + if !(lambda > 0.0) { + return Err(Error::ShapeTooSmall); + } let log_lambda = lambda.ln(); - Poisson { + Ok(Poisson { lambda, exp_lambda: (-lambda).exp(), log_lambda, sqrt_2lambda: (2.0 * lambda).sqrt(), magic_val: lambda * log_lambda - log_gamma(1.0 + lambda), - } + }) } } @@ -73,7 +82,7 @@ impl Distribution for Poisson { // we use the Cauchy distribution as the comparison distribution // f(x) ~ 1/(1+x^2) - let cauchy = Cauchy::new(0.0, 1.0); + let cauchy = Cauchy::new(0.0, 1.0).unwrap(); loop { let mut result; @@ -118,7 +127,7 @@ mod test { #[test] fn test_poisson_10() { - let poisson = Poisson::new(10.0); + let poisson = Poisson::new(10.0).unwrap(); let mut rng = crate::test::rng(123); let mut sum = 0; for _ in 0..1000 { @@ -132,7 +141,7 @@ mod test { #[test] fn test_poisson_15() { // Take the 'high expected values' path - let poisson = Poisson::new(15.0); + let poisson = Poisson::new(15.0).unwrap(); let mut rng = crate::test::rng(123); let mut sum = 0; for _ in 0..1000 { @@ -146,12 +155,12 @@ mod test { #[test] #[should_panic] fn test_poisson_invalid_lambda_zero() { - Poisson::new(0.0); + Poisson::new(0.0).unwrap(); } #[test] #[should_panic] fn test_poisson_invalid_lambda_neg() { - Poisson::new(-10.0); + Poisson::new(-10.0).unwrap(); } } diff --git a/rand_distr/src/triangular.rs b/rand_distr/src/triangular.rs index 42c5587d91e..1f7de8afb2d 100644 --- a/rand_distr/src/triangular.rs +++ b/rand_distr/src/triangular.rs @@ -17,7 +17,7 @@ use crate::{Distribution, Standard}; /// ```rust /// use rand_distr::{Triangular, Distribution}; /// -/// let d = Triangular::new(0., 5., 2.5); +/// let d = Triangular::new(0., 5., 2.5).unwrap(); /// let v = d.sample(&mut rand::thread_rng()); /// println!("{} is from a triangular distribution", v); /// ``` @@ -28,20 +28,32 @@ pub struct Triangular { mode: f64, } +/// Error type returned from `Triangular::new`. 
+#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum Error { + /// `max < mode` or `max` is `nan`. + MaxTooSmall, + /// `mode < min` or `mode` is `nan`. + ModeTooSmall, + /// `max == min` or `min` is `nan`. + MaxEqualsMin, +} + impl Triangular { /// Construct a new `Triangular` with minimum `min`, maximum `max` and mode /// `mode`. - /// - /// # Panics - /// - /// If `max < mode`, `mode < max` or `max == min`. - /// #[inline] - pub fn new(min: f64, max: f64, mode: f64) -> Triangular { - assert!(max >= mode); - assert!(mode >= min); - assert!(max != min); - Triangular { min, max, mode } + pub fn new(min: f64, max: f64, mode: f64) -> Result { + if !(max >= mode) { + return Err(Error::MaxTooSmall); + } + if !(mode >= min) { + return Err(Error::ModeTooSmall); + } + if !(max != min) { + return Err(Error::MaxEqualsMin); + } + Ok(Triangular { min, max, mode }) } } @@ -71,13 +83,13 @@ mod test { (0., 1., 0.9), (-4., -0.5, -2.), (-13.039, 8.41, 1.17), ] { println!("{} {} {}", min, max, mode); - let _ = Triangular::new(min, max, mode); + let _ = Triangular::new(min, max, mode).unwrap(); } } #[test] fn test_sample() { - let norm = Triangular::new(0., 1., 0.5); + let norm = Triangular::new(0., 1., 0.5).unwrap(); let mut rng = crate::test::rng(1); for _ in 0..1000 { norm.sample(&mut rng); diff --git a/rand_distr/src/weibull.rs b/rand_distr/src/weibull.rs index 020adb4eb13..8d3c66d8919 100644 --- a/rand_distr/src/weibull.rs +++ b/rand_distr/src/weibull.rs @@ -18,7 +18,7 @@ use crate::{Distribution, OpenClosed01}; /// use rand::prelude::*; /// use rand_distr::Weibull; /// -/// let val: f64 = SmallRng::from_entropy().sample(Weibull::new(1., 10.)); +/// let val: f64 = SmallRng::from_entropy().sample(Weibull::new(1., 10.).unwrap()); /// println!("{}", val); /// ``` #[derive(Clone, Copy, Debug)] @@ -27,15 +27,25 @@ pub struct Weibull { scale: f64, } +/// Error type returned from `Weibull::new`. 
+#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum Error { + /// `scale <= 0` or `nan`. + ScaleTooSmall, + /// `shape <= 0` or `nan`. + ShapeTooSmall, +} + impl Weibull { /// Construct a new `Weibull` distribution with given `scale` and `shape`. - /// - /// # Panics - /// - /// `scale` and `shape` have to be non-zero and positive. - pub fn new(scale: f64, shape: f64) -> Weibull { - assert!((scale > 0.) & (shape > 0.)); - Weibull { inv_shape: 1./shape, scale } + pub fn new(scale: f64, shape: f64) -> Result { + if !(scale > 0.0) { + return Err(Error::ScaleTooSmall); + } + if !(shape > 0.0) { + return Err(Error::ShapeTooSmall); + } + Ok(Weibull { inv_shape: 1./shape, scale }) } } @@ -54,14 +64,14 @@ mod tests { #[test] #[should_panic] fn invalid() { - Weibull::new(0., 0.); + Weibull::new(0., 0.).unwrap(); } #[test] fn sample() { let scale = 1.0; let shape = 2.0; - let d = Weibull::new(scale, shape); + let d = Weibull::new(scale, shape).unwrap(); let mut rng = crate::test::rng(1); for _ in 0..1000 { let r = d.sample(&mut rng);