diff --git a/pymc3/distributions/continuous.py b/pymc3/distributions/continuous.py
index af4fb1e8cc..d06400679c 100644
--- a/pymc3/distributions/continuous.py
+++ b/pymc3/distributions/continuous.py
@@ -34,6 +34,7 @@
     halfnormal,
     invgamma,
     normal,
+    pareto,
     uniform,
 )
 from aesara.tensor.random.op import RandomVariable
@@ -2029,23 +2030,19 @@ class Pareto(Continuous):
     m: float
         Scale parameter (m > 0).
     """
+    rv_op = pareto
 
-    def __init__(self, alpha, m, transform="lowerbound", *args, **kwargs):
-        self.alpha = alpha = at.as_tensor_variable(floatX(alpha))
-        self.m = m = at.as_tensor_variable(floatX(m))
-
-        self.mean = at.switch(at.gt(alpha, 1), alpha * m / (alpha - 1.0), np.inf)
-        self.median = m * 2.0 ** (1.0 / alpha)
-        self.variance = at.switch(
-            at.gt(alpha, 2), (alpha * m ** 2) / ((alpha - 2.0) * (alpha - 1.0) ** 2), np.inf
-        )
+    @classmethod
+    def dist(
+        cls, alpha: float = None, m: float = None, no_assert: bool = False, **kwargs
+    ) -> RandomVariable:
+        alpha = at.as_tensor_variable(floatX(alpha))
+        m = at.as_tensor_variable(floatX(m))
 
         assert_negative_support(alpha, "alpha", "Pareto")
         assert_negative_support(m, "m", "Pareto")
 
-        if transform == "lowerbound":
-            transform = transforms.lowerbound(self.m)
-        super().__init__(transform=transform, *args, **kwargs)
+        return super().dist([alpha, m], **kwargs)
 
     def _random(self, alpha, m, size=None):
         u = np.random.uniform(size=size)
@@ -2071,7 +2068,11 @@ def random(self, point=None, size=None):
         # alpha, m = draw_values([self.alpha, self.m], point=point, size=size)
         # return generate_samples(self._random, alpha, m, dist_shape=self.shape, size=size)
 
-    def logp(self, value):
+    def logp(
+        value: Union[float, np.ndarray, TensorVariable],
+        alpha: Union[float, np.ndarray, TensorVariable],
+        m: Union[float, np.ndarray, TensorVariable],
+    ):
         """
         Calculate log-probability of Pareto distribution at specified value.
 
@@ -2085,8 +2086,6 @@ def logp(self, value):
         -------
         TensorVariable
         """
-        alpha = self.alpha
-        m = self.m
         return bound(
             at.log(alpha) + logpow(m, alpha) - logpow(value, alpha + 1),
             value >= m,
@@ -2097,7 +2096,11 @@
     def _distr_parameters_for_repr(self):
         return ["alpha", "m"]
 
-    def logcdf(self, value):
+    def logcdf(
+        value: Union[float, np.ndarray, TensorVariable],
+        alpha: Union[float, np.ndarray, TensorVariable],
+        m: Union[float, np.ndarray, TensorVariable],
+    ):
         """
         Compute the log of the cumulative distribution function for Pareto
         distribution at the specified value.
@@ -2112,8 +2115,6 @@ def logcdf(self, value):
         -------
         TensorVariable
         """
-        m = self.m
-        alpha = self.alpha
         arg = (m / value) ** alpha
         return bound(
             at.switch(
diff --git a/pymc3/tests/test_distributions.py b/pymc3/tests/test_distributions.py
index dfb215aa53..84ed8d93c3 100644
--- a/pymc3/tests/test_distributions.py
+++ b/pymc3/tests/test_distributions.py
@@ -1402,7 +1402,6 @@ def test_fun(value, mu, sigma):
             decimal=select_by_precision(float64=4, float32=3),
         )
 
-    @pytest.mark.xfail(reason="Distribution not refactored yet")
     def test_pareto(self):
         self.check_logp(
             Pareto,
diff --git a/pymc3/tests/test_distributions_random.py b/pymc3/tests/test_distributions_random.py
index 0e7259bafc..3d677460a0 100644
--- a/pymc3/tests/test_distributions_random.py
+++ b/pymc3/tests/test_distributions_random.py
@@ -342,12 +342,6 @@ class TestStudentT(BaseTestCases.BaseTestCase):
     params = {"nu": 5.0, "mu": 0.0, "lam": 1.0}
 
 
-@pytest.mark.xfail(reason="This distribution has not been refactored for v4")
-class TestPareto(BaseTestCases.BaseTestCase):
-    distribution = pm.Pareto
-    params = {"alpha": 0.5, "m": 1.0}
-
-
 @pytest.mark.skip(reason="This test is covered by Aesara")
 class TestCauchy(BaseTestCases.BaseTestCase):
     distribution = pm.Cauchy
@@ -681,13 +675,6 @@ def ref_rand(size, alpha, beta):
 
         pymc3_random(pm.InverseGamma, {"alpha": Rplus, "beta": Rplus}, ref_rand=ref_rand)
 
-    @pytest.mark.xfail(reason="This distribution has not been refactored for v4")
-    def test_pareto(self):
-        def ref_rand(size, alpha, m):
-            return st.pareto.rvs(alpha, scale=m, size=size)
-
-        pymc3_random(pm.Pareto, {"alpha": Rplusbig, "m": Rplusbig}, ref_rand=ref_rand)
-
     @pytest.mark.xfail(reason="This distribution has not been refactored for v4")
     def test_ex_gaussian(self):
         def ref_rand(size, mu, sigma, nu):
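As a sanity check on the Pareto log-density this diff keeps in `logp` (`at.log(alpha) + logpow(m, alpha) - logpow(value, alpha + 1)`, i.e. `log(alpha) + alpha*log(m) - (alpha + 1)*log(value)`), here is a minimal sketch comparing that expression against `scipy.stats.pareto.logpdf`, the same reference distribution used elsewhere in the test suite. The helper name `pareto_logp` and the numeric values below are illustrative only and not part of this change:

```python
import numpy as np
import scipy.stats as st

# Pareto log-density as written in logp():
# log(alpha) + alpha*log(m) - (alpha + 1)*log(value)
def pareto_logp(value, alpha, m):
    return np.log(alpha) + alpha * np.log(m) - (alpha + 1) * np.log(value)

alpha, m = 2.5, 1.5                 # arbitrary test values, alpha > 0, m > 0
value = np.array([1.6, 2.0, 5.0])   # all within the support value >= m

# scipy parameterizes Pareto with shape b (= alpha) and scale (= m)
np.testing.assert_allclose(
    pareto_logp(value, alpha, m),
    st.pareto.logpdf(value, b=alpha, scale=m),
)
```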