diff --git a/pymc/distributions/continuous.py b/pymc/distributions/continuous.py
index 9cffa3c111..b45eaeb96f 100644
--- a/pymc/distributions/continuous.py
+++ b/pymc/distributions/continuous.py
@@ -345,6 +345,11 @@ def logcdf(value, lower, upper):
             msg="lower <= upper",
         )
 
+    def icdf(value, lower, upper):
+        res = lower + (upper - lower) * value
+        res = check_icdf_value(res, value)
+        return check_icdf_parameters(res, lower < upper)
+
 
 @_default_transform.register(Uniform)
 def uniform_default_transform(op, rv):
diff --git a/pymc/distributions/discrete.py b/pymc/distributions/discrete.py
index f0af73ea66..fa39ac0370 100644
--- a/pymc/distributions/discrete.py
+++ b/pymc/distributions/discrete.py
@@ -1058,6 +1058,15 @@ def logcdf(value, lower, upper):
             msg="lower <= upper",
         )
 
+    def icdf(value, lower, upper):
+        res = pt.ceil(value * (upper - lower + 1)).astype("int64") + lower - 1
+        res = check_icdf_value(res, value)
+        return check_icdf_parameters(
+            res,
+            lower <= upper,
+            msg="lower <= upper",
+        )
+
 
 class Categorical(Discrete):
     R"""
@@ -1572,7 +1581,7 @@ class OrderedLogistic:
         # Ordered logistic regression
         with pm.Model() as model:
             cutpoints = pm.Normal("cutpoints", mu=[-1,1], sigma=10, shape=2,
-                                  transform=pm.distributions.transforms.ordered)
+                                  transform=pm.distributions.transforms.univariate_ordered)
             y_ = pm.OrderedLogistic("y", cutpoints=cutpoints, eta=x, observed=y)
             idata = pm.sample()
 
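For a quick numerical sanity check of what the two new `icdf` implementations compute, here is a minimal sketch; it assumes a build with the patches above applied and is illustration only, not part of the patch itself:

    import numpy as np
    import pymc as pm
    from pymc.logprob.abstract import icdf

    # Uniform: icdf(q) = lower + (upper - lower) * q
    x = pm.Uniform.dist(lower=0.0, upper=10.0)
    assert np.isclose(icdf(x, np.array(0.25)).eval(), 2.5)

    # DiscreteUniform: icdf(q) = ceil(q * (upper - lower + 1)) + lower - 1,
    # i.e. the smallest integer k whose CDF is >= q
    d = pm.DiscreteUniform.dist(lower=1, upper=6)
    assert icdf(d, np.array(0.5)).eval() == 3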
diff --git a/pymc/gp/gp.py b/pymc/gp/gp.py
index b2c415ee02..e8a695787a 100644
--- a/pymc/gp/gp.py
+++ b/pymc/gp/gp.py
@@ -111,10 +111,10 @@ class Latent(Base):
 
     Parameters
     ----------
-    cov_func: None, 2D array, or instance of Covariance
-        The covariance function.  Defaults to zero.
-    mean_func: None, instance of Mean
-        The mean function.  Defaults to zero.
+    mean_func : Mean, default ~pymc.gp.mean.Zero
+        The mean function.
+    cov_func : 2D array-like, or Covariance, default ~pymc.gp.cov.Constant
+        The covariance function.
 
     Examples
     --------
@@ -171,18 +171,20 @@ def prior(self, name, X, reparameterize=True, jitter=JITTER_DEFAULT, **kwargs):
 
         Parameters
         ----------
-        name: string
+        name : str
             Name of the random variable
-        X: array-like
-            Function input values.
-        reparameterize: bool
+        X : array-like
+            Function input values. If one-dimensional, must be a column
+            vector with shape `(n, 1)`.
+        reparameterize : bool, default True
             Reparameterize the distribution by rotating the random
             variable by the Cholesky factor of the covariance matrix.
-        jitter: scalar
+        jitter : float, default 1e-6
             A small correction added to the diagonal of positive semi-definite
             covariance matrices to ensure numerical stability.
         **kwargs
-            Extra keyword arguments that are passed to distribution constructor.
+            Extra keyword arguments that are passed to :class:`~pymc.MvNormal`
+            distribution constructor.
         """
 
         f = self._build_prior(name, X, reparameterize, jitter, **kwargs)
@@ -233,19 +235,20 @@ def conditional(self, name, Xnew, given=None, jitter=JITTER_DEFAULT, **kwargs):
 
         Parameters
         ----------
-        name: string
+        name : str
             Name of the random variable
-        Xnew: array-like
-            Function input values.
-        given: dict
-            Can optionally take as key value pairs: `X`, `y`,
-            and `gp`. See the section in the documentation on additive GP
-            models in PyMC for more information.
-        jitter: scalar
+        Xnew : array-like
+            Function input values. If one-dimensional, must be a column
+            vector with shape `(n, 1)`.
+        given : dict, optional
+            Can take as key value pairs: `X`, `y`,
+            and `gp`. See the :ref:`section <additive_gp>` in the documentation
+            on additive GP models in pymc for more information.
+        jitter : float, default 1e-6
             A small correction added to the diagonal of positive semi-definite
             covariance matrices to ensure numerical stability.
         **kwargs
-            Extra keyword arguments that are passed to `MvNormal` distribution
+            Extra keyword arguments that are passed to :class:`~pymc.MvNormal` distribution
             constructor.
         """
         givens = self._get_given_vals(given)
@@ -260,7 +263,7 @@ class TP(Latent):
 
     The usage is nearly identical to that of `gp.Latent`.  The differences
     are that it must be initialized with a degrees of freedom parameter, and
-    TP is not additive.  Given a mean and covariance function, and a degrees of
+    TP is not additive. Given a mean and covariance function, and a degrees of
     freedom parameter, the function :math:`f(x)` is modeled as,
 
     .. math::
@@ -270,10 +273,12 @@ class TP(Latent):
 
     Parameters
     ----------
-    scale_func : None, 2D array, or instance of Covariance
-        The scale function.  Defaults to zero.
-    mean_func : None, instance of Mean
-        The mean function.  Defaults to zero.
+    mean_func : Mean, default ~pymc.gp.mean.Zero
+        The mean function.
+    scale_func : 2D array-like, or Covariance, default ~pymc.gp.cov.Constant
+        The covariance function.
+    cov_func : 2D array-like, or Covariance, default None
+        Deprecated, previous version of "scale_func"
     nu : float
         The degrees of freedom
 
@@ -320,15 +325,20 @@ def prior(self, name, X, reparameterize=True, jitter=JITTER_DEFAULT, **kwargs):
 
         Parameters
         ----------
-        name: string
+        name : str
             Name of the random variable
-        X: array-like
-            Function input values.
-        reparameterize: bool
+        X : array-like
+            Function input values. If one-dimensional, must be a column
+            vector with shape `(n, 1)`.
+        reparameterize : bool, default True
             Reparameterize the distribution by rotating the random
             variable by the Cholesky factor of the covariance matrix.
+        jitter : float, default 1e-6
+            A small correction added to the diagonal of positive semi-definite
+            covariance matrices to ensure numerical stability.
         **kwargs
-            Extra keyword arguments that are passed to distribution constructor.
+            Extra keyword arguments that are passed to :class:`~pymc.MvStudentT`
+            distribution constructor.
         """
 
         f = self._build_prior(name, X, reparameterize, jitter, **kwargs)
@@ -361,15 +371,16 @@ def conditional(self, name, Xnew, jitter=JITTER_DEFAULT, **kwargs):
 
         Parameters
         ----------
-        name: string
+        name : str
             Name of the random variable
-        Xnew: array-like
-            Function input values.
-        jitter: scalar
+        Xnew : array-like
+            Function input values. If one-dimensional, must be a column
+            vector with shape `(n, 1)`.
+        jitter : float, default 1e-6
             A small correction added to the diagonal of positive semi-definite
             covariance matrices to ensure numerical stability.
         **kwargs
-            Extra keyword arguments that are passed to `MvNormal` distribution
+            Extra keyword arguments that are passed to :class:`~pymc.MvStudentT` distribution
             constructor.
         """
 
@@ -388,14 +399,15 @@ class Marginal(Base):
     prior and additive noise.  It has `marginal_likelihood`, `conditional`
     and `predict` methods.  This GP implementation can be used to
     implement regression on data that is normally distributed.  For more
-    information on the `prior` and `conditional` methods, see their docstrings.
+    information on the `marginal_likelihood`, `conditional`
+    and `predict` methods, see their docstrings.
 
     Parameters
     ----------
-    cov_func: None, 2D array, or instance of Covariance
-        The covariance function.  Defaults to zero.
-    mean_func: None, instance of Mean
-        The mean function.  Defaults to zero.
+    mean_func : Mean, default ~pymc.gp.mean.Zero
+        The mean function.
+    cov_func : 2D array-like, or Covariance, default ~pymc.gp.cov.Constant
+        The covariance function.
 
     Examples
     --------
@@ -439,7 +451,7 @@ def marginal_likelihood(
         Returns the marginal likelihood distribution, given the input
         locations `X` and the data `y`.
 
-        This is integral over the product of the GP prior and a normal likelihood.
+        This is the integral over the product of the GP prior and a normal likelihood.
 
         .. math::
 
@@ -447,24 +459,26 @@
 
         Parameters
         ----------
-        name: string
+        name : str
             Name of the random variable
-        X: array-like
+        X : array-like
             Function input values. If one-dimensional, must be a column
             vector with shape `(n, 1)`.
-        y: array-like
+        y : array-like
             Data that is the sum of the function with the GP prior and
             Gaussian noise.  Must have shape `(n, )`.
-        sigma: scalar, Variable, or Covariance
+        sigma : float, Variable, or Covariance, default ~pymc.gp.cov.WhiteNoise
             Standard deviation of the Gaussian noise.  Can also be a Covariance for
             non-white noise.
-        noise: scalar, Variable, or Covariance
-            Previous parameterization of `sigma`.
-        jitter: scalar
+        noise : float, Variable, or Covariance, optional
+            Deprecated. Previous parameterization of `sigma`.
+        jitter : float, default 1e-6
             A small correction added to the diagonal of positive semi-definite
             covariance matrices to ensure numerical stability.
+        is_observed : bool, default True
+            Deprecated. Whether to set `y` as an `observed` variable in the `model`.
         **kwargs
-            Extra keyword arguments that are passed to `MvNormal` distribution
+            Extra keyword arguments that are passed to :class:`~pymc.MvNormal` distribution
             constructor.
         """
         sigma = _handle_sigma_noise_parameters(sigma=sigma, noise=noise)
@@ -548,23 +562,22 @@ def conditional(
 
         Parameters
         ----------
-        name: string
+        name : str
             Name of the random variable
-        Xnew: array-like
+        Xnew : array-like
             Function input values. If one-dimensional, must be a column
             vector with shape `(n, 1)`.
-        pred_noise: bool
+        pred_noise : bool, default False
             Whether or not observation noise is included in the conditional.
-            Default is `False`.
-        given: dict
-            Can optionally take as key value pairs: `X`, `y`, `sigma`,
-            and `gp`. See the section in the documentation on additive GP
-            models in PyMC for more information.
-        jitter: scalar
+        given : dict, optional
+            Can take key value pairs: `X`, `y`, `sigma`,
+            and `gp`. See the :ref:`section <additive_gp>` in the documentation
+            on additive GP models in pymc for more information.
+        jitter : float, default 1e-6
             A small correction added to the diagonal of positive semi-definite
             covariance matrices to ensure numerical stability.
         **kwargs
-            Extra keyword arguments that are passed to `MvNormal` distribution
+            Extra keyword arguments that are passed to :class:`~pymc.MvNormal` distribution
             constructor.
         """
         givens = self._get_given_vals(given)
@@ -589,22 +602,27 @@ def predict(
 
         Parameters
         ----------
-        Xnew: array-like
+        Xnew : array-like
             Function input values. If one-dimensional, must be a column
             vector with shape `(n, 1)`.
-        point: pymc.model.Point
+        point : pymc.Point, optional
            A specific point to condition on.
-        diag: bool
+        diag : bool, default False
             If `True`, return the diagonal instead of the full covariance
-            matrix.  Default is `False`.
-        pred_noise: bool
+            matrix.
+        pred_noise : bool, default False
             Whether or not observation noise is included in the conditional.
-            Default is `False`.
-        given: dict
-            Same as `conditional` method.
-        jitter: scalar
+        given : dict, optional
+            Can take key value pairs: `X`, `y`, `sigma`,
+            and `gp`. See the :ref:`section <additive_gp>` in the documentation
+            on additive GP models in pymc for more information.
+        jitter : float, default 1e-6
             A small correction added to the diagonal of positive semi-definite
             covariance matrices to ensure numerical stability.
+        model : Model, optional
+            Model with the Gaussian Process component for which predictions will
+            be generated. It is optional when inside a with context, otherwise
+            it is required.
         """
         if given is None:
             given = {}
@@ -618,17 +636,18 @@ def _predict_at(self, Xnew, diag=False, pred_noise=False, given=None, jitter=JIT
 
         Parameters
         ----------
-        Xnew: array-like
+        Xnew : array-like
             Function input values. If one-dimensional, must be a column
             vector with shape `(n, 1)`.
-        diag: bool
+        diag : bool, default False
             If `True`, return the diagonal instead of the full covariance
-            matrix.  Default is `False`.
-        pred_noise: bool
+            matrix.
+        pred_noise : bool, default False
             Whether or not observation noise is included in the conditional.
-            Default is `False`.
-        given: dict
-            Same as `conditional` method.
+        given : dict, optional
+            Can take key value pairs: `X`, `y`, `sigma`,
+            and `gp`. See the :ref:`section <additive_gp>` in the documentation
+            on additive GP models in pymc for more information.
         """
         givens = self._get_given_vals(given)
         mu, cov = self._build_conditional(Xnew, pred_noise, diag, *givens, jitter)
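The rewritten `Marginal` docstrings above describe the `marginal_likelihood` / `conditional` / `predict` workflow; a minimal usage sketch follows, with hypothetical data and priors (nothing below is part of the patch):

    import numpy as np
    import pymc as pm

    rng = np.random.default_rng(0)
    X = np.linspace(0, 1, 50)[:, None]  # inputs as a column vector with shape (n, 1)
    y = np.sin(6 * X).ravel() + 0.1 * rng.normal(size=50)

    with pm.Model() as model:
        ell = pm.Gamma("ell", alpha=2, beta=10)
        cov = pm.gp.cov.ExpQuad(1, ls=ell)
        gp = pm.gp.Marginal(cov_func=cov)  # mean_func defaults to Zero
        # `sigma` replaces the deprecated `noise` keyword
        y_ = gp.marginal_likelihood("y", X=X, y=y, sigma=0.1)
        idata = pm.sample()

    with model:
        Xnew = np.linspace(0, 1.2, 20)[:, None]
        f_star = gp.conditional("f_star", Xnew=Xnew, jitter=1e-6)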
@@ -652,13 +671,12 @@ class MarginalApprox(Marginal):
 
     Parameters
     ----------
-    cov_func: None, 2D array, or instance of Covariance
-        The covariance function.  Defaults to zero.
-    mean_func: None, instance of Mean
-        The mean function.  Defaults to zero.
-    approx: string
+    mean_func : Mean, default ~pymc.gp.mean.Zero
+        The mean function.
+    cov_func : 2D array-like, or Covariance, default ~pymc.gp.cov.Constant
+        The covariance function.
+    approx : str, default 'VFE'
         The approximation to use.  Must be one of `VFE`, `FITC` or `DTC`.
-        Default is VFE.
 
     Examples
     --------
@@ -756,25 +774,25 @@ def marginal_likelihood(
 
         Parameters
         ----------
-        name: string
+        name : str
            Name of the random variable
-        X: array-like
+        X : array-like
             Function input values. If one-dimensional, must be a column
             vector with shape `(n, 1)`.
-        Xu: array-like
+        Xu : array-like
             The inducing points.  Must have the same number of columns as `X`.
-        y: array-like
+        y : array-like
             Data that is the sum of the function with the GP prior and
             Gaussian noise.  Must have shape `(n, )`.
-        sigma: scalar, Variable
+        sigma : float, Variable
             Standard deviation of the Gaussian noise.
-        noise: scalar, Variable
-            Previous parameterization of `sigma`
-        jitter: scalar
+        noise : float, Variable, optional
+            Previous parameterization of `sigma`.
+        jitter : float, default 1e-6
             A small correction added to the diagonal of positive semi-definite
             covariance matrices to ensure numerical stability.
         **kwargs
-            Extra keyword arguments that are passed to `MvNormal` distribution
+            Extra keyword arguments that are passed to :class:`~pymc.MvNormal` distribution
             constructor.
         """
 
@@ -848,23 +866,22 @@ def conditional(
 
         Parameters
         ----------
-        name: string
+        name : str
             Name of the random variable
-        Xnew: array-like
+        Xnew : array-like
             Function input values. If one-dimensional, must be a column
             vector with shape `(n, 1)`.
-        pred_noise: bool
+        pred_noise : bool, default False
             Whether or not observation noise is included in the conditional.
-            Default is `False`.
-        given: dict
-            Can optionally take as key value pairs: `X`, `Xu`, `y`, `sigma`,
-            and `gp`. See the section in the documentation on additive GP
-            models in PyMC for more information.
-        jitter: scalar
+        given : dict, optional
+            Can take key value pairs: `X`, `Xu`, `y`, `sigma`,
+            and `gp`. See the :ref:`section <additive_gp>` in the documentation
+            on additive GP models in pymc for more information.
+        jitter : float, default 1e-6
             A small correction added to the diagonal of positive semi-definite
             covariance matrices to ensure numerical stability.
         **kwargs
-            Extra keyword arguments that are passed to `MvNormal` distribution
+            Extra keyword arguments that are passed to :class:`~pymc.MvNormal` distribution
             constructor.
         """
 
@@ -892,20 +909,19 @@ class LatentKron(Base):
     Kronecker structured covariance, without reference to any noise or
     specific likelihood.  The GP is constructed with the `prior` method,
     and the conditional GP over new input locations is constructed with
-    the `conditional` method.  `conditional` and method.  For more
+    the `conditional` method. For more
     information on these methods, see their docstrings.  This GP
     implementation can be used to model a Gaussian process whose inputs
     cover evenly spaced grids on more than one dimension.  `LatentKron`
-    is relies on the `KroneckerNormal` distribution, see its docstring
+    relies on the `KroneckerNormal` distribution, see its docstring
     for more information.
 
     Parameters
     ----------
-    cov_funcs: list of Covariance objects
+    mean_func : Mean, default ~pymc.gp.mean.Zero
+        The mean function.
+    cov_funcs : list of Covariance, default [~pymc.gp.cov.Constant]
         The covariance functions that compose the tensor (Kronecker) product.
-        Defaults to [zero].
-    mean_func: None, instance of Mean
-        The mean function.  Defaults to zero.
 
     Examples
     --------
@@ -963,18 +979,18 @@ def prior(self, name, Xs, jitter=JITTER_DEFAULT, **kwargs):
 
         Parameters
         ----------
-        name: string
+        name : str
             Name of the random variable
-        Xs: list of array-like
+        Xs : list of array-like
             Function input values for each covariance function. Each entry
             must be passable to its respective covariance without error. The
             total covariance function is measured on the full grid
             `cartesian(*Xs)`.
-        jitter: scalar
+        jitter : float, default 1e-6
             A small correction added to the diagonal of positive semi-definite
             covariance matrices to ensure numerical stability.
         **kwargs
-            Extra keyword arguments that are passed to the `KroneckerNormal`
+            Extra keyword arguments that are passed to the :class:`~pymc.KroneckerNormal`
             distribution constructor.
         """
         if len(Xs) != len(self.cov_funcs):
@@ -1024,16 +1040,16 @@ def conditional(self, name, Xnew, jitter=JITTER_DEFAULT, **kwargs):
 
         Parameters
         ----------
-        name: string
+        name : str
             Name of the random variable
-        Xnew: array-like
+        Xnew : array-like
             Function input values. If one-dimensional, must be a column
             vector with shape `(n, 1)`.
-        jitter: scalar
+        jitter : float, default 1e-6
             A small correction added to the diagonal of positive semi-definite
             covariance matrices to ensure numerical stability.
         **kwargs
-            Extra keyword arguments that are passed to `MvNormal` distribution
+            Extra keyword arguments that are passed to :class:`~pymc.MvNormal` distribution
             constructor.
         """
         mu, cov = self._build_conditional(Xnew, jitter)
@@ -1053,15 +1069,15 @@ class MarginalKron(Base):
     are measured on a full grid of inputs: `cartesian(*Xs)`.
     `MarginalKron` is based on the `KroneckerNormal` distribution, see
     its docstring for more information.  For more information on the
-    `prior` and `conditional` methods, see their docstrings.
+    `marginal_likelihood`, `conditional` and `predict` methods,
+    see their docstrings.
 
     Parameters
     ----------
-    cov_funcs: list of Covariance objects
+    mean_func : Mean, default ~pymc.gp.mean.Zero
+        The mean function.
+    cov_funcs : list of Covariance, default [~pymc.gp.cov.Constant]
         The covariance functions that compose the tensor (Kronecker) product.
-        Defaults to [zero].
-    mean_func: None, instance of Mean
-        The mean function.  Defaults to zero.
 
     Examples
     --------
@@ -1131,23 +1147,22 @@ def marginal_likelihood(self, name, Xs, y, sigma, is_observed=True, **kwargs):
 
         Parameters
         ----------
-        name: string
+        name : str
             Name of the random variable
-        Xs: list of array-like
+        Xs : list of array-like
             Function input values for each covariance function. Each entry
             must be passable to its respective covariance without error. The
             total covariance function is measured on the full grid
             `cartesian(*Xs)`.
-        y: array-like
+        y : array-like
             Data that is the sum of the function with the GP prior and
             Gaussian noise.  Must have shape `(n, )`.
-        sigma: scalar, Variable
+        sigma : float, Variable
             Standard deviation of the white Gaussian noise.
-        is_observed: bool
-            Whether to set `y` as an `observed` variable in the `model`.
-            Default is `True`.
+        is_observed : bool, default True
+            Deprecated. Whether to set `y` as an `observed` variable in the `model`.
         **kwargs
-            Extra keyword arguments that are passed to `KroneckerNormal`
+            Extra keyword arguments that are passed to the :class:`~pymc.KroneckerNormal`
             distribution constructor.
         """
         self._check_inputs(Xs, y)
@@ -1226,16 +1241,15 @@ def conditional(self, name, Xnew, pred_noise=False, diag=False, **kwargs):
 
         Parameters
         ----------
-        name: string
+        name : str
             Name of the random variable
-        Xnew: array-like
+        Xnew : array-like
             Function input values. If one-dimensional, must be a column
             vector with shape `(n, 1)`.
-        pred_noise: bool
+        pred_noise : bool, default False
             Whether or not observation noise is included in the conditional.
-            Default is `False`.
         **kwargs
-            Extra keyword arguments that are passed to `MvNormal` distribution
+            Extra keyword arguments that are passed to :class:`~pymc.MvNormal` distribution
             constructor.
         """
         mu, cov = self._build_conditional(Xnew, diag, pred_noise)
@@ -1249,17 +1263,20 @@ def predict(self, Xnew, point=None, diag=False, pred_noise=False, model=None):
 
         Parameters
         ----------
-        Xnew: array-like
+        Xnew : array-like
             Function input values. If one-dimensional, must be a column
             vector with shape `(n, 1)`.
-        point: pymc.model.Point
+        point : pymc.Point, optional
             A specific point to condition on.
-        diag: bool
+        diag : bool, default False
             If `True`, return the diagonal instead of the full covariance
-            matrix.  Default is `False`.
-        pred_noise: bool
+            matrix.
+        pred_noise : bool, default False
             Whether or not observation noise is included in the conditional.
-            Default is `False`.
+        model : Model, optional
+            Model with the Gaussian Process component for which predictions will
+            be generated. It is optional when inside a with context, otherwise
+            it is required.
         """
         mu, cov = self._predict_at(Xnew, diag, pred_noise)
         return replace_with_values([mu, cov], replacements=point, model=model)
@@ -1271,15 +1288,14 @@ def _predict_at(self, Xnew, diag=False, pred_noise=False):
 
         Parameters
         ----------
-        Xnew: array-like
+        Xnew : array-like
             Function input values. If one-dimensional, must be a column
             vector with shape `(n, 1)`.
-        diag: bool
+        diag : bool, default False
             If `True`, return the diagonal instead of the full covariance
-            matrix.  Default is `False`.
-        pred_noise: bool
+            matrix.
+        pred_noise : bool, default False
             Whether or not observation noise is included in the conditional.
-            Default is `False`.
         """
         mu, cov = self._build_conditional(Xnew, diag, pred_noise)
         return mu, cov
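The `model` argument documented above lets `predict` be called outside a with-model context; a sketch continuing the hypothetical `Marginal` example earlier in this diff, conditioning on a MAP point:

    with model:
        mp = pm.find_MAP()  # a Point: dict mapping variable names to values

    # predictive mean and diagonal variance at Xnew, evaluated at `mp`
    mu, var = gp.predict(Xnew, point=mp, diag=True, model=model)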
diff --git a/pymc/sampling/forward.py b/pymc/sampling/forward.py
index a03c598c9e..fd0260c3ae 100644
--- a/pymc/sampling/forward.py
+++ b/pymc/sampling/forward.py
@@ -481,8 +481,7 @@ def sample_posterior_predictive(
         Whether to automatically use :meth:`arviz.InferenceData.extend` to add the posterior predictive samples to
         ``trace`` or not. If True, ``trace`` is modified inplace but still returned.
     predictions : bool, default False
-        Choose the function used to convert the samples to inferencedata. See ``idata_kwargs``
-        for more details.
+        Flag used to set the location of posterior predictive samples within the returned ``arviz.InferenceData`` object. If False, assumes samples are generated based on the fitting data to be used for posterior predictive checks, and samples are stored in the ``posterior_predictive`` group. If True, assumes samples are generated based on out-of-sample data as predictions, and samples are stored in the ``predictions`` group.
     idata_kwargs : dict, optional
         Keyword arguments for :func:`pymc.to_inference_data` if ``predictions=False`` or to
         :func:`pymc.predictions_to_inference_data` otherwise.
diff --git a/pymc/testing.py b/pymc/testing.py
index ea3ccfc46f..3bb222222f 100644
--- a/pymc/testing.py
+++ b/pymc/testing.py
@@ -526,6 +526,7 @@ def check_icdf(
     pymc_dist: Distribution,
     paramdomains: Dict[str, Domain],
     scipy_icdf: Callable,
+    skip_paramdomain_outside_edge_test: bool = False,
     decimal: Optional[int] = None,
     n_samples: int = 100,
 ) -> None:
@@ -548,7 +549,7 @@ def check_icdf(
     paramdomains : Dictionary of Parameter : Domain pairs
         Supported domains of distribution parameters
     scipy_icdf : Scipy icdf method
-        Scipy icdf (ppp) method of equivalent pymc_dist distribution
+        Scipy icdf (ppf) method of equivalent pymc_dist distribution
     decimal : int, optional
         Level of precision with which pymc_dist and scipy_icdf are compared.
         Defaults to 6 for float64 and 3 for float32
@@ -557,6 +558,9 @@ def check_icdf(
         are compared between pymc and scipy methods. If n_samples is below the
         total number of combinations, a random subset is evaluated.
         Setting n_samples = -1, will return all possible combinations. Defaults to 100
+    skip_paramdomain_outside_edge_test : bool
+        Whether to skip the test that the pymc distribution icdf raises
+        ParameterValueError for parameter values outside the supported domain edges
 
     """
     if decimal is None:
@@ -586,19 +590,20 @@ def check_icdf(
     valid_params = {param: paramdomain.vals[0] for param, paramdomain in paramdomains.items()}
     valid_params["q"] = valid_value
 
-    # Test pymc distribution raises ParameterValueError for parameters outside the
-    # supported domain edges (excluding edges)
-    invalid_params = find_invalid_scalar_params(paramdomains)
-    for invalid_param, invalid_edges in invalid_params.items():
-        for invalid_edge in invalid_edges:
-            if invalid_edge is None:
-                continue
+    if not skip_paramdomain_outside_edge_test:
+        # Test pymc distribution raises ParameterValueError for parameters outside the
+        # supported domain edges (excluding edges)
+        invalid_params = find_invalid_scalar_params(paramdomains)
+        for invalid_param, invalid_edges in invalid_params.items():
+            for invalid_edge in invalid_edges:
+                if invalid_edge is None:
+                    continue
 
-            point = valid_params.copy()
-            point[invalid_param] = invalid_edge
-            with pytest.raises(ParameterValueError):
-                pymc_icdf(**point)
-                pytest.fail(f"test_params={point}")
+                point = valid_params.copy()
+                point[invalid_param] = invalid_edge
+                with pytest.raises(ParameterValueError):
+                    pymc_icdf(**point)
+                    pytest.fail(f"test_params={point}")
 
     # Test that values below 0 or above 1 evaluate to nan
     invalid_values = find_invalid_scalar_params({"q": domain})["q"]
diff --git a/tests/distributions/test_continuous.py b/tests/distributions/test_continuous.py
index 8b4d2ef2b0..8b4484a66c 100644
--- a/tests/distributions/test_continuous.py
+++ b/tests/distributions/test_continuous.py
@@ -26,9 +26,9 @@
 
 import pymc as pm
 
-from pymc.distributions.continuous import Normal, get_tau_sigma, interpolated
+from pymc.distributions.continuous import Normal, Uniform, get_tau_sigma, interpolated
 from pymc.distributions.dist_math import clipped_beta_rvs
-from pymc.logprob.abstract import logcdf
+from pymc.logprob.abstract import icdf, logcdf
 from pymc.logprob.joint_logprob import logp
 from pymc.logprob.utils import ParameterValueError
 from pymc.pytensorf import floatX
@@ -176,6 +176,12 @@ def test_uniform(self):
             lambda value, lower, upper: st.uniform.logcdf(value, lower, upper - lower),
             skip_paramdomain_outside_edge_test=True,
         )
+        check_icdf(
+            pm.Uniform,
+            {"lower": -Rplusunif, "upper": Rplusunif},
+            lambda q, lower, upper: st.uniform.ppf(q=q, loc=lower, scale=upper - lower),
+            skip_paramdomain_outside_edge_test=True,
+        )
         # Custom logp / logcdf check for invalid parameters
         invalid_dist = pm.Uniform.dist(lower=1, upper=0)
         with pytensor.config.change_flags(mode=Mode("py")):
@@ -183,6 +189,8 @@ def test_uniform(self):
             logp(invalid_dist, np.array(0.5)).eval()
         with pytest.raises(ParameterValueError):
             logcdf(invalid_dist, np.array(0.5)).eval()
+        with pytest.raises(ParameterValueError):
+            icdf(invalid_dist, np.array(0.5)).eval()
 
     def test_triangular(self):
         check_logp(
diff --git a/tests/distributions/test_discrete.py b/tests/distributions/test_discrete.py
index f3152268ea..78dbd7999b 100644
--- a/tests/distributions/test_discrete.py
+++ b/tests/distributions/test_discrete.py
@@ -29,7 +29,7 @@
 import pymc as pm
 
 from pymc.distributions.discrete import Geometric, _OrderedLogistic, _OrderedProbit
-from pymc.logprob.abstract import logcdf
+from pymc.logprob.abstract import icdf, logcdf
 from pymc.logprob.joint_logprob import logp
 from pymc.logprob.utils import ParameterValueError
 from pymc.pytensorf import floatX
@@ -118,6 +118,12 @@ def test_discrete_unif(self):
             Domain([-10, 0, 10], "int64"),
             {"lower": -Rplusdunif, "upper": Rplusdunif},
         )
+        check_icdf(
+            pm.DiscreteUniform,
+            {"lower": -Rplusdunif, "upper": Rplusdunif},
+            lambda q, lower, upper: st.randint.ppf(q=q, low=lower, high=upper + 1),
+            skip_paramdomain_outside_edge_test=True,
+        )
         # Custom logp / logcdf check for invalid parameters
         invalid_dist = pm.DiscreteUniform.dist(lower=1, upper=0)
         with pytensor.config.change_flags(mode=Mode("py")):
@@ -125,6 +131,8 @@ def test_discrete_unif(self):
             logp(invalid_dist, 0.5).eval()
         with pytest.raises(ParameterValueError):
             logcdf(invalid_dist, 2).eval()
+        with pytest.raises(ParameterValueError):
+            icdf(invalid_dist, np.array(1)).eval()
 
     def test_geometric(self):
         check_logp(
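One subtlety worth calling out in the test lambdas above: `scipy.stats.randint` treats `high` as exclusive, which is why the `DiscreteUniform` check passes `high=upper + 1`. A standalone sketch of the same equivalence (assuming the patches are applied):

    import numpy as np
    import scipy.stats as st
    import pymc as pm
    from pymc.logprob.abstract import icdf

    q = np.array(0.73)
    pymc_val = icdf(pm.DiscreteUniform.dist(lower=0, upper=9), q).eval()
    scipy_val = st.randint.ppf(q, low=0, high=10)  # high is exclusive
    assert pymc_val == scipy_val == 7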