diff --git a/tests/checks.py b/tests/checks.py
deleted file mode 100644
index 70d8816e3d..0000000000
--- a/tests/checks.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright 2023 The PyMC Developers
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import numpy as np
-
-
-def close_to(x, v, bound, name="value"):
-    assert np.all(np.logical_or(np.abs(x - v) < bound, x == v)), (
-        name + " out of bounds: " + repr(x) + ", " + repr(v) + ", " + repr(bound)
-    )
-
-
-def close_to_logical(x, v, bound, name="value"):
-    assert np.all(np.logical_or(np.abs(np.bitwise_xor(x, v)) < bound, x == v)), (
-        name + " out of bounds: " + repr(x) + ", " + repr(v) + ", " + repr(bound)
-    )
diff --git a/tests/distributions/test_dist_math.py b/tests/distributions/test_dist_math.py
index 793846fd11..99f22af31e 100644
--- a/tests/distributions/test_dist_math.py
+++ b/tests/distributions/test_dist_math.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import numpy as np
-import numpy.testing as npt
 import pytensor
 import pytensor.tensor as pt
 import pytest
@@ -36,7 +35,6 @@
 )
 from pymc.logprob.utils import ParameterValueError
 from pymc.pytensorf import floatX
-from tests.checks import close_to
 from tests.helpers import verify_grad
 
 
@@ -160,7 +158,7 @@ def test_clipped_beta_rvs(dtype):
 
 def check_vals(fn1, fn2, *args):
     v = fn1(*args)
-    close_to(v, fn2(*args), 1e-6 if v.dtype == np.float64 else 1e-4)
+    np.testing.assert_allclose(v, fn2(*args), atol=1e-6 if v.dtype == np.float64 else 1e-4)
 
 
 def test_multigamma():
diff --git a/tests/distributions/test_transform.py b/tests/distributions/test_transform.py
index bc3a5e41b4..e9027dcf3e 100644
--- a/tests/distributions/test_transform.py
+++ b/tests/distributions/test_transform.py
@@ -20,6 +20,7 @@
 import pytensor.tensor as pt
 import pytest
 
+from numpy.testing import assert_allclose, assert_array_equal
 from pytensor.tensor.variable import TensorConstant
 
 import pymc as pm
@@ -40,7 +41,6 @@
     UnitSortedVector,
     Vector,
 )
-from tests.checks import close_to, close_to_logical
 
 # some transforms (stick breaking) require addition of small slack in order to be numerically
 # stable. The minimal addable slack for float32 is higher thus we need to be less strict
@@ -61,7 +61,7 @@ def check_transform(transform, domain, constructor=pt.scalar, test=0, rv_var=Non
     assert z.type == x.type
     identity_f = pytensor.function([x], z, *rv_inputs)
     for val in domain.vals:
-        close_to(val, identity_f(val), tol)
+        assert_allclose(val, identity_f(val), atol=tol)
 
 
 def check_vector_transform(transform, domain, rv_var=None):
@@ -117,7 +117,7 @@ def check_jacobian_det(
     )
 
     for yval in domain.vals:
-        np.testing.assert_allclose(actual_ljd(yval), computed_ljd(yval), rtol=tol)
+        assert_allclose(actual_ljd(yval), computed_ljd(yval), rtol=tol)
 
 
 def test_simplex():
@@ -132,9 +132,9 @@ def test_simplex():
 def test_simplex_bounds():
     vals = get_values(tr.simplex, Vector(R, 2), pt.vector, floatX(np.array([0, 0])))
 
-    close_to(vals.sum(axis=1), 1, tol)
-    close_to_logical(vals > 0, True, tol)
-    close_to_logical(vals < 1, True, tol)
+    assert_allclose(vals.sum(axis=1), 1, tol)
+    assert_array_equal(vals > 0, True)
+    assert_array_equal(vals < 1, True)
 
     check_jacobian_det(
         tr.simplex, Vector(R, 2), pt.vector, floatX(np.array([0, 0])), lambda x: x[:-1]
@@ -145,8 +145,8 @@ def test_simplex_accuracy():
     val = floatX(np.array([-30]))
     x = pt.vector("x")
     x.tag.test_value = val
-    identity_f = pytensor.function([x], tr.simplex.forward(x, tr.simplex.backward(x, x)))
-    close_to(val, identity_f(val), tol)
+    identity_f = pytensor.function([x], tr.simplex.forward(tr.simplex.backward(x)))
+    assert_allclose(val, identity_f(val), tol)
 
 
 def test_sum_to_1():
@@ -179,7 +179,7 @@ def test_log():
     check_jacobian_det(tr.log, Vector(Rplusbig, 2), pt.vector, [0, 0], elemwise=True)
 
     vals = get_values(tr.log)
-    close_to_logical(vals > 0, True, tol)
+    assert_array_equal(vals > 0, True)
 
 
 @pytest.mark.skipif(
@@ -192,7 +192,7 @@ def test_log_exp_m1():
     check_jacobian_det(tr.log_exp_m1, Vector(Rplusbig, 2), pt.vector, [0, 0], elemwise=True)
 
     vals = get_values(tr.log_exp_m1)
-    close_to_logical(vals > 0, True, tol)
+    assert_array_equal(vals > 0, True)
 
 
 def test_logodds():
@@ -202,8 +202,8 @@ def test_logodds():
     check_jacobian_det(tr.logodds, Vector(Unit, 2), pt.vector, [0.5, 0.5], elemwise=True)
 
     vals = get_values(tr.logodds)
-    close_to_logical(vals > 0, True, tol)
-    close_to_logical(vals < 1, True, tol)
+    assert_array_equal(vals > 0, True)
+    assert_array_equal(vals < 1, True)
 
 
 def test_lowerbound():
@@ -214,7 +214,7 @@ def test_lowerbound():
     check_jacobian_det(trans, Vector(Rplusbig, 2), pt.vector, [0, 0], elemwise=True)
 
     vals = get_values(trans)
-    close_to_logical(vals > 0, True, tol)
+    assert_array_equal(vals > 0, True)
 
 
 def test_upperbound():
@@ -225,7 +225,7 @@ def test_upperbound():
     check_jacobian_det(trans, Vector(Rminusbig, 2), pt.vector, [-1, -1], elemwise=True)
 
     vals = get_values(trans)
-    close_to_logical(vals < 0, True, tol)
+    assert_array_equal(vals < 0, True)
 
 
 def test_interval():
@@ -238,8 +238,8 @@ def test_interval():
         check_jacobian_det(trans, domain, elemwise=True)
 
         vals = get_values(trans)
-        close_to_logical(vals > a, True, tol)
-        close_to_logical(vals < b, True, tol)
+        assert_array_equal(vals > a, True)
+        assert_array_equal(vals < b, True)
 
 
 @pytest.mark.skipif(
@@ -254,7 +254,7 @@ def test_interval_near_boundary():
         pm.Uniform("x", initval=x0, lower=lb, upper=ub)
 
     log_prob = model.point_logps()
-    np.testing.assert_allclose(list(log_prob.values()), floatX(np.array([-52.68])))
+    assert_allclose(list(log_prob.values()), floatX(np.array([-52.68])))
 
 
 def test_circular():
@@ -264,8 +264,8 @@ def test_circular():
     check_jacobian_det(trans, Circ)
 
     vals = get_values(trans)
-    close_to_logical(vals > -np.pi, True, tol)
-    close_to_logical(vals < np.pi, True, tol)
+    assert_array_equal(vals > -np.pi, True)
+    assert_array_equal(vals < np.pi, True)
 
     assert isinstance(trans.forward(1, None), TensorConstant)
 
@@ -281,13 +281,13 @@ def test_ordered():
     )
 
     vals = get_values(tr.ordered, Vector(R, 3), pt.vector, floatX(np.zeros(3)))
-    close_to_logical(np.diff(vals) >= 0, True, tol)
+    assert_array_equal(np.diff(vals) >= 0, True)
 
 
 def test_chain_values():
     chain_tranf = tr.Chain([tr.logodds, tr.ordered])
     vals = get_values(chain_tranf, Vector(R, 5), pt.vector, floatX(np.zeros(5)))
-    close_to_logical(np.diff(vals) >= 0, True, tol)
+    assert_array_equal(np.diff(vals) >= 0, True)
 
 
 def test_chain_vector_transform():
@@ -339,7 +339,7 @@ def check_transform_elementwise_logp(self, model, vector_transform=False):
         untransform_logp_eval = untransform_logp.eval({x_val_untransf: test_array_untransf})
         log_jac_det_eval = log_jac_det.eval({x_val_transf: test_array_transf})
        # Summing the log_jac_det separately from the untransform_logp ensures there is no broadcasting between terms
-        np.testing.assert_allclose(
+        assert_allclose(
             transform_logp_eval.sum(),
             untransform_logp_eval.sum() + log_jac_det_eval.sum(),
             rtol=tol,
diff --git a/tests/helpers.py b/tests/helpers.py
index 6e4b4920c0..522fe6d6ae 100644
--- a/tests/helpers.py
+++ b/tests/helpers.py
@@ -23,12 +23,12 @@
 import numpy.random as nr
 import pytensor
 
+from numpy.testing import assert_array_less
 from pytensor.gradient import verify_grad as at_verify_grad
 
 import pymc as pm
 
 from pymc.testing import fast_unstable_sampling_mode
-from tests.checks import close_to
 from tests.models import mv_simple, mv_simple_coarse
 
 
@@ -118,11 +118,11 @@ def setup_class(self):
     def teardown_class(self):
         shutil.rmtree(self.temp_dir)
 
-    def check_stat(self, check, idata, name):
+    def check_stat(self, check, idata):
         group = idata.posterior
         for var, stat, value, bound in check:
             s = stat(group[var].sel(chain=0), axis=0)
-            close_to(s, value, bound, name)
+            assert_array_less(np.abs(s.values - value), bound)
 
     def check_stat_dtype(self, step, idata):
         # TODO: This check does not confirm the announced dtypes are correct as the
@@ -156,7 +156,7 @@ def step_continuous(self, step_fn, draws, chains=1, tune=1000):
         assert idata.warmup_posterior.sizes["draw"] == tune
         assert idata.posterior.sizes["chain"] == chains
         assert idata.posterior.sizes["draw"] == draws
-        self.check_stat(check, idata, step.__class__.__name__)
+        self.check_stat(check, idata)
         self.check_stat_dtype(idata, step)
 
 
diff --git a/tests/step_methods/test_metropolis.py b/tests/step_methods/test_metropolis.py
index 9dd762cd82..7b1154f941 100644
--- a/tests/step_methods/test_metropolis.py
+++ b/tests/step_methods/test_metropolis.py
@@ -302,7 +302,7 @@ def test_step_discrete(self):
                 model=model,
                 random_seed=1,
             )
-            self.check_stat(check, idata, step.__class__.__name__)
+            self.check_stat(check, idata)
             self.check_stat_dtype(idata, step)
 
     @pytest.mark.parametrize("proposal", ["uniform", "proportional"])
@@ -321,7 +321,7 @@ def test_step_categorical(self, proposal):
                 model=model,
                 random_seed=1,
             )
-            self.check_stat(check, idata, step.__class__.__name__)
+            self.check_stat(check, idata)
             self.check_stat_dtype(idata, step)
 
     @pytest.mark.parametrize(
diff --git a/tests/tuning/test_starting.py b/tests/tuning/test_starting.py
index cfdb891891..8e30abe779 100644
--- a/tests/tuning/test_starting.py
+++ b/tests/tuning/test_starting.py
@@ -16,6 +16,8 @@
 import numpy as np
 import pytest
 
+from numpy.testing import assert_allclose
+
 import pymc as pm
 
 from pymc.exceptions import ImputationWarning
@@ -23,7 +25,6 @@
 from pymc.testing import select_by_precision
 from pymc.tuning import find_MAP
 from tests import models
-from tests.checks import close_to
 from tests.models import non_normal, simple_arbitrary_det, simple_model
 
 
@@ -36,7 +37,7 @@ def test_mle_jacobian(bounded):
     start, model, _ = models.simple_normal(bounded_prior=bounded)
     with model:
         map_estimate = find_MAP(method="BFGS", model=model)
-    np.testing.assert_allclose(map_estimate["mu_i"], truth, rtol=rtol)
+    assert_allclose(map_estimate["mu_i"], truth, rtol=rtol)
 
 
 def test_tune_not_inplace():
@@ -50,14 +51,16 @@ def test_accuracy_normal():
     _, model, (mu, _) = simple_model()
     with model:
         newstart = find_MAP(pm.Point(x=[-10.5, 100.5]))
-        close_to(newstart["x"], [mu, mu], select_by_precision(float64=1e-5, float32=1e-4))
+        assert_allclose(
+            newstart["x"], [mu, mu], atol=select_by_precision(float64=1e-5, float32=1e-4)
+        )
 
 
 def test_accuracy_non_normal():
     _, model, (mu, _) = non_normal(4)
     with model:
         newstart = find_MAP(pm.Point(x=[0.5, 0.01, 0.95, 0.99]))
-        close_to(newstart["x"], mu, select_by_precision(float64=1e-5, float32=1e-4))
+        assert_allclose(newstart["x"], mu, atol=select_by_precision(float64=1e-5, float32=1e-4))
 
 
 def test_find_MAP_discrete():
@@ -76,9 +79,9 @@ def test_find_MAP_discrete():
         map_est1 = find_MAP()
         map_est2 = find_MAP(vars=model.value_vars)
 
-    close_to(map_est1["p"], 0.6086956533498806, tol1)
+    assert_allclose(map_est1["p"], 0.6086956533498806, atol=tol1, rtol=0)
 
-    close_to(map_est2["p"], 0.695642178810167, tol2)
+    assert_allclose(map_est2["p"], 0.695642178810167, atol=tol2, rtol=0)
 
     assert map_est2["ss"] == 14
 
@@ -105,11 +108,11 @@ def test_find_MAP():
         # Test non-gradient minimization
         map_est2 = find_MAP(progressbar=False, method="Powell")
 
-    close_to(map_est1["mu"], 0, tol)
-    close_to(map_est1["sigma"], 1, tol)
+    assert_allclose(map_est1["mu"], 0, atol=tol)
+    assert_allclose(map_est1["sigma"], 1, atol=tol)
 
-    close_to(map_est2["mu"], 0, tol)
-    close_to(map_est2["sigma"], 1, tol)
+    assert_allclose(map_est2["mu"], 0, atol=tol)
+    assert_allclose(map_est2["sigma"], 1, atol=tol)
 
 
 def test_find_MAP_issue_5923():
@@ -131,11 +134,11 @@ def test_find_MAP_issue_5923():
         map_est1 = find_MAP(progressbar=False, vars=[mu, sigma], start=start)
         map_est2 = find_MAP(progressbar=False, vars=[sigma, mu], start=start)
 
-    close_to(map_est1["mu"], 0, tol)
-    close_to(map_est1["sigma"], 1, tol)
+    assert_allclose(map_est1["mu"], 0, atol=tol)
+    assert_allclose(map_est1["sigma"], 1, atol=tol)
 
-    close_to(map_est2["mu"], 0, tol)
-    close_to(map_est2["sigma"], 1, tol)
+    assert_allclose(map_est2["mu"], 0, atol=tol)
+    assert_allclose(map_est2["sigma"], 1, atol=tol)
 
 
 def test_find_MAP_issue_4488():
@@ -147,8 +150,8 @@ def test_find_MAP_issue_4488():
         map_estimate = find_MAP()
 
     assert not set.difference({"x_unobserved", "x_unobserved_log__", "y"}, set(map_estimate.keys()))
-    np.testing.assert_allclose(map_estimate["x_unobserved"], 0.2, rtol=1e-4, atol=1e-4)
-    np.testing.assert_allclose(map_estimate["y"], [2.0, map_estimate["x_unobserved"][0] + 1])
+    assert_allclose(map_estimate["x_unobserved"], 0.2, rtol=1e-4, atol=1e-4)
+    assert_allclose(map_estimate["y"], [2.0, map_estimate["x_unobserved"][0] + 1])
 
 
 def test_find_MAP_warning_non_free_RVs():
@@ -161,4 +164,4 @@ def test_find_MAP_warning_non_free_RVs():
         msg = "Intermediate variables (such as Deterministic or Potential) were passed"
         with pytest.warns(UserWarning, match=re.escape(msg)):
            r = pm.find_MAP(vars=[det])
-        np.testing.assert_allclose([r["x"], r["y"], r["det"]], [50, 50, 100])
+        assert_allclose([r["x"], r["y"], r["det"]], [50, 50, 100])
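Note on the mapping (not part of the diff): the removed tests/checks.py helpers translate to numpy.testing roughly as sketched below. The example values (x, v, vals, s, bound) are made up for illustration only. One behavioural difference worth flagging: close_to/close_to_logical also accepted exact matches (x == v) regardless of the bound, an escape hatch the numpy assertions do not reproduce.

import numpy as np
from numpy.testing import assert_allclose, assert_array_equal, assert_array_less

# Illustrative values only (hypothetical, not taken from the test suite).
x = np.array([1.0, 2.0, 3.0])
v = np.array([1.0, 2.0, 3.0 + 1e-8])
bound = 1e-6

# close_to(x, v, bound) asserted np.abs(x - v) < bound element-wise;
# assert_allclose with atol=bound and rtol=0 is the closest drop-in.
assert_allclose(x, v, atol=bound, rtol=0)

# close_to_logical(vals > 0, True, tol) was effectively an exact boolean
# comparison, so assert_array_equal expresses the same check without a tolerance.
vals = np.array([0.2, 0.5, 0.9])
assert_array_equal(vals > 0, True)

# check_stat in tests/helpers.py now bounds the absolute deviation directly.
s = np.array([0.01, -0.02])
value, stat_bound = 0.0, 0.05
assert_array_less(np.abs(s - value), stat_bound)

When reviewing the converted call sites, keep in mind that the third positional argument of assert_allclose is rtol, not atol, so calls that want an absolute bound (as close_to provided) need to pass atol= explicitly, as most of the changes above do.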