Get rid of FutureWarnings related to Model.initial_point #5273

Merged · 2 commits · Dec 20, 2021
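
This PR replaces every internal use of the deprecated `Model.initial_point` property, whose access triggers a FutureWarning, with an explicit call to `Model.recompute_initial_point()`. The two are interchangeable for callers; a minimal migration sketch (the one-variable model below is hypothetical, not from this diff):

    import pymc as pm

    with pm.Model() as model:
        pm.Normal("x", 0.0, 1.0)

    # Before: deprecated property access, emits a FutureWarning
    # point = model.initial_point
    # After: explicit recomputation, warning-free
    point = model.recompute_initial_point()  # dict: value-var name -> numpy array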
4 changes: 2 additions & 2 deletions pymc/backends/base.py
@@ -70,9 +70,9 @@ def __init__(self, name, model=None, vars=None, test_point=None):
         # Get variable shapes. Most backends will need this
         # information.
         if test_point is None:
-            test_point = model.initial_point
+            test_point = model.recompute_initial_point()
         else:
-            test_point_ = model.initial_point.copy()
+            test_point_ = model.recompute_initial_point().copy()
             test_point_.update(test_point)
             test_point = test_point_
         var_values = list(zip(self.varnames, self.fn(test_point)))
18 changes: 9 additions & 9 deletions pymc/model.py
@@ -57,7 +57,7 @@
 )
 from pymc.blocking import DictToArrayBijection, RaveledVars
 from pymc.data import GenTensorVariable, Minibatch
-from pymc.distributions import logp_transform, logpt, logpt_sum
+from pymc.distributions import logp_transform, logpt
 from pymc.exceptions import ImputationWarning, SamplingError, ShapeError
 from pymc.initial_point import make_initial_point_fn
 from pymc.math import flatten_list
@@ -1538,7 +1538,7 @@ def profile(self, outs, n=1000, point=None, profile=True, *args, **kwargs):
         """
         f = self.makefn(outs, profile=profile, *args, **kwargs)
         if point is None:
-            point = self.initial_point
+            point = self.recompute_initial_point()

         for _ in range(n):
             f(**point)
@@ -1697,17 +1697,17 @@ def point_logps(self, point=None, round_vals=2):
         Pandas Series
         """
         if point is None:
-            point = self.initial_point
+            point = self.recompute_initial_point()

         return Series(
             {
-                rv.name: np.round(
-                    np.asarray(
-                        self.fn(logpt_sum(rv, getattr(rv.tag, "observations", None)))(point)
-                    ),
-                    round_vals,
+                rv.name: np.round(np.asarray(logp), round_vals)
+                for rv, logp in zip(
+                    self.basic_RVs,
+                    self.fn(
+                        [at.sum(factor) for factor in self.logp_elemwiset(vars=self.basic_RVs)]
+                    )(point),
                 )
-                for rv in self.basic_RVs
             },
             name="Log-probability of test_point",
         )
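
Beyond the property swap, `point_logps` now compiles one Aesara function over all basic RVs, summing each factor from `logp_elemwiset`, instead of building a separate `logpt_sum` graph per RV. A hedged usage sketch (hypothetical model; exact values depend on the initial point):

    import pymc as pm

    with pm.Model() as m:
        x = pm.Normal("x", 0.0, 1.0)
        pm.Normal("obs", mu=x, sigma=1.0, observed=[0.1, -0.3])

    # Pandas Series of each RV's summed log-probability, evaluated at the
    # recomputed initial point in a single compiled call.
    print(m.point_logps())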
3 changes: 2 additions & 1 deletion pymc/sampling_jax.py
@@ -171,7 +171,8 @@ def sample_numpyro_nuts(
     print("Compiling...", file=sys.stdout)

     rv_names = [rv.name for rv in model.value_vars]
-    init_state = [model.initial_point[rv_name] for rv_name in rv_names]
+    initial_point = model.recompute_initial_point()
+    init_state = [initial_point[rv_name] for rv_name in rv_names]
     init_state_batched = jax.tree_map(lambda x: np.repeat(x[None, ...], chains, axis=0), init_state)

     logp_fn = get_jaxified_logp(model)
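
Only the initial-point lookup changed here; the batching line below it is untouched. As a standalone sketch of that idiom, `jax.tree_map` tiles every initial-point array along a new leading chain axis (assuming 4 chains and two toy arrays):

    import jax
    import numpy as np

    chains = 4
    init_state = [np.zeros(()), np.ones(3)]  # e.g. a scalar and a vector value var

    # One row per chain: shape (...) becomes (chains, ...)
    init_state_batched = jax.tree_map(
        lambda x: np.repeat(x[None, ...], chains, axis=0), init_state
    )
    print([leaf.shape for leaf in init_state_batched])  # [(4,), (4, 3)]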
2 changes: 1 addition & 1 deletion pymc/step_methods/hmc/base_hmc.py
@@ -102,7 +102,7 @@ def __init__(
         # size.
         # XXX: If the dimensions of these terms change, the step size
         # dimension-scaling should change as well, no?
-        test_point = self._model.initial_point
+        test_point = self._model.recompute_initial_point()

         nuts_vars = [test_point[v.name] for v in vars]
         size = sum(v.size for v in nuts_vars)
10 changes: 5 additions & 5 deletions pymc/step_methods/metropolis.py
@@ -161,7 +161,7 @@ def __init__(
         """

         model = pm.modelcontext(model)
-        initial_values = model.initial_point
+        initial_values = model.recompute_initial_point()

         if vars is None:
             vars = model.value_vars
@@ -425,7 +425,7 @@ def __init__(self, vars, order="random", transit_p=0.8, model=None):
         # transition probabilities
         self.transit_p = transit_p

-        initial_point = model.initial_point
+        initial_point = model.recompute_initial_point()
         vars = [model.rvs_to_values.get(var, var) for var in vars]
         self.dim = sum(initial_point[v.name].size for v in vars)

@@ -510,7 +510,7 @@ def __init__(self, vars, proposal="uniform", order="random", model=None):
         vars = [model.rvs_to_values.get(var, var) for var in vars]
         vars = pm.inputvars(vars)

-        initial_point = model.initial_point
+        initial_point = model.recompute_initial_point()

         dimcats = []
         # The above variable is a list of pairs (aggregate dimension, number
@@ -710,7 +710,7 @@ def __init__(
     ):

         model = pm.modelcontext(model)
-        initial_values = model.initial_point
+        initial_values = model.recompute_initial_point()
         initial_values_size = sum(initial_values[n.name].size for n in model.value_vars)

         if vars is None:
@@ -861,7 +861,7 @@ def __init__(
         **kwargs
     ):
         model = pm.modelcontext(model)
-        initial_values = model.initial_point
+        initial_values = model.recompute_initial_point()
         initial_values_size = sum(initial_values[n.name].size for n in model.value_vars)

         if vars is None:
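
In each of these step methods the recomputed initial point plays the same role: a `{value-var name: array}` dict from which the total sampling dimension is derived. A sketch of that computation with a hypothetical two-variable model:

    import pymc as pm

    with pm.Model() as model:
        pm.Normal("a", 0.0, 1.0, size=3)
        pm.Normal("b", 0.0, 1.0)

    initial_values = model.recompute_initial_point()
    # Mirrors the samplers' size computation above.
    size = sum(initial_values[v.name].size for v in model.value_vars)
    print(size)  # 4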
6 changes: 3 additions & 3 deletions pymc/step_methods/mlda.py
@@ -52,7 +52,7 @@ def __init__(self, *args, **kwargs):
         and some extra code specific for MLDA.
         """
         model = pm.modelcontext(kwargs.get("model", None))
-        initial_values = model.initial_point
+        initial_values = model.recompute_initial_point()

         # flag to that variance reduction is activated - forces MetropolisMLDA
         # to store quantities of interest in a register if True
@@ -114,7 +114,7 @@ def __init__(self, *args, **kwargs):
         self.tuning_end_trigger = False

         model = pm.modelcontext(kwargs.get("model", None))
-        initial_values = model.initial_point
+        initial_values = model.recompute_initial_point()

         # flag to that variance reduction is activated - forces DEMetropolisZMLDA
         # to store quantities of interest in a register if True
@@ -381,7 +381,7 @@ def __init__(

         # assign internal state
         model = pm.modelcontext(model)
-        initial_values = model.initial_point
+        initial_values = model.recompute_initial_point()
         self.model = model
         self.coarse_models = coarse_models
         self.model_below = self.coarse_models[-1]
30 changes: 15 additions & 15 deletions pymc/tests/models.py
@@ -32,7 +32,7 @@ def simple_model():
     with Model() as model:
         Normal("x", mu, tau=tau, size=2, initval=floatX_array([0.1, 0.1]))

-    return model.initial_point, model, (mu, tau ** -0.5)
+    return model.recompute_initial_point(), model, (mu, tau ** -0.5)


 def simple_categorical():
@@ -43,7 +43,7 @@ def simple_categorical():

     mu = np.dot(p, v)
     var = np.dot(p, (v - mu) ** 2)
-    return model.initial_point, model, (mu, var)
+    return model.recompute_initial_point(), model, (mu, var)


 def multidimensional_model():
@@ -52,7 +52,7 @@ def multidimensional_model():
     with Model() as model:
         Normal("x", mu, tau=tau, size=(3, 2), initval=0.1 * np.ones((3, 2)))

-    return model.initial_point, model, (mu, tau ** -0.5)
+    return model.recompute_initial_point(), model, (mu, tau ** -0.5)


 def simple_arbitrary_det():
@@ -67,7 +67,7 @@ def arbitrary_det(value):
         b = arbitrary_det(a)
         Normal("obs", mu=b.astype("float64"), observed=floatX_array([1, 3, 5]))

-    return model.initial_point, model
+    return model.recompute_initial_point(), model


 def simple_init():
@@ -84,7 +84,7 @@ def simple_2model():
         x = pm.Normal("x", mu, tau=tau, initval=0.1)
         pm.Deterministic("logx", at.log(x))
         pm.Bernoulli("y", p)
-    return model.initial_point, model
+    return model.recompute_initial_point(), model


 def simple_2model_continuous():
@@ -94,7 +94,7 @@ def simple_2model_continuous():
         x = pm.Normal("x", mu, tau=tau, initval=0.1)
         pm.Deterministic("logx", at.log(x))
         pm.Beta("y", alpha=1, beta=1, size=2)
-    return model.initial_point, model
+    return model.recompute_initial_point(), model


 def mv_simple():
@@ -110,7 +110,7 @@ def mv_simple():
     )
     H = tau
     C = np.linalg.inv(H)
-    return model.initial_point, model, (mu, C)
+    return model.recompute_initial_point(), model, (mu, C)


 def mv_simple_coarse():
@@ -126,7 +126,7 @@ def mv_simple_coarse():
     )
     H = tau
     C = np.linalg.inv(H)
-    return model.initial_point, model, (mu, C)
+    return model.recompute_initial_point(), model, (mu, C)


 def mv_simple_very_coarse():
@@ -142,7 +142,7 @@ def mv_simple_very_coarse():
     )
     H = tau
     C = np.linalg.inv(H)
-    return model.initial_point, model, (mu, C)
+    return model.recompute_initial_point(), model, (mu, C)


 def mv_simple_discrete():
@@ -160,7 +160,7 @@ def mv_simple_discrete():
             else:
                 C[i, j] = -n * p[i] * p[j]

-    return model.initial_point, model, (mu, C)
+    return model.recompute_initial_point(), model, (mu, C)


 def mv_prior_simple():
@@ -186,27 +186,27 @@ def mv_prior_simple():
         x = pm.Flat("x", size=n)
         x_obs = pm.MvNormal("x_obs", observed=obs, mu=x, cov=noise * np.eye(n))

-    return model.initial_point, model, (K, L, mu_post, std_post, noise)
+    return model.recompute_initial_point(), model, (K, L, mu_post, std_post, noise)


 def non_normal(n=2):
     with pm.Model() as model:
         pm.Beta("x", 3, 3, size=n, transform=None)
-    return model.initial_point, model, (np.tile([0.5], n), None)
+    return model.recompute_initial_point(), model, (np.tile([0.5], n), None)


 def exponential_beta(n=2):
     with pm.Model() as model:
         pm.Beta("x", 3, 1, size=n, transform=None)
         pm.Exponential("y", 1, size=n, transform=None)
-    return model.initial_point, model, None
+    return model.recompute_initial_point(), model, None


 def beta_bernoulli(n=2):
     with pm.Model() as model:
         pm.Beta("x", 3, 1, size=n, transform=None)
         pm.Bernoulli("y", 0.5)
-    return model.initial_point, model, None
+    return model.recompute_initial_point(), model, None


 def simple_normal(bounded_prior=False):
@@ -222,4 +222,4 @@ def simple_normal(bounded_prior=False):
         mu_i = pm.Flat("mu_i")
         pm.Normal("X_obs", mu=mu_i, sigma=sd, observed=x0)

-    return model.initial_point, model, None
+    return model.recompute_initial_point(), model, None
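
Each fixture now returns a freshly computed initial point as its first element, so repeated calls no longer share a cached dict. Callers are unchanged; a sketch of how the test suite consumes these helpers:

    start, model, (mu, C) = mv_simple()
    # `start` is a plain dict of numpy arrays, usable as a sampler start point.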
2 changes: 1 addition & 1 deletion pymc/tests/test_aesaraf.py
@@ -108,7 +108,7 @@ def test_make_shared_replacements(self):

         # Replace test1 with a shared variable, keep test 2 the same
         replacement = pm.make_shared_replacements(
-            test_model.initial_point, [test_model.test2], test_model
+            test_model.recompute_initial_point(), [test_model.test2], test_model
         )
         assert (
             test_model.test1.broadcastable
2 changes: 1 addition & 1 deletion pymc/tests/test_data_container.py
@@ -34,7 +34,7 @@ def test_deterministic(self):
         with pm.Model() as model:
             X = pm.Data("X", data_values)
             pm.Normal("y", 0, 1, observed=X)
-            model.logp(model.initial_point)
+            model.logp(model.recompute_initial_point())

     def test_sample(self):
         x = np.random.normal(size=100)
7 changes: 4 additions & 3 deletions pymc/tests/test_distributions.py
@@ -2739,9 +2739,10 @@ def test_bound_shapes(self):
         bound_shaped = Bound("boundedshaped", dist, lower=1, upper=10, shape=(3, 5))
         bound_dims = Bound("boundeddims", dist, lower=1, upper=10, dims="sample")

-        dist_size = m.initial_point["boundedsized_interval__"].shape
-        dist_shape = m.initial_point["boundedshaped_interval__"].shape
-        dist_dims = m.initial_point["boundeddims_interval__"].shape
+        initial_point = m.recompute_initial_point()
+        dist_size = initial_point["boundedsized_interval__"].shape
+        dist_shape = initial_point["boundedshaped_interval__"].shape
+        dist_dims = initial_point["boundeddims_interval__"].shape

         assert dist_size == (4, 5)
         assert dist_shape == (3, 5)
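
The `*_interval__` keys above reflect that the initial point is keyed by value variables, i.e. by transformed names rather than RV names. A small sketch (assuming the default interval transform for a bounded RV):

    import pymc as pm

    with pm.Model() as m:
        pm.Uniform("u", 0.0, 1.0)

    # The bounded RV "u" shows up under its unbounded value variable:
    print(m.recompute_initial_point())  # e.g. {'u_interval__': array(0.)}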
2 changes: 1 addition & 1 deletion pymc/tests/test_distributions_random.py
@@ -1810,7 +1810,7 @@ def test_mixture_random_shape():
     assert rand3.shape == (100, 20)

     with m:
-        ppc = pm.sample_posterior_predictive([m.initial_point], samples=200)
+        ppc = pm.sample_posterior_predictive([m.recompute_initial_point()], samples=200)
     assert ppc["like0"].shape == (200, 20)
     assert ppc["like1"].shape == (200, 20)
     assert ppc["like2"].shape == (200, 20)
7 changes: 5 additions & 2 deletions pymc/tests/test_distributions_timeseries.py
@@ -42,7 +42,8 @@ def test_AR():
         rho = Normal("rho", 0.0, 1.0)
         y1 = AR1("y1", rho, 1.0, observed=data)
         y2 = AR("y2", rho, 1.0, init=Normal.dist(0, 1), observed=data)
-        np.testing.assert_allclose(y1.logp(t.initial_point), y2.logp(t.initial_point))
+        initial_point = t.recompute_initial_point()
+        np.testing.assert_allclose(y1.logp(initial_point), y2.logp(initial_point))

     # AR1 + constant
     with Model() as t:
@@ -76,7 +77,9 @@ def test_AR_nd():
     for i in range(n):
         AR("y_%d" % i, beta[:, i], sigma=1.0, shape=T, initval=y_tp[:, i])

-    np.testing.assert_allclose(t0.logp(t0.initial_point), t1.logp(t1.initial_point))
+    np.testing.assert_allclose(
+        t0.logp(t0.recompute_initial_point()), t1.logp(t1.recompute_initial_point())
+    )


 def test_GARCH11():
4 changes: 2 additions & 2 deletions pymc/tests/test_missing.py
@@ -40,7 +40,7 @@ def test_missing(data):

     assert "y_missing" in model.named_vars

-    test_point = model.initial_point
+    test_point = model.recompute_initial_point()
     assert not np.isnan(model.logp(test_point))

     with model:
@@ -58,7 +58,7 @@ def test_missing_with_predictors():

     assert "y_missing" in model.named_vars

-    test_point = model.initial_point
+    test_point = model.recompute_initial_point()
     assert not np.isnan(model.logp(test_point))

     with model: