
Commit f8b19a5

Armavica authored and ricardoV94 committed
Fix RUF005
1 parent 4140361 commit f8b19a5

33 files changed (+70 -76 lines)
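
RUF005 is Ruff's collection-literal-concatenation rule: it flags "+"-concatenation of a literal list or tuple onto another sequence and suggests the equivalent PEP 448 iterable unpacking, which skips the intermediate sequence and reads as a single literal. A minimal sketch of the rewrite applied throughout this commit (the names are illustrative, not taken from the diff):

    extra_args = ["--verbose"]
    # Before: concatenation builds an intermediate list
    cmd = ["git"] + extra_args + ["status"]
    # After: unpacking expresses the same list as one literal
    assert ["git", *extra_args, "status"] == cmd

    n_draws, base_shape = 500, (3, 4)
    # The same rewrite applies to tuple literals
    assert (n_draws, *base_shape) == (n_draws,) + base_shape == (500, 3, 4)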

pymc/_version.py (+2 -2)

@@ -98,10 +98,10 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=
 
     for command in commands:
         try:
-            dispcmd = str([command] + args)
+            dispcmd = str([command, *args])
             # remember shell=False, so use git.cmd on windows, not just git
             process = subprocess.Popen(
-                [command] + args,
+                [command, *args],
                 cwd=cwd,
                 env=env,
                 stdout=subprocess.PIPE,

pymc/backends/arviz.py (+1 -1)

@@ -149,7 +149,7 @@ def insert(self, k: str, v, idx: int):
 
         # initialize if necessary
         if k not in self.trace_dict:
-            array_shape = (self._len,) + value_shape
+            array_shape = (self._len, *value_shape)
             self.trace_dict[k] = np.empty(array_shape, dtype=np.array(v).dtype)
 
         # do the actual insertion

pymc/backends/ndarray.py (+2 -2)

@@ -72,12 +72,12 @@ def setup(self, draws, chain, sampler_vars=None) -> None:
             self.draw_idx = old_draws
             for varname, shape in self.var_shapes.items():
                 old_var_samples = self.samples[varname]
-                new_var_samples = np.zeros((draws,) + shape, self.var_dtypes[varname])
+                new_var_samples = np.zeros((draws, *shape), self.var_dtypes[varname])
                 self.samples[varname] = np.concatenate((old_var_samples, new_var_samples), axis=0)
         else:  # Otherwise, make array of zeros for each variable.
             self.draws = draws
             for varname, shape in self.var_shapes.items():
-                self.samples[varname] = np.zeros((draws,) + shape, dtype=self.var_dtypes[varname])
+                self.samples[varname] = np.zeros((draws, *shape), dtype=self.var_dtypes[varname])
 
         if sampler_vars is None:
             return

pymc/distributions/discrete.py (+1 -1)

@@ -1159,7 +1159,7 @@ def logp(value, p):
             value_clip = pt.shape_padleft(value_clip, p_.ndim - value_clip.ndim)
         elif p.ndim < value_clip.ndim:
             p = pt.shape_padleft(p, value_clip.ndim - p_.ndim)
-        pattern = (p.ndim - 1,) + tuple(range(p.ndim - 1))
+        pattern = (p.ndim - 1, *range(p.ndim - 1))
         a = pt.log(
             pt.take_along_axis(
                 p.dimshuffle(pattern),
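
A side benefit visible in this hunk: a range can be unpacked straight into a tuple literal, replacing the tuple(range(...)) + (...) construction. What the resulting axis permutation means, sketched with numpy (the array here is illustrative, not from the diff):

    import numpy as np

    p = np.zeros((2, 3, 4))
    pattern = (p.ndim - 1, *range(p.ndim - 1))  # (2, 0, 1): move the last axis to the front
    assert np.transpose(p, pattern).shape == (4, 2, 3)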

pymc/distributions/dist_math.py (+1 -1)

@@ -334,7 +334,7 @@ def random_choice(p, size):
     # probability. We must iterate over the elements of all the other
     # dimensions.
     # We first ensure that p is broadcasted to the output's shape
-    size = to_tuple(size) + (1,)
+    size = (*to_tuple(size), 1)
     p = np.broadcast_arrays(p, np.empty(size))[0]
     out_shape = p.shape[:-1]
     # np.random.choice accepts 1D p arrays, so we semiflatten p to

pymc/distributions/distribution.py (+2 -2)

@@ -722,7 +722,7 @@ def rv_op(
             error_msg_on_access="Model variables cannot be created in the dist function. Use the `.dist` API"
         ):
             dummy_rv = dist(*dummy_dist_params, dummy_size_param)
-        dummy_params = [dummy_size_param] + dummy_dist_params
+        dummy_params = [dummy_size_param, *dummy_dist_params]
         dummy_updates_dict = collect_default_updates(inputs=dummy_params, outputs=(dummy_rv,))
 
         rv_type = type(
@@ -777,7 +777,7 @@ def change_custom_symbolic_dist_size(op, rv, new_size, expand):
             dummy_size_param = new_size.type()
             dummy_dist_params = [dist_param.type() for dist_param in old_dist_params]
             dummy_rv = dist(*dummy_dist_params, dummy_size_param)
-            dummy_params = [dummy_size_param] + dummy_dist_params
+            dummy_params = [dummy_size_param, *dummy_dist_params]
             dummy_updates_dict = collect_default_updates(inputs=dummy_params, outputs=(dummy_rv,))
             new_rv_op = rv_type(
                 inputs=dummy_params,

pymc/distributions/mixture.py (+1 -1)

@@ -315,7 +315,7 @@ def _resize_components(cls, size, *components):
             # axis intact, because that's what determines the number of mixture components
             mix_axis = -components[0].owner.op.ndim_supp - 1
             mix_size = components[0].shape[mix_axis]
-            size = tuple(size) + (mix_size,)
+            size = (*size, mix_size)
 
         return [change_dist_size(component, size) for component in components]

pymc/distributions/multivariate.py (+5 -5)

@@ -616,7 +616,7 @@ def rng_fn(cls, rng, n, a, size):
 
         if size:
             n = np.broadcast_to(n, size)
-            a = np.broadcast_to(a, size + (a.shape[-1],))
+            a = np.broadcast_to(a, (*size, a.shape[-1]))
 
         res = np.empty(a.shape)
         for idx in np.ndindex(a.shape[:-1]):
@@ -1205,7 +1205,7 @@ def rv_op(cls, n, eta, sd_dist, size=None):
         # implied batched dimensions from those for the time being.
         if size is None:
             size = sd_dist.shape[:-1]
-        shape = tuple(size) + (n,)
+        shape = (*size, n)
         if sd_dist.owner.op.ndim_supp == 0:
             sd_dist = change_dist_size(sd_dist, shape)
         else:
@@ -1685,7 +1685,7 @@ def dist(cls, n, eta, *, return_matrix=False, **kwargs):
     @classmethod
     def vec_to_corr_mat(cls, vec, n):
         tri = pt.zeros(pt.concatenate([vec.shape[:-1], (n, n)]))
-        tri = pt.subtensor.set_subtensor(tri[(...,) + np.triu_indices(n, 1)], vec)
+        tri = pt.subtensor.set_subtensor(tri[(..., *np.triu_indices(n, 1))], vec)
         return tri + pt.moveaxis(tri, -2, -1) + pt.diag(pt.ones(n))
 
 
@@ -2183,7 +2183,7 @@ def rng_fn(cls, rng: np.random.RandomState, mu, W, alpha, tau, size):
 
         size = tuple(size or ())
         if size:
-            mu = np.broadcast_to(mu, size + (mu.shape[-1],))
+            mu = np.broadcast_to(mu, (*size, mu.shape[-1]))
         z = rng.normal(size=mu.shape)
         samples = np.empty(z.shape)
         for idx in np.ndindex(mu.shape[:-1]):
@@ -2492,7 +2492,7 @@ def rng_fn(cls, rng, alpha, K, size):
             raise ValueError("K needs to be positive.")
 
         size = to_tuple(size) if size is not None else alpha.shape
-        size = size + (K,)
+        size = (*size, K)
         alpha = alpha[..., np.newaxis]
 
         betas = rng.beta(1, alpha, size=size)
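
The vec_to_corr_mat hunk above unpacks index arrays straight into a subscript tuple. A small numpy sketch of that indexing idea, assuming an arbitrary batch of 5 matrices (not a value from the diff):

    import numpy as np

    n = 3
    tri = np.zeros((5, n, n))            # a batch of matrices
    idx = (..., *np.triu_indices(n, 1))  # Ellipsis plus row/column index arrays
    tri[idx] = [1.0, 2.0, 3.0]           # fills the strict upper triangle of every matrix
    assert tri[0, 0, 1] == 1.0 and tri[4, 1, 2] == 3.0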

pymc/distributions/timeseries.py (+5 -5)

@@ -819,7 +819,7 @@ def step(prev_y, prev_sigma, omega, alpha_1, beta_1, rng):
         (noise_next_rng,) = tuple(innov_updates_.values())
 
         garch11_ = pt.concatenate([init_[None, ...], y_t], axis=0).dimshuffle(
-            tuple(range(1, y_t.ndim)) + (0,)
+            (*range(1, y_t.ndim), 0)
         )
 
         garch11_op = GARCH11RV(
@@ -850,7 +850,7 @@ def garch11_logp(
 ):
     (value,) = values
     # Move the time axis to the first dimension
-    value_dimswapped = value.dimshuffle((value.ndim - 1,) + tuple(range(0, value.ndim - 1)))
+    value_dimswapped = value.dimshuffle((value.ndim - 1, *range(0, value.ndim - 1)))
     initial_vol = initial_vol * pt.ones_like(value_dimswapped[0])
 
     def volatility_update(x, vol, w, a, b):
@@ -991,18 +991,18 @@ def step(*prev_args):
         y_t, innov_updates_ = pytensor.scan(
             fn=step,
             outputs_info=[init_],
-            non_sequences=sde_pars_ + [noise_rng],
+            non_sequences=[*sde_pars_, noise_rng],
             n_steps=steps_,
             strict=True,
         )
         (noise_next_rng,) = tuple(innov_updates_.values())
 
         sde_out_ = pt.concatenate([init_[None, ...], y_t], axis=0).dimshuffle(
-            tuple(range(1, y_t.ndim)) + (0,)
+            (*range(1, y_t.ndim), 0)
         )
 
         eulermaruyama_op = EulerMaruyamaRV(
-            inputs=[init_, steps_] + sde_pars_,
+            inputs=[init_, steps_, *sde_pars_],
             outputs=[noise_next_rng, sde_out_],
             dt=dt,
             sde_fn=sde_fn,

pymc/distributions/transforms.py (+1 -1)

@@ -303,7 +303,7 @@ def extend_axis_rev(array, axis):
         norm = sum_vals / (pt.sqrt(n) + n)
         slice_before = (slice(None, None),) * normalized_axis
 
-        return array[slice_before + (slice(None, -1),)] + norm
+        return array[(*slice_before, slice(None, -1))] + norm
 
     def forward(self, value, *rv_inputs):
         for axis in self.zerosum_axes:

pymc/logprob/mixture.py (+1 -1)

@@ -310,7 +310,7 @@ def find_measurable_index_mixture(fgraph, node):
         old_mixture_rv.dtype,
         old_mixture_rv.broadcastable,
     )
-    new_node = mix_op.make_node(*([join_axis] + mixing_indices + mixture_rvs))
+    new_node = mix_op.make_node(*([join_axis, *mixing_indices, *mixture_rvs]))
 
     new_mixture_rv = new_node.default_output()
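
Worth noting: after this rewrite the wrapping list and the outer star cancel out, so the call could presumably be flattened further to mix_op.make_node(join_axis, *mixing_indices, *mixture_rvs); the committed form keeps the list but builds the same argument sequence.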

pymc/logprob/rewriting.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -285,7 +285,7 @@ def request_measurable(self, vars: Sequence[Variable]) -> list[Variable]:
285285

286286

287287
@register_canonicalize
288-
@node_rewriter((Elemwise, Alloc, DimShuffle) + subtensor_ops)
288+
@node_rewriter((Elemwise, Alloc, DimShuffle, *subtensor_ops))
289289
def local_lift_DiracDelta(fgraph, node):
290290
r"""Lift basic `Op`\s through `DiracDelta`\s."""
291291

pymc/logprob/scan.py (+4 -4)

@@ -164,14 +164,14 @@ def remove(x, i):
             inner_in_mit_sot_var = cast(
                 tuple[int, ...], tuple(output_scan_args.inner_in_mit_sot[var_idx])
             )
-            new_inner_in_seqs = inner_in_mit_sot_var + (new_inner_in_var,)
+            new_inner_in_seqs = (*inner_in_mit_sot_var, new_inner_in_var)
             new_inner_in_mit_sot = remove(output_scan_args.inner_in_mit_sot, var_idx)
             new_outer_in_mit_sot = remove(output_scan_args.outer_in_mit_sot, var_idx)
             new_inner_in_sit_sot = tuple(output_scan_args.inner_in_sit_sot)
             new_outer_in_sit_sot = tuple(output_scan_args.outer_in_sit_sot)
             add_nit_sot = True
         elif inner_out_info.name.endswith("sit_sot"):
-            new_inner_in_seqs = (output_scan_args.inner_in_sit_sot[var_idx],) + (new_inner_in_var,)
+            new_inner_in_seqs = (output_scan_args.inner_in_sit_sot[var_idx], new_inner_in_var)
             new_inner_in_sit_sot = remove(output_scan_args.inner_in_sit_sot, var_idx)
             new_outer_in_sit_sot = remove(output_scan_args.outer_in_sit_sot, var_idx)
             new_inner_in_mit_sot = tuple(output_scan_args.inner_in_mit_sot)
@@ -193,7 +193,7 @@ def remove(x, i):
             mit_sot_var_taps = cast(
                 tuple[int, ...], tuple(output_scan_args.mit_sot_in_slices[var_idx])
             )
-            taps = mit_sot_var_taps + (0,)
+            taps = (*mit_sot_var_taps, 0)
             new_mit_sot_in_slices = remove(output_scan_args.mit_sot_in_slices, var_idx)
         elif inner_out_info.name.endswith("sit_sot"):
             taps = (-1, 0)
@@ -230,7 +230,7 @@ def remove(x, i):
     output_scan_args.outer_in_seqs = list(new_outer_in_seqs)
 
     if add_nit_sot:
-        new_outer_in_nit_sot = tuple(output_scan_args.outer_in_nit_sot) + (n_steps,)
+        new_outer_in_nit_sot = (*output_scan_args.outer_in_nit_sot, n_steps)
     else:
         new_outer_in_nit_sot = tuple(output_scan_args.outer_in_nit_sot)

pymc/model/core.py (+1 -1)

@@ -334,7 +334,7 @@ def __init__(
             grads = pytensor.grad(cost, grad_vars, disconnected_inputs="ignore")
             for grad_wrt, var in zip(grads, grad_vars):
                 grad_wrt.name = f"{var.name}_grad"
-            outputs = [cost] + grads
+            outputs = [cost, *grads]
         else:
             outputs = [cost]

pymc/plots/__init__.py (+2 -1)

@@ -56,7 +56,8 @@ def wrapped(*args, **kwargs):
 compareplot = alias_deprecation(az.plot_compare, alias="compareplot")
 
 
-__all__ = tuple(az.plots.__all__) + (
+__all__ = (
+    *az.plots.__all__,
     "autocorrplot",
     "compareplot",
     "forestplot",

pymc/sampling/jax.py (+1 -1)

@@ -58,7 +58,7 @@
 
 xla_flags_env = os.getenv("XLA_FLAGS", "")
 xla_flags = re.sub(r"--xla_force_host_platform_device_count=.+\s", "", xla_flags_env).split()
-os.environ["XLA_FLAGS"] = " ".join([f"--xla_force_host_platform_device_count={100}"] + xla_flags)
+os.environ["XLA_FLAGS"] = " ".join([f"--xla_force_host_platform_device_count={100}", *xla_flags])
 
 __all__ = (
     "get_jaxified_graph",

pymc/stats/__init__.py (+1 -1)

@@ -29,4 +29,4 @@
 
 from pymc.stats.log_likelihood import compute_log_likelihood
 
-__all__ = ("compute_log_likelihood",) + tuple(az.stats.__all__)
+__all__ = ("compute_log_likelihood", *az.stats.__all__)

pymc/step_methods/compound.py (+2 -2)

@@ -151,7 +151,7 @@ def __new__(cls, *args, **kwargs):
                 # call __init__
                 step.__init__([var], *args, **kwargs)
                 # Hack for creating the class correctly when unpickling.
-                step.__newargs = ([var],) + args, kwargs
+                step.__newargs = ([var], *args), kwargs
                 steps.append(step)
 
             return CompoundStep(steps)
@@ -160,7 +160,7 @@ def __new__(cls, *args, **kwargs):
         step.stats_dtypes = stats_dtypes
         step.stats_dtypes_shapes = stats_dtypes_shapes
         # Hack for creating the class correctly when unpickling.
-        step.__newargs = (vars,) + args, kwargs
+        step.__newargs = (vars, *args), kwargs
         return step
 
     # Hack for creating the class correctly when unpickling.
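
A subtlety in these two hunks: inside __new__, args is already a tuple, so (vars,) + args and (vars, *args) build identical tuples; the unpacked spelling merely satisfies RUF005. It is also the more general form, since unpacking accepts any iterable while + requires both operands to be tuples. A one-line check (standalone, not from the diff):

    args = (1, 2)
    assert ("x",) + args == ("x", *args) == ("x", 1, 2)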

pymc/testing.py (+1 -1)

@@ -143,7 +143,7 @@ def __neg__(self):
 
 class ProductDomain:
     def __init__(self, domains):
         self.vals = list(it.product(*(d.vals for d in domains)))
-        self.shape = (len(domains),) + domains[0].shape
+        self.shape = (len(domains), *domains[0].shape)
         self.lower = [d.lower for d in domains]
         self.upper = [d.upper for d in domains]
         self.dtype = domains[0].dtype

pymc/variational/opvi.py (+1 -1)

@@ -1511,7 +1511,7 @@ def vars_names(vs):
         ):
             if name in vars_names(vars_):
                 name_, slc, shape, dtype = ordering[name]
-                found = random[..., slc].reshape((random.shape[0],) + shape).astype(dtype)
+                found = random[..., slc].reshape((random.shape[0], *shape)).astype(dtype)
                 found.name = name + "_vi_random_slice"
                 break
         else:

pymc/variational/updates.py (+1 -1)

@@ -570,7 +570,7 @@ def adagrad_window(loss_or_grads=None, params=None, learning_rate=0.001, epsilon
         i = pytensor.shared(pm.floatX(0))
         i_int = i.astype("int32")
         value = param.get_value(borrow=True)
-        accu = pytensor.shared(np.zeros(value.shape + (n_win,), dtype=value.dtype))
+        accu = pytensor.shared(np.zeros((*value.shape, n_win), dtype=value.dtype))
 
         # Append squared gradient vector to accu_new
         accu_new = pt.set_subtensor(accu[..., i_int], grad**2)

tests/backends/fixtures.py (+1 -1)

@@ -60,7 +60,7 @@ def test_append_invalid(self):
             with pytest.raises(ValueError):
                 self.strace.setup(self.draws, self.chain)
             with pytest.raises(ValueError):
-                vars = self.sampler_vars + [{"a": bool}]
+                vars = [*self.sampler_vars, {"a": bool}]
                 self.strace.setup(self.draws, self.chain, vars)
         else:
             with pytest.raises((ValueError, TypeError)):

tests/backends/test_arviz.py (+1 -1)

@@ -156,7 +156,7 @@ def test_to_idata(self, data, eight_schools_params, chains, draws):
         chains = inference_data.posterior.sizes["chain"]
         draws = inference_data.posterior.sizes["draw"]
         obs = inference_data.observed_data["obs"]
-        assert inference_data.log_likelihood["obs"].shape == (chains, draws) + obs.shape
+        assert inference_data.log_likelihood["obs"].shape == (chains, draws, *obs.shape)
 
     def test_predictions_to_idata(self, data, eight_schools_params):
         "Test that we can add predictions to a previously-existing InferenceData."

tests/distributions/test_discrete.py (+4 -4)

@@ -455,9 +455,9 @@ def test_orderedlogistic_dimensions(shape):
     # Test for issue #3535
     loge = np.log10(np.exp(1))
     size = 7
-    p = np.ones(shape + (10,)) / 10
-    cutpoints = np.tile(sp.logit(np.linspace(0, 1, 11)[1:-1]), shape + (1,))
-    obs = np.random.randint(0, 2, size=(size,) + shape)
+    p = np.ones((*shape, 10)) / 10
+    cutpoints = np.tile(sp.logit(np.linspace(0, 1, 11)[1:-1]), (*shape, 1))
+    obs = np.random.randint(0, 2, size=(size, *shape))
     with pm.Model():
         ol = pm.OrderedLogistic(
             "ol",
@@ -472,7 +472,7 @@ def test_orderedlogistic_dimensions(shape):
         )
         ologp = pm.logp(ol, np.ones_like(obs)).sum().eval() * loge
         clogp = pm.logp(c, np.ones_like(obs)).sum().eval() * loge
-        expected = -np.prod((size,) + shape)
+        expected = -np.prod((size, *shape))
 
         assert c.owner.inputs[3].ndim == (len(shape) + 1)
         assert np.allclose(clogp, expected)
