diff --git a/environment-osx-arm64.yml b/environment-osx-arm64.yml
index 0d624aa55c..13a68faaaa 100644
--- a/environment-osx-arm64.yml
+++ b/environment-osx-arm64.yml
@@ -10,7 +10,7 @@ dependencies:
   - python=>3.10
   - compilers
   - numpy>=1.17.0,<2
-  - scipy>=0.14,<1.14.0
+  - scipy>=1,<2
   - filelock>=3.15
   - etuples
   - logical-unification
diff --git a/environment.yml b/environment.yml
index 95bb58c06c..4b213fd851 100644
--- a/environment.yml
+++ b/environment.yml
@@ -10,7 +10,7 @@ dependencies:
   - python>=3.10
   - compilers
   - numpy>=1.17.0,<2
-  - scipy>=0.14,<1.14.0
+  - scipy>=1,<2
   - filelock>=3.15
   - etuples
   - logical-unification
diff --git a/pyproject.toml b/pyproject.toml
index 81a1285da8..bebba8a7de 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -47,7 +47,7 @@ keywords = [
 ]
 dependencies = [
     "setuptools>=59.0.0",
-    "scipy>=0.14,<1.14",
+    "scipy>=1,<2",
     "numpy>=1.17.0,<2",
     "filelock>=3.15",
     "etuples",
diff --git a/pytensor/gradient.py b/pytensor/gradient.py
index abf80bff43..6b3a1a4b62 100644
--- a/pytensor/gradient.py
+++ b/pytensor/gradient.py
@@ -4,7 +4,7 @@
 import warnings
 from collections.abc import Callable, Mapping, MutableSequence, Sequence
 from functools import partial, reduce
-from typing import TYPE_CHECKING, Literal, TypeVar, Union
+from typing import TYPE_CHECKING, Literal, TypeVar, Union, overload
 
 import numpy as np
 
@@ -414,6 +414,32 @@ def Lop(
     return as_list_or_tuple(using_list, using_tuple, ret)
 
 
+@overload
+def grad(
+    cost: Variable | None,
+    wrt: Variable | Sequence[Variable],
+    consider_constant: Sequence[Variable] | None = ...,
+    disconnected_inputs: Literal["ignore", "warn", "raise"] = ...,
+    add_names: bool = ...,
+    known_grads: Mapping[Variable, Variable] | None = ...,
+    return_disconnected: Literal["zero", "disconnected"] = ...,
+    null_gradients: Literal["raise", "return"] = ...,
+) -> Variable | None | Sequence[Variable]: ...
+
+
+@overload
+def grad(
+    cost: Variable | None,
+    wrt: Variable | Sequence[Variable],
+    consider_constant: Sequence[Variable] | None = ...,
+    disconnected_inputs: Literal["ignore", "warn", "raise"] = ...,
+    add_names: bool = ...,
+    known_grads: Mapping[Variable, Variable] | None = ...,
+    return_disconnected: Literal["none"] = ...,
+    null_gradients: Literal["raise", "return"] = ...,
+) -> Variable | None | Sequence[Variable | None]: ...
+
+
 def grad(
     cost: Variable | None,
     wrt: Variable | Sequence[Variable],
@@ -423,7 +449,7 @@ def grad(
     known_grads: Mapping[Variable, Variable] | None = None,
     return_disconnected: Literal["none", "zero", "disconnected"] = "zero",
     null_gradients: Literal["raise", "return"] = "raise",
-) -> Variable | None | Sequence[Variable | None]:
+) -> Variable | None | Sequence[Variable | None] | Sequence[Variable]:
     """
     Return symbolic gradients of one cost with respect to one or more variables.
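As an illustration (not part of the patch): the two @overload signatures added above let a static type checker narrow grad's return type from the return_disconnected argument, while the implementation's own annotation is widened to stay compatible with both. A minimal sketch of the intended narrowing, with illustrative variable names:

import pytensor.tensor as pt
from pytensor.gradient import grad

x = pt.vector("x")  # illustrative input, not taken from the diff
cost = (x**2).sum()

# First overload: return_disconnected="zero" (the default) or "disconnected",
# so no element of the result should be typed as None.
g = grad(cost, wrt=[x], return_disconnected="zero")

# Second overload: return_disconnected="none" may produce None for
# disconnected inputs, so the element type widens to Variable | None.
g_or_none = grad(cost, wrt=[x], return_disconnected="none")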
diff --git a/pytensor/graph/basic.py b/pytensor/graph/basic.py
index 2ffd101c23..ed1ad6b6c2 100644
--- a/pytensor/graph/basic.py
+++ b/pytensor/graph/basic.py
@@ -710,7 +710,7 @@ def clone(self, **kwargs):
         return cp
 
 
-class NominalVariable(AtomicVariable[_TypeType]):
+class NominalVariable(Generic[_TypeType, _IdType], AtomicVariable[_TypeType]):
     """A variable that enables alpha-equivalent comparisons."""
 
     __instances__: dict[tuple["Type", Hashable], "NominalVariable"] = {}
@@ -1313,8 +1313,9 @@ def clone_get_equiv(
     outputs: Reversible[Variable],
     copy_inputs: bool = True,
     copy_orphans: bool = True,
-    memo: dict[Union[Apply, Variable, "Op"], Union[Apply, Variable, "Op"]]
-    | None = None,
+    memo: (
+        dict[Union[Apply, Variable, "Op"], Union[Apply, Variable, "Op"]] | None
+    ) = None,
     clone_inner_graphs: bool = False,
     **kwargs,
 ) -> dict[Union[Apply, Variable, "Op"], Union[Apply, Variable, "Op"]]:
diff --git a/pytensor/scalar/basic.py b/pytensor/scalar/basic.py
index d4c41d5cb5..d6fcfc0723 100644
--- a/pytensor/scalar/basic.py
+++ b/pytensor/scalar/basic.py
@@ -1140,14 +1140,25 @@ def output_types(self, types):
         else:
             raise NotImplementedError(f"Cannot calculate the output types for {self}")
 
+    @staticmethod
+    def _cast_scalar(x, dtype):
+        if hasattr(x, "astype"):
+            return x.astype(dtype)
+        elif dtype == "bool":
+            return np.bool_(x)
+        else:
+            return getattr(np, dtype)(x)
+
     def perform(self, node, inputs, output_storage):
         if self.nout == 1:
-            output_storage[0][0] = self.impl(*inputs)
+            dtype = node.outputs[0].dtype
+            output_storage[0][0] = self._cast_scalar(self.impl(*inputs), dtype)
         else:
             variables = from_return_values(self.impl(*inputs))
             assert len(variables) == len(output_storage)
-            for storage, variable in zip(output_storage, variables):
-                storage[0] = variable
+            for out, storage, variable in zip(node.outputs, output_storage, variables):
+                dtype = out.dtype
+                storage[0] = self._cast_scalar(variable, dtype)
 
     def impl(self, *inputs):
         raise MethodNotDefined("impl", type(self), self.__class__.__name__)
diff --git a/pytensor/tensor/elemwise.py b/pytensor/tensor/elemwise.py
index de966f1a78..1b0d433dda 100644
--- a/pytensor/tensor/elemwise.py
+++ b/pytensor/tensor/elemwise.py
@@ -767,34 +767,16 @@ def perform(self, node, inputs, output_storage):
         for i, (variable, storage, nout) in enumerate(
             zip(variables, output_storage, node.outputs)
         ):
-            if getattr(variable, "dtype", "") == "object":
-                # Since numpy 1.6, function created with numpy.frompyfunc
-                # always return an ndarray with dtype object
-                variable = np.asarray(variable, dtype=nout.dtype)
+            storage[0] = variable = np.asarray(variable, dtype=nout.dtype)
 
             if i in self.inplace_pattern:
                 odat = inputs[self.inplace_pattern[i]]
                 odat[...] = variable
                 storage[0] = odat
 
-            # Sometimes NumPy return a Python type.
-            # Some PyTensor op return a different dtype like floor, ceil,
-            # trunc, eq, ...
-            elif not isinstance(variable, np.ndarray) or variable.dtype != nout.dtype:
-                variable = np.asarray(variable, nout.dtype)
-                # The next line is needed for numpy 1.9. Otherwise
-                # there are tests that fail in DebugMode.
-                # Normally we would call pytensor.misc._asarray, but it
-                # is faster to inline the code. We know that the dtype
-                # are the same string, just different typenum.
-                if np.dtype(nout.dtype).num != variable.dtype.num:
-                    variable = variable.view(dtype=nout.dtype)
-                storage[0] = variable
             # numpy.real return a view!
-            elif not variable.flags.owndata:
+            if not variable.flags.owndata:
                 storage[0] = variable.copy()
-            else:
-                storage[0] = variable
 
     @staticmethod
     def _check_runtime_broadcast(node, inputs):
diff --git a/tests/scalar/test_loop.py b/tests/scalar/test_loop.py
index 88f1a588fd..88d14c6e43 100644
--- a/tests/scalar/test_loop.py
+++ b/tests/scalar/test_loop.py
@@ -212,12 +212,17 @@ def test_inner_composite(mode):
     y16 = op(n_steps, x16)
     assert y16.type.dtype == "float16"
 
-    fn32 = function([n_steps, x16], y16, mode=mode)
+    fn16 = function([n_steps, x16], y16, mode=mode)
+    out16 = fn16(n_steps=3, x16=np.array(4.73, dtype="float16"))
     np.testing.assert_allclose(
-        fn32(n_steps=9, x16=np.array(4.73, dtype="float16")),
-        4.73 + 9,
+        out16,
+        4.73 + 3,
         rtol=1e-3,
     )
+    out16overflow = fn16(n_steps=9, x16=np.array(4.73, dtype="float16"))
+    assert out16overflow.dtype == "float16"
+    # with this dtype overflow happens
+    assert np.isnan(out16overflow)
 
 
 @mode
@@ -243,8 +248,10 @@ def test_inner_loop(mode):
     y16 = outer_loop_op(n_steps, x16, n_steps)
     assert y16.type.dtype == "float16"
 
-    fn32 = function([n_steps, x16], y16, mode=mode)
+    fn16 = function([n_steps, x16], y16, mode=mode)
+    out16 = fn16(n_steps=3, x16=np.array(2.5, dtype="float16"))
+    assert out16.dtype == "float16"
     np.testing.assert_allclose(
-        fn32(n_steps=3, x16=np.array(2.5, dtype="float16")),
+        out16,
         3**2 + 2.5,
     )
diff --git a/tests/tensor/utils.py b/tests/tensor/utils.py
index 2f97d0e18f..85c48a42dd 100644
--- a/tests/tensor/utils.py
+++ b/tests/tensor/utils.py
@@ -508,15 +508,17 @@ def test_good(self):
             if not isinstance(expecteds, list | tuple):
                 expecteds = (expecteds,)
 
-            for i, (variable, expected) in enumerate(zip(variables, expecteds)):
+            for i, (variable, expected, out_symbol) in enumerate(
+                zip(variables, expecteds, node.outputs)
+            ):
                 condition = (
-                    variable.dtype != expected.dtype
+                    variable.dtype != out_symbol.type.dtype
                     or variable.shape != expected.shape
                     or not np.allclose(variable, expected, atol=eps, rtol=eps)
                 )
                 assert not condition, (
                     f"Test {self.op}::{testname}: Output {i} gave the wrong"
-                    f" value. With inputs {inputs}, expected {expected} (dtype {expected.dtype}),"
+                    f" value. With inputs {inputs}, expected {expected} (dtype {out_symbol.type.dtype}),"
                     f" got {variable} (dtype {variable.dtype}). eps={eps:f}"
                     f" np.allclose returns {np.allclose(variable, expected, atol=eps, rtol=eps)} {np.allclose(variable, expected)}"
                 )
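As an illustration (not part of the patch): the float16 assertions added in tests/scalar/test_loop.py and the dtype comparison against out_symbol.type.dtype in tests/tensor/utils.py rely on perform() now coercing results to the node's declared output dtype, via ScalarOp._cast_scalar for scalar ops and np.asarray(variable, dtype=nout.dtype) in Elemwise. A standalone sketch of the scalar casting rule, using the same logic as the helper with illustrative values:

import numpy as np

def cast_scalar(x, dtype):
    # Same rule as the helper added in pytensor/scalar/basic.py: anything with
    # .astype() is cast directly; "bool" is special-cased via np.bool_
    # (presumably because np.bool is not available on all supported NumPy
    # versions); other dtypes use the matching NumPy scalar constructor.
    if hasattr(x, "astype"):
        return x.astype(dtype)
    elif dtype == "bool":
        return np.bool_(x)
    else:
        return getattr(np, dtype)(x)

assert cast_scalar(1.5, "float32").dtype == np.dtype("float32")  # Python float -> float32 scalar
assert cast_scalar(np.float64(2.0), "float16").dtype == np.dtype("float16")
assert cast_scalar(1, "bool") == np.True_

This is what lets the updated tests assert that float16 graphs return float16 outputs (and observe float16 overflow) instead of results silently upcast by NumPy.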