Unpin scipy upper version #972


Merged · 3 commits · Aug 15, 2024
2 changes: 1 addition & 1 deletion environment-osx-arm64.yml

```diff
@@ -10,7 +10,7 @@ dependencies:
 - python>=3.10
 - compilers
 - numpy>=1.17.0,<2
-- scipy>=0.14,<1.14.0
+- scipy>=1,<2
 - filelock>=3.15
 - etuples
 - logical-unification
```

2 changes: 1 addition & 1 deletion environment.yml

```diff
@@ -10,7 +10,7 @@ dependencies:
 - python>=3.10
 - compilers
 - numpy>=1.17.0,<2
-- scipy>=0.14,<1.14.0
+- scipy>=1,<2
 - filelock>=3.15
 - etuples
 - logical-unification
```

2 changes: 1 addition & 1 deletion pyproject.toml

```diff
@@ -47,7 +47,7 @@ keywords = [
 ]
 dependencies = [
     "setuptools>=59.0.0",
-    "scipy>=0.14,<1.14",
+    "scipy>=1,<2",
     "numpy>=1.17.0,<2",
     "filelock>=3.15",
     "etuples",
```

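
For a quick sanity check of what the relaxed specifier admits, here is a minimal sketch using the third-party `packaging` library (not part of this PR; the version numbers are only illustrative):

```python
# Sketch: what the relaxed pin admits. Requires `pip install packaging`.
from packaging.specifiers import SpecifierSet

old = SpecifierSet(">=0.14,<1.14")
new = SpecifierSet(">=1,<2")

assert "1.14.1" not in old  # previously excluded
assert "1.14.1" in new      # now allowed: any scipy 1.x release
assert "0.19.1" in old and "0.19.1" not in new  # pre-1.0 releases are dropped
```
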
30 changes: 28 additions & 2 deletions pytensor/gradient.py

```diff
@@ -4,7 +4,7 @@
 import warnings
 from collections.abc import Callable, Mapping, MutableSequence, Sequence
 from functools import partial, reduce
-from typing import TYPE_CHECKING, Literal, TypeVar, Union
+from typing import TYPE_CHECKING, Literal, TypeVar, Union, overload

 import numpy as np

@@ -414,6 +414,32 @@ def Lop(
     return as_list_or_tuple(using_list, using_tuple, ret)


+@overload
+def grad(
+    cost: Variable | None,
+    wrt: Variable | Sequence[Variable],
+    consider_constant: Sequence[Variable] | None = ...,
+    disconnected_inputs: Literal["ignore", "warn", "raise"] = ...,
+    add_names: bool = ...,
+    known_grads: Mapping[Variable, Variable] | None = ...,
+    return_disconnected: Literal["zero", "disconnected"] = ...,
+    null_gradients: Literal["raise", "return"] = ...,
+) -> Variable | None | Sequence[Variable]: ...
+
+
+@overload
+def grad(
+    cost: Variable | None,
+    wrt: Variable | Sequence[Variable],
+    consider_constant: Sequence[Variable] | None = ...,
+    disconnected_inputs: Literal["ignore", "warn", "raise"] = ...,
+    add_names: bool = ...,
+    known_grads: Mapping[Variable, Variable] | None = ...,
+    return_disconnected: Literal["none"] = ...,
+    null_gradients: Literal["raise", "return"] = ...,
+) -> Variable | None | Sequence[Variable | None]: ...
+
+
 def grad(
     cost: Variable | None,
     wrt: Variable | Sequence[Variable],
@@ -423,7 +449,7 @@ def grad(
     known_grads: Mapping[Variable, Variable] | None = None,
     return_disconnected: Literal["none", "zero", "disconnected"] = "zero",
     null_gradients: Literal["raise", "return"] = "raise",
-) -> Variable | None | Sequence[Variable | None]:
+) -> Variable | None | Sequence[Variable | None] | Sequence[Variable]:
     """
     Return symbolic gradients of one cost with respect to one or more variables.
```

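
The two `@overload` stubs let a static type checker narrow `grad`'s return type from the `return_disconnected` literal: only the `"none"` variant may yield `None` entries in the returned sequence. A minimal standalone sketch of the same pattern (hypothetical function `f`, not PyTensor's API):

```python
from typing import Literal, overload

@overload
def f(mode: Literal["zero", "disconnected"]) -> list[int]: ...
@overload
def f(mode: Literal["none"]) -> list[int | None]: ...
def f(mode: str) -> list[int] | list[int | None]:
    # Runtime behavior is shared; only the static signatures differ.
    if mode == "none":
        return [None]
    return [0]
```

A checker such as mypy or pyright then infers `list[int]` for `f("zero")` and `list[int | None]` for `f("none")`, with no change at runtime.
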
7 changes: 4 additions & 3 deletions pytensor/graph/basic.py

```diff
@@ -710,7 +710,7 @@ def clone(self, **kwargs):
         return cp


-class NominalVariable(AtomicVariable[_TypeType]):
+class NominalVariable(Generic[_TypeType, _IdType], AtomicVariable[_TypeType]):
     """A variable that enables alpha-equivalent comparisons."""

     __instances__: dict[tuple["Type", Hashable], "NominalVariable"] = {}

@@ -1313,8 +1313,9 @@ def clone_get_equiv(
     outputs: Reversible[Variable],
     copy_inputs: bool = True,
     copy_orphans: bool = True,
-    memo: dict[Union[Apply, Variable, "Op"], Union[Apply, Variable, "Op"]]
-    | None = None,
+    memo: (
+        dict[Union[Apply, Variable, "Op"], Union[Apply, Variable, "Op"]] | None
+    ) = None,
     clone_inner_graphs: bool = False,
     **kwargs,
 ) -> dict[Union[Apply, Variable, "Op"], Union[Apply, Variable, "Op"]]:
```

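
Adding `Generic[_TypeType, _IdType]` to the bases makes `NominalVariable` explicitly generic in both its type and its id, so subscriptions with two parameters type-check. A minimal sketch of the pattern, with placeholder classes:

```python
from typing import Generic, TypeVar

_TypeType = TypeVar("_TypeType")
_IdType = TypeVar("_IdType")

class Variable(Generic[_TypeType]):
    ...

class NominalLike(Variable[_TypeType], Generic[_TypeType, _IdType]):
    """Generic in a second parameter its base class knows nothing about."""
    def __init__(self, id_: _IdType, typ: _TypeType) -> None:
        self.id = id_
        self.type = typ

v: NominalLike[str, int] = NominalLike(0, "float64")  # both parameters spelled out
```
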
17 changes: 14 additions & 3 deletions pytensor/scalar/basic.py

```diff
@@ -1140,14 +1140,25 @@
         else:
             raise NotImplementedError(f"Cannot calculate the output types for {self}")

+    @staticmethod
+    def _cast_scalar(x, dtype):
+        if hasattr(x, "astype"):
+            return x.astype(dtype)
+        elif dtype == "bool":
+            return np.bool_(x)
+        else:
+            return getattr(np, dtype)(x)
+
     def perform(self, node, inputs, output_storage):
         if self.nout == 1:
-            output_storage[0][0] = self.impl(*inputs)
+            dtype = node.outputs[0].dtype
+            output_storage[0][0] = self._cast_scalar(self.impl(*inputs), dtype)
         else:
             variables = from_return_values(self.impl(*inputs))
             assert len(variables) == len(output_storage)
-            for storage, variable in zip(output_storage, variables):
-                storage[0] = variable
+            for out, storage, variable in zip(node.outputs, output_storage, variables):
+                dtype = out.dtype
+                storage[0] = self._cast_scalar(variable, dtype)

     def impl(self, *inputs):
         raise MethodNotDefined("impl", type(self), self.__class__.__name__)
```

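
`_cast_scalar` pins `perform`'s results to the dtype the node declared, whether `impl` returned a NumPy value or a plain Python scalar; the `bool` branch is needed because `np.bool` is no longer an attribute of recent NumPy releases. An illustrative standalone restatement:

```python
import numpy as np

def cast_scalar(x, dtype):
    """Mirror of the _cast_scalar rule above (illustrative copy)."""
    if hasattr(x, "astype"):           # NumPy arrays and NumPy scalars
        return x.astype(dtype)
    elif dtype == "bool":              # np.bool is gone; use np.bool_
        return np.bool_(x)
    else:
        return getattr(np, dtype)(x)   # e.g. np.float16, np.int32

assert cast_scalar(0.5, "float16").dtype == np.dtype("float16")       # Python float
assert cast_scalar(np.float64(0.5), "float32").dtype == np.dtype("float32")
assert cast_scalar(1, "bool").dtype == np.dtype("bool")
```
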
22 changes: 2 additions & 20 deletions pytensor/tensor/elemwise.py

```diff
@@ -767,34 +767,16 @@ def perform(self, node, inputs, output_storage):
         for i, (variable, storage, nout) in enumerate(
             zip(variables, output_storage, node.outputs)
         ):
-            if getattr(variable, "dtype", "") == "object":
-                # Since numpy 1.6, function created with numpy.frompyfunc
-                # always return an ndarray with dtype object
-                variable = np.asarray(variable, dtype=nout.dtype)
+            storage[0] = variable = np.asarray(variable, dtype=nout.dtype)

             if i in self.inplace_pattern:
                 odat = inputs[self.inplace_pattern[i]]
                 odat[...] = variable
                 storage[0] = odat

-            # Sometimes NumPy return a Python type.
-            # Some PyTensor op return a different dtype like floor, ceil,
-            # trunc, eq, ...
-            elif not isinstance(variable, np.ndarray) or variable.dtype != nout.dtype:
-                variable = np.asarray(variable, nout.dtype)
-                # The next line is needed for numpy 1.9. Otherwise
-                # there are tests that fail in DebugMode.
-                # Normally we would call pytensor.misc._asarray, but it
-                # is faster to inline the code. We know that the dtype
-                # are the same string, just different typenum.
-                if np.dtype(nout.dtype).num != variable.dtype.num:
-                    variable = variable.view(dtype=nout.dtype)
-                storage[0] = variable
             # numpy.real return a view!
-            elif not variable.flags.owndata:
+            if not variable.flags.owndata:
                 storage[0] = variable.copy()
-            else:
-                storage[0] = variable

     @staticmethod
     def _check_runtime_broadcast(node, inputs):
```

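
With scalar `perform` now guaranteeing output dtypes, the branching collapses to a single `np.asarray` call: `asarray` is free when the dtype already matches, and the remaining `owndata` check still copies results from ufuncs that return views. A small sketch of those two NumPy behaviors (illustrative only):

```python
import numpy as np

a = np.arange(3, dtype="float32")
assert np.asarray(a, dtype="float32") is a      # matching dtype: no copy made
assert np.asarray(a, dtype="float64") is not a  # mismatch: converts (copies)

c = np.real(a.astype("complex64"))              # np.real returns a view
assert not c.flags.owndata                      # so callers must copy to own it
```
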
17 changes: 12 additions & 5 deletions tests/scalar/test_loop.py

```diff
@@ -212,12 +212,17 @@ def test_inner_composite(mode):
     y16 = op(n_steps, x16)
     assert y16.type.dtype == "float16"

-    fn32 = function([n_steps, x16], y16, mode=mode)
+    fn16 = function([n_steps, x16], y16, mode=mode)
+    out16 = fn16(n_steps=3, x16=np.array(4.73, dtype="float16"))
     np.testing.assert_allclose(
-        fn32(n_steps=9, x16=np.array(4.73, dtype="float16")),
-        4.73 + 9,
+        out16,
+        4.73 + 3,
         rtol=1e-3,
     )
+    out16overflow = fn16(n_steps=9, x16=np.array(4.73, dtype="float16"))
+    assert out16overflow.dtype == "float16"
+    # with this dtype overflow happens
+    assert np.isnan(out16overflow)


 @mode
@@ -243,8 +248,10 @@ def test_inner_loop(mode):
     y16 = outer_loop_op(n_steps, x16, n_steps)
     assert y16.type.dtype == "float16"

-    fn32 = function([n_steps, x16], y16, mode=mode)
+    fn16 = function([n_steps, x16], y16, mode=mode)
+    out16 = fn16(n_steps=3, x16=np.array(2.5, dtype="float16"))
+    assert out16.dtype == "float16"
     np.testing.assert_allclose(
-        fn32(n_steps=3, x16=np.array(2.5, dtype="float16")),
+        out16,
         3**2 + 2.5,
     )
```

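
The new assertions rely on float16's small range (max ≈ 65504): once an intermediate result exceeds it, values degenerate to inf and then nan. A minimal illustration of that failure mode (not the test's actual inner graph):

```python
import numpy as np

big = np.float16(300.0)
prod = big * big              # 90000 overflows float16 (max ~65504)
assert prod.dtype == np.dtype("float16") and np.isinf(prod)
assert np.isnan(prod - prod)  # inf - inf propagates to nan
```
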
8 changes: 5 additions & 3 deletions tests/tensor/utils.py

```diff
@@ -508,15 +508,17 @@ def test_good(self):
         if not isinstance(expecteds, list | tuple):
             expecteds = (expecteds,)

-        for i, (variable, expected) in enumerate(zip(variables, expecteds)):
+        for i, (variable, expected, out_symbol) in enumerate(
+            zip(variables, expecteds, node.outputs)
+        ):
             condition = (
-                variable.dtype != expected.dtype
+                variable.dtype != out_symbol.type.dtype
                 or variable.shape != expected.shape
                 or not np.allclose(variable, expected, atol=eps, rtol=eps)
             )
             assert not condition, (
                 f"Test {self.op}::{testname}: Output {i} gave the wrong"
-                f" value. With inputs {inputs}, expected {expected} (dtype {expected.dtype}),"
+                f" value. With inputs {inputs}, expected {expected} (dtype {out_symbol.type.dtype}),"
                 f" got {variable} (dtype {variable.dtype}). eps={eps:f}"
                 f" np.allclose returns {np.allclose(variable, expected, atol=eps, rtol=eps)} {np.allclose(variable, expected)}"
             )
```

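
Comparing the runtime dtype against the symbolic output's declared dtype, rather than against the reference value's dtype, lets a higher-precision reference validate the values while the dtype contract is checked separately. A small sketch of the split check, with made-up values:

```python
import numpy as np

declared_dtype = "float16"        # what the node promises
produced = np.float16(4.73)       # what perform() stored
expected = np.float64(4.73)       # reference, often computed in float64

assert produced.dtype == np.dtype(declared_dtype)     # dtype vs. the symbol
assert np.allclose(produced, expected, rtol=1e-3)     # values vs. the reference
```
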