From 1b6dfe005f26670a99429b409a5c368330bbf648 Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Tue, 13 Jun 2023 11:17:57 +0200 Subject: [PATCH 01/17] add arange --- _unittests/onnx-numpy-skips.txt | 1 - _unittests/test_array_api.sh | 4 +++- onnx_array_api/array_api/onnx_numpy.py | 18 ++++++++++++++++ onnx_array_api/npx/npx_functions.py | 30 +++++++++++++------------- 4 files changed, 36 insertions(+), 17 deletions(-) diff --git a/_unittests/onnx-numpy-skips.txt b/_unittests/onnx-numpy-skips.txt index 3cdbb31..92a5a29 100644 --- a/_unittests/onnx-numpy-skips.txt +++ b/_unittests/onnx-numpy-skips.txt @@ -1,5 +1,4 @@ # API failures -array_api_tests/test_creation_functions.py::test_arange array_api_tests/test_creation_functions.py::test_asarray_scalars array_api_tests/test_creation_functions.py::test_asarray_arrays array_api_tests/test_creation_functions.py::test_empty diff --git a/_unittests/test_array_api.sh b/_unittests/test_array_api.sh index b32ee41..7c29f82 100644 --- a/_unittests/test_array_api.sh +++ b/_unittests/test_array_api.sh @@ -1,2 +1,4 @@ export ARRAY_API_TESTS_MODULE=onnx_array_api.array_api.onnx_numpy -pytest ../array-api-tests/array_api_tests/test_creation_functions.py::test_zeros \ No newline at end of file +pytest ../array-api-tests/array_api_tests/test_creation_functions.py::test_arange || exit 1 +pytest ../array-api-tests/array_api_tests/test_creation_functions.py::test_ones || exit 1 +pytest ../array-api-tests/array_api_tests/test_creation_functions.py::test_zeros || exit 1 diff --git a/onnx_array_api/array_api/onnx_numpy.py b/onnx_array_api/array_api/onnx_numpy.py index c20fb15..d104c5b 100644 --- a/onnx_array_api/array_api/onnx_numpy.py +++ b/onnx_array_api/array_api/onnx_numpy.py @@ -16,6 +16,7 @@ reshape, take, ) +from ..npx.npx_functions import arange as generic_arange from ..npx.npx_functions import ones as generic_ones from ..npx.npx_functions import zeros as generic_zeros from ..npx.npx_numpy_tensors import EagerNumpyTensor @@ -27,6 +28,7 @@ "abs", "absolute", "all", + "arange", "asarray", "astype", "equal", @@ -55,6 +57,22 @@ def asarray( ) +def arange( + start_or_stop: TensorType[ElemType.int64, "I", (1,)], + stop_or_step: Optional[TensorType[ElemType.int64, "I", (1,)]] = None, + step: Optional[TensorType[ElemType.int64, "I", (1,)]] = None, + dtype: OptParType[DType] = None, +) -> TensorType[ElemType.numerics, "T"]: + print("####", start_or_stop, stop_or_step, step, dtype) + if isinstance(start_or_stop, int): + start_or_stop = EagerNumpyTensor(np.array([start_or_stop], dtype=np.int64)) + if isinstance(stop_or_step, int): + stop_or_step = EagerNumpyTensor(np.array([stop_or_step], dtype=np.int64)) + if isinstance(step, int): + step = EagerNumpyTensor(np.array([step], dtype=np.int64)) + return generic_arange(start_or_stop, stop_or_step, step, dtype=dtype) + + def ones( shape: TensorType[ElemType.int64, "I", (None,)], dtype: OptParType[DType] = DType(TensorProto.FLOAT), diff --git a/onnx_array_api/npx/npx_functions.py b/onnx_array_api/npx/npx_functions.py index 29a4481..c15ebe7 100644 --- a/onnx_array_api/npx/npx_functions.py +++ b/onnx_array_api/npx/npx_functions.py @@ -75,20 +75,6 @@ def all( return var(red, cst(1), op="Equal") -@npxapi_inline -def arccos(x: TensorType[ElemType.numerics, "T"]) -> TensorType[ElemType.numerics, "T"]: - "See :func:`numpy.arccos`." - return var(x, op="Acos") - - -@npxapi_inline -def arccosh( - x: TensorType[ElemType.numerics, "T"] -) -> TensorType[ElemType.numerics, "T"]: - "See :func:`numpy.arccosh`." 
- return var(x, op="Acosh") - - @npxapi_inline def amax( x: TensorType[ElemType.numerics, "T"], @@ -118,7 +104,7 @@ def arange( start_or_stop: TensorType[ElemType.int64, "I", (1,)], stop_or_step: Optional[TensorType[ElemType.int64, "I", (1,)]] = None, step: Optional[TensorType[ElemType.int64, "I", (1,)]] = None, - dtype=None, + dtype: OptParType[DType] = None, ) -> TensorType[ElemType.numerics, "T"]: "See :func:`numpy.arccos`." if stop_or_step is None: @@ -139,6 +125,20 @@ def arange( return v +@npxapi_inline +def arccos(x: TensorType[ElemType.numerics, "T"]) -> TensorType[ElemType.numerics, "T"]: + "See :func:`numpy.arccos`." + return var(x, op="Acos") + + +@npxapi_inline +def arccosh( + x: TensorType[ElemType.numerics, "T"] +) -> TensorType[ElemType.numerics, "T"]: + "See :func:`numpy.arccosh`." + return var(x, op="Acosh") + + @npxapi_inline def argmax( x: TensorType[ElemType.numerics, "T"], From ec02850bfc018e5bdec93d9efb96638d0c403148 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?xavier=20dupr=C3=A9?= Date: Tue, 20 Jun 2023 00:26:22 +0200 Subject: [PATCH 02/17] introduce OptTensorType --- .gitignore | 1 + _doc/api/index.rst | 4 +-- _doc/api/{npx_annot.rst => npx_types.rst} | 42 +++++++++++++++-------- _doc/api/npx_var.rst | 13 +++++++ _unittests/onnx-numpy-skips.txt | 2 +- _unittests/test_array_api.bat | 4 +++ _unittests/ut_npx/test_npx.py | 30 +++++++++++++++- onnx_array_api/array_api/onnx_numpy.py | 14 ++++++-- onnx_array_api/npx/npx_functions.py | 19 +++++----- onnx_array_api/npx/npx_graph_builder.py | 15 ++++++-- onnx_array_api/npx/npx_jit_eager.py | 15 +++++--- onnx_array_api/npx/npx_types.py | 24 +++++++++---- onnx_array_api/npx/npx_var.py | 12 +++++-- 13 files changed, 151 insertions(+), 44 deletions(-) rename _doc/api/{npx_annot.rst => npx_types.rst} (82%) create mode 100644 _unittests/test_array_api.bat diff --git a/.gitignore b/.gitignore index f4d6253..08f95b5 100644 --- a/.gitignore +++ b/.gitignore @@ -21,3 +21,4 @@ _doc/_static/viz.js _unittests/ut__main/*.png _unittests/ut__main/_cache/* _unittests/ut__main/*.html +_unittests/.hypothesis/* diff --git a/_doc/api/index.rst b/_doc/api/index.rst index 75c0aa4..475fad6 100644 --- a/_doc/api/index.rst +++ b/_doc/api/index.rst @@ -8,10 +8,10 @@ API array_api npx_functions - npx_var npx_jit - npx_annot npx_numpy + npx_types + npx_var onnx_tools ort plotting diff --git a/_doc/api/npx_annot.rst b/_doc/api/npx_types.rst similarity index 82% rename from _doc/api/npx_annot.rst rename to _doc/api/npx_types.rst index 43de2d7..dc1a378 100644 --- a/_doc/api/npx_annot.rst +++ b/_doc/api/npx_types.rst @@ -1,38 +1,40 @@ -============= npx.npx_types ============= DType -===== ++++++ .. autoclass:: onnx_array_api.npx.npx_types.DType :members: -Annotations -=========== - ElemType ++++++++ .. autoclass:: onnx_array_api.npx.npx_types.ElemType :members: -ParType -+++++++ - -.. autoclass:: onnx_array_api.npx.npx_types.ParType - :members: - OptParType ++++++++++ .. autoclass:: onnx_array_api.npx.npx_types.OptParType :members: -TensorType -++++++++++ +OptTensorType ++++++++++++++ -.. autoclass:: onnx_array_api.npx.npx_types.TensorType +.. autoclass:: onnx_array_api.npx.npx_types.OptTensorType + :members: + +ParType ++++++++ + +.. autoclass:: onnx_array_api.npx.npx_types.ParType + :members: + +Scalar +++++++ + +.. autoclass:: onnx_array_api.npx.npx_types.Scalar :members: SequenceType @@ -41,6 +43,18 @@ SequenceType .. autoclass:: onnx_array_api.npx.npx_types.SequenceType :members: +ShapeType ++++++++++ + +.. 
autoclass:: onnx_array_api.npx.npx_types.ShapeType + :members: + +TensorType +++++++++++ + +.. autoclass:: onnx_array_api.npx.npx_types.TensorType + :members: + TupleType +++++++++ diff --git a/_doc/api/npx_var.rst b/_doc/api/npx_var.rst index 8041e5e..1f863fb 100644 --- a/_doc/api/npx_var.rst +++ b/_doc/api/npx_var.rst @@ -15,3 +15,16 @@ Cst, Input .. autoclass:: onnx_array_api.npx.npx_var.Input :members: + +ManyIdentity +++++++++++++ + +.. autoclass:: onnx_array_api.npx.npx_var.ManyIdentity + :members: + +Par ++++ + +.. autoclass:: onnx_array_api.npx.npx_var.Par + :members: + diff --git a/_unittests/onnx-numpy-skips.txt b/_unittests/onnx-numpy-skips.txt index e5c56f5..dcb067c 100644 --- a/_unittests/onnx-numpy-skips.txt +++ b/_unittests/onnx-numpy-skips.txt @@ -1,7 +1,7 @@ # API failures # see https://github.com/data-apis/array-api-tests/blob/master/numpy-skips.txt array_api_tests/test_creation_functions.py::test_asarray_scalars -array_api_tests/test_creation_functions.py::test_arange +# array_api_tests/test_creation_functions.py::test_arange array_api_tests/test_creation_functions.py::test_asarray_arrays array_api_tests/test_creation_functions.py::test_empty array_api_tests/test_creation_functions.py::test_empty_like diff --git a/_unittests/test_array_api.bat b/_unittests/test_array_api.bat new file mode 100644 index 0000000..1ec2833 --- /dev/null +++ b/_unittests/test_array_api.bat @@ -0,0 +1,4 @@ +@echo off +set ARRAY_API_TESTS_MODULE=onnx_array_api.array_api.onnx_numpy +python -m pytest ../../array-api-tests/array_api_tests/test_creation_functions.py::test_arange || exit 1 +python -m pytest ../../array-api-tests/array_api_tests/test_creation_functions.py --hypothesis-explain --skips-file=_unittests/onnx-numpy-skips.txt || exit 1 diff --git a/_unittests/ut_npx/test_npx.py b/_unittests/ut_npx/test_npx.py index 17b5863..9f03492 100644 --- a/_unittests/ut_npx/test_npx.py +++ b/_unittests/ut_npx/test_npx.py @@ -103,6 +103,7 @@ Int64, OptParType, TensorType, + OptTensorType, ) from onnx_array_api.npx.npx_var import Input, Var @@ -151,6 +152,33 @@ def test_tensor(self): self.assertRaise(lambda: TensorType[None], TypeError) self.assertRaise(lambda: TensorType[{np.float32, np.str_}], TypeError) + def test_opt_tensor(self): + dt = OptTensorType["float32"] + self.assertEqual(len(dt.dtypes), 1) + self.assertEqual(dt.dtypes[0].dtype, ElemType.float32) + self.assertEmpty(dt.shape) + self.assertEqual(dt.type_name(), "OptTensorType['float32']") + + dt = OptTensorType["float32"] + self.assertEqual(len(dt.dtypes), 1) + self.assertEqual(dt.dtypes[0].dtype, ElemType.float32) + self.assertEqual(dt.type_name(), "OptTensorType['float32']") + + dt = OptTensorType[np.float32] + self.assertEqual(len(dt.dtypes), 1) + self.assertEqual(dt.dtypes[0].dtype, ElemType.float32) + self.assertEqual(dt.type_name(), "OptTensorType['float32']") + self.assertEmpty(dt.shape) + + dt = OptTensorType[np.str_] + self.assertEqual(len(dt.dtypes), 1) + self.assertEqual(dt.dtypes[0].dtype, ElemType.str_) + self.assertEqual(dt.type_name(), "OptTensorType[strings]") + self.assertEmpty(dt.shape) + + self.assertRaise(lambda: TensorType[None], TypeError) + self.assertRaise(lambda: TensorType[{np.float32, np.str_}], TypeError) + def test_superset(self): t1 = TensorType[ElemType.numerics] t2 = TensorType[ElemType.float64] @@ -2544,5 +2572,5 @@ def test_get_item(self): if __name__ == "__main__": - # TestNpx().test_get_item() + TestNpx().test_opt_tensor() unittest.main(verbosity=2) diff --git a/onnx_array_api/array_api/onnx_numpy.py 
b/onnx_array_api/array_api/onnx_numpy.py index c56a4d8..8b74b92 100644 --- a/onnx_array_api/array_api/onnx_numpy.py +++ b/onnx_array_api/array_api/onnx_numpy.py @@ -20,7 +20,15 @@ from ..npx.npx_functions import ones as generic_ones from ..npx.npx_functions import zeros as generic_zeros from ..npx.npx_numpy_tensors import EagerNumpyTensor -from ..npx.npx_types import DType, ElemType, TensorType, OptParType, ParType, Scalar +from ..npx.npx_types import ( + DType, + ElemType, + TensorType, + OptParType, + OptTensorType, + ParType, + Scalar, +) from ._onnx_common import template_asarray from . import _finalize_array_api @@ -61,8 +69,8 @@ def asarray( def arange( start_or_stop: TensorType[ElemType.int64, "I", (1,)], - stop_or_step: Optional[TensorType[ElemType.int64, "I", (1,)]] = None, - step: Optional[TensorType[ElemType.int64, "I", (1,)]] = None, + stop_or_step: OptTensorType[ElemType.int64, "I", (1,)] = None, + step: OptTensorType[ElemType.int64, "I", (1,)] = None, dtype: OptParType[DType] = None, ) -> TensorType[ElemType.numerics, "T"]: print("####", start_or_stop, stop_or_step, step, dtype) diff --git a/onnx_array_api/npx/npx_functions.py b/onnx_array_api/npx/npx_functions.py index 1de03ad..e134de0 100644 --- a/onnx_array_api/npx/npx_functions.py +++ b/onnx_array_api/npx/npx_functions.py @@ -1,4 +1,4 @@ -from typing import Optional, Tuple, Union +from typing import Tuple, Union import array_api_compat.numpy as np_array_api import numpy as np from onnx import FunctionProto, ModelProto, NodeProto, TensorProto @@ -11,11 +11,12 @@ DType, ElemType, OptParType, + OptTensorType, ParType, + Scalar, SequenceType, TensorType, TupleType, - Scalar, ) from .npx_var import Var @@ -45,7 +46,7 @@ def absolute( @npxapi_inline def all( x: TensorType[ElemType.bool_, "T"], - axis: Optional[TensorType[ElemType.int64, "I"]] = None, + axis: OptTensorType[ElemType.int64, "I"] = None, keepdims: ParType[int] = 0, ) -> TensorType[ElemType.bool_, "T"]: """ @@ -103,8 +104,8 @@ def amin( @npxapi_inline def arange( start_or_stop: TensorType[ElemType.int64, "I", (1,)], - stop_or_step: Optional[TensorType[ElemType.int64, "I", (1,)]] = None, - step: Optional[TensorType[ElemType.int64, "I", (1,)]] = None, + stop_or_step: OptTensorType[ElemType.int64, "I", (1,)] = None, + step: OptTensorType[ElemType.int64, "I", (1,)] = None, dtype: OptParType[DType] = None, ) -> TensorType[ElemType.numerics, "T"]: "See :func:`numpy.arccos`." @@ -298,7 +299,7 @@ def cosh(x: TensorType[ElemType.numerics, "T"]) -> TensorType[ElemType.numerics, @npxapi_inline def cumsum( x: TensorType[ElemType.numerics, "T"], - axis: Optional[TensorType[ElemType.int64, "I"]] = None, + axis: OptTensorType[ElemType.int64, "I"] = None, ) -> TensorType[ElemType.numerics, "T"]: "See :func:`numpy.cumsum`." if axis is None: @@ -522,8 +523,8 @@ def ones( def pad( x: TensorType[ElemType.numerics, "T"], pads: TensorType[ElemType.int64, "I"], - constant_value: Optional[TensorType[ElemType.numerics, "T"]] = None, - axes: Optional[TensorType[ElemType.int64, "I"]] = None, + constant_value: OptTensorType[ElemType.numerics, "T"] = None, + axes: OptTensorType[ElemType.int64, "I"] = None, mode: ParType[str] = "constant", ): """ @@ -618,7 +619,7 @@ def sqrt(x: TensorType[ElemType.numerics, "T"]) -> TensorType[ElemType.numerics, @npxapi_inline def squeeze( x: TensorType[ElemType.numerics, "T"], - axis: Optional[TensorType[ElemType.int64, "I"]] = None, + axis: OptTensorType[ElemType.int64, "I"] = None, ) -> TensorType[ElemType.numerics, "T"]: "See :func:`numpy.squeeze`." 
if axis is None: diff --git a/onnx_array_api/npx/npx_graph_builder.py b/onnx_array_api/npx/npx_graph_builder.py index ff02843..511b349 100644 --- a/onnx_array_api/npx/npx_graph_builder.py +++ b/onnx_array_api/npx/npx_graph_builder.py @@ -41,6 +41,7 @@ DType, ElemType, OptParType, + OptTensorType, ParType, SequenceType, TensorType, @@ -284,7 +285,9 @@ def _io( """ if self.as_function: return _FunctionIO(name) - if tensor_type is not None and not issubclass(tensor_type, TensorType): + if tensor_type is not None and not issubclass( + tensor_type, (TensorType, OptTensorType) + ): raise TypeError( f"Unexpected type {tensor_type.type_name()} for tensor_type. " f"This may happen if you specialised the function based on " @@ -494,7 +497,15 @@ def _function_to_onnx(self, fct: Callable, n_inputs: int, n_outputs: int): anno = par.annotation if not issubclass( anno, - (ElemType, OptParType, ParType, SequenceType, TensorType, TupleType), + ( + ElemType, + OptParType, + ParType, + SequenceType, + TensorType, + OptTensorType, + TupleType, + ), ): raise TypeError( f"Annotation must of a known not {type(anno)} for " diff --git a/onnx_array_api/npx/npx_jit_eager.py b/onnx_array_api/npx/npx_jit_eager.py index c222f01..018d456 100644 --- a/onnx_array_api/npx/npx_jit_eager.py +++ b/onnx_array_api/npx/npx_jit_eager.py @@ -148,6 +148,8 @@ def make_key(*values, **kwargs): else: raise TypeError(f"Input {iv} cannot have such tuple: {v}.") res.append(tuple(subkey)) + elif v is None: + res.append(v) else: raise TypeError( f"Unable to build a key, input {iv} has type {type(v)}." @@ -170,7 +172,8 @@ def make_key(*values, **kwargs): else: newv.append(t) res.append(tuple(newv)) - elif v is None and k in {"dtype"}: + elif v is None: + # optional parameter or inputs res.append(k) res.append(v) else: @@ -200,7 +203,9 @@ def to_jit(self, *values, **kwargs): for i, (v, iname) in enumerate(zip(values, names)): if i < len(annot_values) and not isinstance(annot_values[i], type): raise TypeError( - f"annotation {i} is not a type but is {annot_values[i]!r}." + f"annotation {i} is not a type but is " + f"{type(annot_values[i])!r}, " + f"annot_values[i]={annot_values[i]!r}, " f"for function {self.f} " f"from module {self.f.__module__!r}." ) @@ -217,9 +222,11 @@ def to_jit(self, *values, **kwargs): elif self.input_to_kwargs_ != input_to_kwargs: raise RuntimeError( f"Unexpected input and argument. Previous call produced " - f"self.input_to_kwargs_={self.input_to_kwargs_} and " + f"self.input_to_kwargs_={self.input_to_kwargs_}, " + f"self.n_inputs_={self.n_inputs_} and " f"input_to_kwargs={input_to_kwargs} for function {self.f} " - f"from module {self.f.__module__!r}." + f"from module {self.f.__module__!r}, " + f"len(values)={len(values)}, kwargs={kwargs!r}." ) elif self.input_to_kwargs_: constraints = {} diff --git a/onnx_array_api/npx/npx_types.py b/onnx_array_api/npx/npx_types.py index 0f7f6dc..1fa4ea7 100644 --- a/onnx_array_api/npx/npx_types.py +++ b/onnx_array_api/npx/npx_types.py @@ -93,12 +93,12 @@ def type_name(cls) -> str: class _DType2(DType): - "Wraps an into a different type." + "Wraps a type into a different type." pass class _DTypes(DType): - "Wraps an into a different type." + "Wraps a type into a different type." 
pass @@ -422,6 +422,8 @@ class TensorType(WrapperType): :param name: name of the type """ + main_name = "TensorType" + @classmethod def __class_getitem__(cls, *args): if isinstance(args, tuple) and len(args) == 1 and isinstance(args[0], tuple): @@ -515,13 +517,13 @@ def type_name(cls) -> str: set_name = repr(st) if cls.shape: if cls.name: - newt = f"TensorType[{set_name}, {cls.shape!r}, {cls.name!r}]" + newt = f"{cls.main_name}[{set_name}, {cls.shape!r}, {cls.name!r}]" else: - newt = f"TensorType[{set_name}, {cls.shape!r}]" + newt = f"{cls.main_name}[{set_name}, {cls.shape!r}]" elif cls.name: - newt = f"TensorType[{set_name}, {cls.name!r}]" + newt = f"{cls.main_name}[{set_name}, {cls.name!r}]" else: - newt = f"TensorType[{set_name}]" + newt = f"{cls.main_name}[{set_name}]" if "<" in newt or "{" in newt: raise NameError(f"Name is wrong {newt!r}.") return newt @@ -560,6 +562,16 @@ def issuperset(cls, tensor_type: type) -> bool: return True +class OptTensorType(TensorType): + """ + Defines an optional tensor type. + + :param dtype: element type + """ + + main_name = "OptTensorType" + + class SequenceType(WrapperType): """ Defines a sequence of tensors. diff --git a/onnx_array_api/npx/npx_var.py b/onnx_array_api/npx/npx_var.py index a4802e3..e7ca0c2 100644 --- a/onnx_array_api/npx/npx_var.py +++ b/onnx_array_api/npx/npx_var.py @@ -4,7 +4,15 @@ from .._helpers import np_dtype_to_tensor_dtype from .npx_array_api import BaseArrayApi, ArrayApiError from .npx_constants import DEFAULT_OPSETS, ONNX_DOMAIN -from .npx_types import DType, ElemType, OptParType, ParType, TensorType, TupleType +from .npx_types import ( + DType, + ElemType, + OptParType, + ParType, + TensorType, + OptTensorType, + TupleType, +) class Par: @@ -852,7 +860,7 @@ def reshape(self, shape: "Var") -> "Var": def reduce_function( self, reduce_op, - axis: TensorType[ElemType.int64, "I"] = None, + axis: OptTensorType[ElemType.int64, "I"] = None, keepdims: ParType[int] = 0, ) -> "Var": "See :func:`numpy.sum` or any other reduce function." 
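Patches 01 and 02 above thread arange through the stack: patch 01 exposes it in onnx_array_api.array_api.onnx_numpy and enables the corresponding array-api-tests case, while patch 02 replaces the Optional[TensorType[...]] annotations with a dedicated OptTensorType so that the jit layer can recognize an optional tensor input such as stop_or_step or step. The natural ONNX lowering for arange is the Range operator. The sketch below builds such a graph by hand with onnx.helper to show the idea; the graph layout, the Squeeze-to-scalar step and the tensor names are assumptions for illustration, not necessarily the exact graph the library emits.

import numpy as np
from onnx import TensorProto
from onnx.helper import (
    make_graph,
    make_model,
    make_node,
    make_opsetid,
    make_tensor_value_info,
)
from onnx.reference import ReferenceEvaluator

# Range expects rank-0 scalars, so the (1,)-shaped tensors used by the
# array API signature are squeezed first (an assumed, illustrative layout).
nodes = [
    make_node("Squeeze", ["start"], ["start0"]),
    make_node("Squeeze", ["stop"], ["stop0"]),
    make_node("Squeeze", ["step"], ["step0"]),
    make_node("Range", ["start0", "stop0", "step0"], ["Y"]),
]
graph = make_graph(
    nodes,
    "arange_sketch",
    [
        make_tensor_value_info(n, TensorProto.INT64, [1])
        for n in ("start", "stop", "step")
    ],
    [make_tensor_value_info("Y", TensorProto.INT64, [None])],
)
model = make_model(graph, opset_imports=[make_opsetid("", 18)])

ref = ReferenceEvaluator(model)
feeds = {
    "start": np.array([0], dtype=np.int64),
    "stop": np.array([4], dtype=np.int64),
    "step": np.array([2], dtype=np.int64),
}
print(ref.run(None, feeds)[0])  # [0 2], the same as np.arange(0, 4, 2)

The argument juggling in npx_functions.arange follows numpy.arange: one positional argument is read as the stop value with start defaulting to 0, two positional arguments are read as (start, stop), and step defaults to 1, which is why the parameters are named start_or_stop and stop_or_step. The tests added in patch 03 below (test_arange, later renamed test_arange_default, and test_arange_step) exercise both paths.
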
From 35e443e6554ecb2006d78c3b76cbcdb45954e09d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?xavier=20dupr=C3=A9?= Date: Tue, 20 Jun 2023 00:45:54 +0200 Subject: [PATCH 03/17] add more tests --- _unittests/ut_array_api/test_onnx_numpy.py | 18 +++++++++++++++++- onnx_array_api/array_api/onnx_numpy.py | 1 - onnx_array_api/npx/npx_jit_eager.py | 9 +++++---- 3 files changed, 22 insertions(+), 6 deletions(-) diff --git a/_unittests/ut_array_api/test_onnx_numpy.py b/_unittests/ut_array_api/test_onnx_numpy.py index 4cb7544..2ed1274 100644 --- a/_unittests/ut_array_api/test_onnx_numpy.py +++ b/_unittests/ut_array_api/test_onnx_numpy.py @@ -19,6 +19,22 @@ def test_zeros(self): a = xp.absolute(mat) self.assertEqualArray(np.absolute(mat.numpy()), a.numpy()) + def test_arange(self): + a = EagerTensor(np.array([0], dtype=np.int64)) + b = EagerTensor(np.array([2], dtype=np.int64)) + mat = xp.arange(a, b) + matnp = mat.numpy() + self.assertEqual(matnp.shape, (2,)) + self.assertEqualArray(matnp, np.arange(0, 2).astype(np.int64)) + + def test_arange_step(self): + a = EagerTensor(np.array([4], dtype=np.int64)) + s = EagerTensor(np.array([2], dtype=np.int64)) + mat = xp.arange(a, step=s) + matnp = mat.numpy() + self.assertEqual(matnp.shape, (2,)) + self.assertEqualArray(matnp, np.arange(4, step=2).astype(np.int64)) + def test_zeros_none(self): c = EagerTensor(np.array([4, 5], dtype=np.int64)) mat = xp.zeros(c) @@ -54,5 +70,5 @@ def test_full_bool(self): if __name__ == "__main__": - TestOnnxNumpy().test_zeros_none() + TestOnnxNumpy().test_arange_step() unittest.main(verbosity=2) diff --git a/onnx_array_api/array_api/onnx_numpy.py b/onnx_array_api/array_api/onnx_numpy.py index 8b74b92..5e61890 100644 --- a/onnx_array_api/array_api/onnx_numpy.py +++ b/onnx_array_api/array_api/onnx_numpy.py @@ -73,7 +73,6 @@ def arange( step: OptTensorType[ElemType.int64, "I", (1,)] = None, dtype: OptParType[DType] = None, ) -> TensorType[ElemType.numerics, "T"]: - print("####", start_or_stop, stop_or_step, step, dtype) if isinstance(start_or_stop, int): start_or_stop = EagerNumpyTensor(np.array([start_or_stop], dtype=np.int64)) if isinstance(stop_or_step, int): diff --git a/onnx_array_api/npx/npx_jit_eager.py b/onnx_array_api/npx/npx_jit_eager.py index 018d456..19357d6 100644 --- a/onnx_array_api/npx/npx_jit_eager.py +++ b/onnx_array_api/npx/npx_jit_eager.py @@ -149,7 +149,7 @@ def make_key(*values, **kwargs): raise TypeError(f"Input {iv} cannot have such tuple: {v}.") res.append(tuple(subkey)) elif v is None: - res.append(v) + res.append(None) else: raise TypeError( f"Unable to build a key, input {iv} has type {type(v)}." 
@@ -174,8 +174,7 @@ def make_key(*values, **kwargs): res.append(tuple(newv)) elif v is None: # optional parameter or inputs - res.append(k) - res.append(v) + pass else: raise TypeError( f"Type {type(v)} is not yet supported, " @@ -269,7 +268,9 @@ def to_jit(self, *values, **kwargs): else: kwargs = kwargs.copy() kwargs.update(new_kwargs) - + print("***", inputs) + print(kwargs) + print(self.f) var = self.f(*inputs, **kwargs) onx = var.to_onnx( From a547fd6cbd932231c5a87337487265c054172524 Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Tue, 20 Jun 2023 09:07:25 +0200 Subject: [PATCH 04/17] better error handling --- onnx_array_api/npx/npx_jit_eager.py | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/onnx_array_api/npx/npx_jit_eager.py b/onnx_array_api/npx/npx_jit_eager.py index 19357d6..5703d5e 100644 --- a/onnx_array_api/npx/npx_jit_eager.py +++ b/onnx_array_api/npx/npx_jit_eager.py @@ -120,12 +120,15 @@ def get_onnx(self, key: Optional[int] = None): ) return self.onxs[key] - @staticmethod - def make_key(*values, **kwargs): + def make_key(self, *values: List[Any], **kwargs: Dict[str, Any]) -> Tuple[Any, ...]: """ Builds a key based on the input types and parameters. Every set of inputs or parameters producing the same key (or signature) must use the same compiled ONNX. + + :param values: values given to the function + :param kwargs: parameters + :return: tuple of mutable keys """ res = [] for iv, v in enumerate(values): @@ -268,10 +271,12 @@ def to_jit(self, *values, **kwargs): else: kwargs = kwargs.copy() kwargs.update(new_kwargs) - print("***", inputs) - print(kwargs) - print(self.f) - var = self.f(*inputs, **kwargs) + try: + var = self.f(*inputs, **kwargs) + except TypeError as e: + raise TypeError( + f"Unexpected error, inputs={inputs}, kwargs={kwargs}." + ) from e onx = var.to_onnx( constraints=constraints, @@ -369,9 +374,14 @@ def jit_call(self, *values, **kwargs): # No jitting was ever called. try: onx, fct = self.to_jit(*values, **kwargs) + except TypeError as e: + raise TypeError( + f"ERROR with self.f={self.f}, " + f"values={values!r}, kwargs={kwargs!r}" + ) from e except Exception as e: raise RuntimeError( - f"ERROR with self.f={self.f}, " + f"Undefined ERROR with self.f={self.f}, " f"values={values!r}, kwargs={kwargs!r}" ) from e if self.input_to_kwargs_ is None: From 0913c83cd4672dc9b629b1b49a6b8992ef3c47b1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?xavier=20dupr=C3=A9?= Date: Wed, 21 Jun 2023 00:22:29 +0200 Subject: [PATCH 05/17] add kwargs_to_input --- onnx_array_api/npx/npx_jit_eager.py | 55 +++++++++++++++++++++---- onnx_array_api/npx/npx_numpy_tensors.py | 2 +- 2 files changed, 49 insertions(+), 8 deletions(-) diff --git a/onnx_array_api/npx/npx_jit_eager.py b/onnx_array_api/npx/npx_jit_eager.py index 5703d5e..0acda84 100644 --- a/onnx_array_api/npx/npx_jit_eager.py +++ b/onnx_array_api/npx/npx_jit_eager.py @@ -5,7 +5,7 @@ import numpy as np from .npx_tensors import EagerTensor, JitTensor -from .npx_types import DType, TensorType +from .npx_types import DType, OptTensorType, TensorType from .npx_var import Cst, Input, Var logger = getLogger("onnx-array-api") @@ -47,6 +47,7 @@ def __init__( # onnx to remember an input in fact a mandatory parameter. 
self.n_inputs_ = 0 self.input_to_kwargs_ = None + self.kwargs_to_input_ = None self.method_name_ = None def info(self, prefix: Optional[str] = None, method_name: Optional[str] = None): @@ -57,13 +58,14 @@ def info(self, prefix: Optional[str] = None, method_name: Optional[str] = None): logger.info("") return logger.info( - "%s [%s.%s] nx=%d ni=%d kw=%d f=%s.%s cl=%s me=%s", + "%s [%s.%s] nx=%d ni=%d ikw=%d kwi=%d f=%s.%s cl=%s me=%s", prefix, self.__class__.__name__, method_name[:6], len(self.onxs), self.n_inputs_, 0 if self.input_to_kwargs_ is None else 1, + 0 if self.kwargs_to_input_ is None else 1, self.f.__module__, self.f.__name__, self.tensor_class.__name__, @@ -78,7 +80,8 @@ def status(self, me: str) -> str: f"[{self.__class__.__name__}.{me[:6]}]" f"nx={len(self.onxs)} " f"ni={self.n_inputs_} " - f"kw={0 if self.input_to_kwargs_ is None else 1} " + f"ikw={0 if self.input_to_kwargs_ is None else 1} " + f"kwi={0 if self.kwargs_to_input_ is None else 1} " f"f={self.f.__module__}.{self.f.__name__} " f"cl={self.tensor_class.__name__} " f"me={self.method_name_ or ''}" @@ -133,11 +136,23 @@ def make_key(self, *values: List[Any], **kwargs: Dict[str, Any]) -> Tuple[Any, . res = [] for iv, v in enumerate(values): if isinstance(v, (Var, EagerTensor, JitTensor)): + if iv in self.kwargs_to_input_: + raise RuntimeError( + f"Input {iv} should be a constant to be moved " + f"to the attribute list, v={v}." + ) res.append(v.key) elif isinstance(v, (int, float, bool, DType)): + if iv in self.kwargs_to_input_: + res.append(self.kwargs_to_input_[iv]) res.append(type(v)) res.append(v) elif isinstance(v, slice): + if iv in self.kwargs_to_input_: + raise NotImplementedError( + f"Input {iv} should be a constant to be moved " + f"to the attribute list, v={v}." + ) res.append(("slice", v.start, v.stop, v.step)) elif isinstance(v, type): res.append(("type", v.__name__)) @@ -152,6 +167,8 @@ def make_key(self, *values: List[Any], **kwargs: Dict[str, Any]) -> Tuple[Any, . raise TypeError(f"Input {iv} cannot have such tuple: {v}.") res.append(tuple(subkey)) elif v is None: + if iv in self.kwargs_to_input_: + res.append(self.kwargs_to_input_[iv]) res.append(None) else: raise TypeError( @@ -159,7 +176,10 @@ def make_key(self, *values: List[Any], **kwargs: Dict[str, Any]) -> Tuple[Any, . 
) if kwargs: for k, v in sorted(kwargs.items()): - if isinstance(v, (int, float, str, type, bool, DType)): + if k in self.kwargs_to_input_: + res.append(type(v)) + res.append(v) + elif isinstance(v, (int, float, str, type, bool, DType)): res.append(k) res.append(type(v)) res.append(v) @@ -198,6 +218,7 @@ def to_jit(self, *values, **kwargs): annotations = self.f.__annotations__ if len(annotations) > 0: input_to_kwargs = {} + kwargs_to_input = {} names = list(annotations.keys()) annot_values = list(annotations.values()) constraints = {} @@ -215,12 +236,22 @@ def to_jit(self, *values, **kwargs): i >= len(annot_values) or issubclass(annot_values[i], TensorType) ): constraints[iname] = v.tensor_type_dims + elif ( + v is None + and i < len(annot_values) + and issubclass(annot_values[i], OptTensorType) + ): + constraints[iname] = annot_values[i] + kwargs_to_input[iname] = i, annot_values[i] else: new_kwargs[iname] = v input_to_kwargs[i] = iname if self.input_to_kwargs_ is None: - self.n_inputs_ = len(values) - len(input_to_kwargs) + self.n_inputs_ = ( + len(values) - len(input_to_kwargs) + len(kwargs_to_input) + ) self.input_to_kwargs_ = input_to_kwargs + self.kwargs_to_input_ = kwargs_to_input elif self.input_to_kwargs_ != input_to_kwargs: raise RuntimeError( f"Unexpected input and argument. Previous call produced " @@ -230,7 +261,16 @@ def to_jit(self, *values, **kwargs): f"from module {self.f.__module__!r}, " f"len(values)={len(values)}, kwargs={kwargs!r}." ) - elif self.input_to_kwargs_: + elif self.input_to_kwargs_ != input_to_kwargs: + raise RuntimeError( + f"Unexpected input and argument. Previous call produced " + f"self.kwargs_to_input_={self.kwargs_to_input_}, " + f"self.n_inputs_={self.n_inputs_} and " + f"kwargs_to_input={kwargs_to_input} for function {self.f} " + f"from module {self.f.__module__!r}, " + f"len(values)={len(values)}, kwargs={kwargs!r}." + ) + elif self.input_to_kwargs_ or self.kwargs_to_input_: constraints = {} new_kwargs = {} for i, (v, iname) in enumerate(zip(values, names)): @@ -275,7 +315,8 @@ def to_jit(self, *values, **kwargs): var = self.f(*inputs, **kwargs) except TypeError as e: raise TypeError( - f"Unexpected error, inputs={inputs}, kwargs={kwargs}." + f"Unexpected error, inputs={inputs}, kwargs={kwargs}, " + f"self.input_to_kwargs_={self.input_to_kwargs_}." 
) from e onx = var.to_onnx( diff --git a/onnx_array_api/npx/npx_numpy_tensors.py b/onnx_array_api/npx/npx_numpy_tensors.py index f89ed9f..d0dc056 100644 --- a/onnx_array_api/npx/npx_numpy_tensors.py +++ b/onnx_array_api/npx/npx_numpy_tensors.py @@ -42,7 +42,7 @@ def run(self, *inputs: List["NumpyTensor"]) -> List["NumpyTensor"]: ) feeds = {} for name, inp in zip(self.input_names, inputs): - feeds[name] = inp.value + feeds[name] = None if inp is None else inp.value res = self.ref.run(None, feeds) return list(map(self.tensor_class, res)) From 672b510121492e6e646bbc31585653ab31c2e197 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?xavier=20dupr=C3=A9?= Date: Wed, 21 Jun 2023 00:43:33 +0200 Subject: [PATCH 06/17] fix inconcistencies --- onnx_array_api/npx/npx_jit_eager.py | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/onnx_array_api/npx/npx_jit_eager.py b/onnx_array_api/npx/npx_jit_eager.py index 0acda84..cab7802 100644 --- a/onnx_array_api/npx/npx_jit_eager.py +++ b/onnx_array_api/npx/npx_jit_eager.py @@ -252,20 +252,16 @@ def to_jit(self, *values, **kwargs): ) self.input_to_kwargs_ = input_to_kwargs self.kwargs_to_input_ = kwargs_to_input - elif self.input_to_kwargs_ != input_to_kwargs: + elif ( + self.input_to_kwargs_ != input_to_kwargs + or self.input_to_kwargs_ != input_to_kwargs + ): raise RuntimeError( f"Unexpected input and argument. Previous call produced " f"self.input_to_kwargs_={self.input_to_kwargs_}, " - f"self.n_inputs_={self.n_inputs_} and " - f"input_to_kwargs={input_to_kwargs} for function {self.f} " - f"from module {self.f.__module__!r}, " - f"len(values)={len(values)}, kwargs={kwargs!r}." - ) - elif self.input_to_kwargs_ != input_to_kwargs: - raise RuntimeError( - f"Unexpected input and argument. Previous call produced " f"self.kwargs_to_input_={self.kwargs_to_input_}, " f"self.n_inputs_={self.n_inputs_} and " + f"input_to_kwargs={input_to_kwargs}, " f"kwargs_to_input={kwargs_to_input} for function {self.f} " f"from module {self.f.__module__!r}, " f"len(values)={len(values)}, kwargs={kwargs!r}." @@ -295,6 +291,7 @@ def to_jit(self, *values, **kwargs): } self.n_inputs_ = len(values) self.input_to_kwargs_ = {} + self.kwargs_to_input_ = {} if self.output_types is not None: constraints.update(self.output_types) @@ -316,7 +313,8 @@ def to_jit(self, *values, **kwargs): except TypeError as e: raise TypeError( f"Unexpected error, inputs={inputs}, kwargs={kwargs}, " - f"self.input_to_kwargs_={self.input_to_kwargs_}." + f"self.input_to_kwargs_={self.input_to_kwargs_}, " + f"self.kwargs_to_input_={self.kwargs_to_input_}." ) from e onx = var.to_onnx( @@ -430,6 +428,11 @@ def jit_call(self, *values, **kwargs): f"Attribute 'input_to_kwargs_' should be set for " f"function {self.f} form module {self.f.__module__!r}." ) + if self.kwargs_to_input_ is None: + raise RuntimeError( + f"Attribute 'kwargs_to_input_' should be set for " + f"function {self.f} form module {self.f.__module__!r}." 
+ ) else: onx, fct = None, None From d62bd44fcb48e03efe23187882b6d07a00241ee7 Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Wed, 21 Jun 2023 11:01:22 +0200 Subject: [PATCH 07/17] improvments --- _unittests/ut_npx/test_npx.py | 12 +++- ...t_array_api.bat => win_test_array_api.bat} | 0 onnx_array_api/npx/npx_jit_eager.py | 4 +- onnx_array_api/npx/npx_types.py | 19 ++++++ onnx_array_api/npx/npx_var.py | 67 +++++++++++++++---- 5 files changed, 86 insertions(+), 16 deletions(-) rename _unittests/{test_array_api.bat => win_test_array_api.bat} (100%) diff --git a/_unittests/ut_npx/test_npx.py b/_unittests/ut_npx/test_npx.py index 9f03492..e7dbaa2 100644 --- a/_unittests/ut_npx/test_npx.py +++ b/_unittests/ut_npx/test_npx.py @@ -2564,13 +2564,19 @@ def test_numpy_all_empty_axis_1(self): got = ref.run(None, {"A": data}) self.assertEqualArray(y, got[0]) - @unittest.skipIf(True, reason="Fails to follow Array API") - def test_get_item(self): + # @unittest.skipIf(True, reason="Fails to follow Array API") + def test_get_item_b(self): a = EagerNumpyTensor(np.array([True], dtype=np.bool_)) i = a[0] self.assertEqualArray(i.numpy(), a.numpy()[0]) + # @unittest.skipIf(True, reason="Fails to follow Array API") + def test_get_item_i8(self): + a = EagerNumpyTensor(np.array([5, 6], dtype=np.int8)) + i = a[0] + self.assertEqualArray(i.numpy(), a.numpy()[0]) + if __name__ == "__main__": - TestNpx().test_opt_tensor() + TestNpx().test_filter() unittest.main(verbosity=2) diff --git a/_unittests/test_array_api.bat b/_unittests/win_test_array_api.bat similarity index 100% rename from _unittests/test_array_api.bat rename to _unittests/win_test_array_api.bat diff --git a/onnx_array_api/npx/npx_jit_eager.py b/onnx_array_api/npx/npx_jit_eager.py index cab7802..74b8e68 100644 --- a/onnx_array_api/npx/npx_jit_eager.py +++ b/onnx_array_api/npx/npx_jit_eager.py @@ -297,7 +297,9 @@ def to_jit(self, *values, **kwargs): constraints.update(self.output_types) inputs = [ - Input(iname) for iname, v in zip(names, values) if iname in constraints + Input(iname, annotation=constraints[iname]) + for iname, v in zip(names, values) + if iname in constraints ] names = [i.name for i in inputs] if len(new_kwargs) > 0: diff --git a/onnx_array_api/npx/npx_types.py b/onnx_array_api/npx/npx_types.py index 1fa4ea7..77cc564 100644 --- a/onnx_array_api/npx/npx_types.py +++ b/onnx_array_api/npx/npx_types.py @@ -55,6 +55,8 @@ def np_dtype(self) -> "np.dtype": def __eq__(self, dt: "DType") -> bool: "Compares two types." + if dt is None: + return False if dt.__class__ is DType: return self.code_ == dt.code_ if isinstance(dt, (int, bool, str)): @@ -68,6 +70,8 @@ def __eq__(self, dt: "DType") -> bool: if dt in ElemType.numpy_map: dti = ElemType.numpy_map[dt] return self.code_ == dti.code_ + if issubclass(dt, ElemType): + return self.code_ == dt.dtype.code_ try: dti = np_dtype_to_tensor_dtype(dt) except KeyError: @@ -250,6 +254,9 @@ class ElemType(ElemTypeCst): @classmethod def __class_getitem__(cls, dtype: Union[str, DType]): + """ + Returns a subclass of this one with attribute `dtype`. + """ if isinstance(dtype, str): dtype = ElemType.names_int[dtype] elif dtype in ElemType.numpy_map: @@ -426,6 +433,10 @@ class TensorType(WrapperType): @classmethod def __class_getitem__(cls, *args): + """ + Returns a subclass of this one with two attributes `dtypes` + and `shape`. 
+ """ if isinstance(args, tuple) and len(args) == 1 and isinstance(args[0], tuple): args = args[0] name = None @@ -504,6 +515,14 @@ def __class_getitem__(cls, *args): raise NameError(f"Name is wrong {newt.__name__!r}.") return newt + @classmethod + def supports_dtype(cls, dtype: DType) -> bool: + """ + Determines if the element type `dtype` + is within `dtypes`. + """ + return dtype in cls.dtypes + @classmethod def type_name(cls) -> str: "Returns its full name." diff --git a/onnx_array_api/npx/npx_var.py b/onnx_array_api/npx/npx_var.py index e7ca0c2..07b61e4 100644 --- a/onnx_array_api/npx/npx_var.py +++ b/onnx_array_api/npx/npx_var.py @@ -1,6 +1,6 @@ from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np -from onnx import FunctionProto, ModelProto, NodeProto +from onnx import FunctionProto, ModelProto, NodeProto, TensorProto from .._helpers import np_dtype_to_tensor_dtype from .npx_array_api import BaseArrayApi, ArrayApiError from .npx_constants import DEFAULT_OPSETS, ONNX_DOMAIN @@ -288,18 +288,19 @@ def __getitem__(self, *args): def __init__( self, *inputs: List[Any], - op: Union[ - Callable, str, Tuple[str, str], FunctionProto, ModelProto, NodeProto + op: Optional[ + Union[Callable, str, Tuple[str, str], FunctionProto, ModelProto, NodeProto] ] = None, - dtype: Union[type, DType] = None, + dtype: Optional[Union[type, DType]] = None, inline: bool = False, - n_var_outputs: Optional[int] = 1, + n_var_outputs: int = 1, input_indices: Optional[List[int]] = None, **kwargs, ): self.inputs = list(inputs) self.n_var_outputs = n_var_outputs self.inline = inline + self._annotation = None if op is None: self.onnx_op = None # a constant elif isinstance(op, tuple): @@ -362,6 +363,16 @@ def __init__( self.set = Var._setter(self) self.current_var_ = None + @property + def annotation(self): + """Returns a type if known for the Var itself.""" + if self._annotation is None: + if "dtype" in self.onnx_op_kwargs: + dtype = self.onnx_op_kwargs["dtype"] + if isinstance(dtype, DType): + return TensorType[dtype] + return self._annotation + @property def self_var(self): """ @@ -970,11 +981,35 @@ def __getitem__(self, index: Any) -> "Var": if isinstance(index, Var): # scenario 2 - # TODO: fix this when index is an integer - new_shape = cst(np.array([-1], dtype=np.int64)) - new_self = self.reshape(new_shape) - new_index = index.reshape(new_shape) - return var(new_self, new_index, op="Compress") + # we rely on the annotation if it exists + if index.annotation is None: + dtype_bool = True + elif issubclass(index.annotation, TensorType): + if index.annotation.supports_dtype(DType(TensorProto.INT64)): + dtype_bool = False + elif index.annotation.supports_dtype(DType(TensorProto.BOOL)): + dtype_bool = True + else: + raise TypeError( + f"Unexpected dtype for annotation={index.annotation!r} " + f"for index={index!r}." + ) + else: + raise TypeError( + f"Unexpected annotation={index.annotation!r} " + f"for index={index!r}." + ) + + if dtype_bool: + # TODO: fix this when index is an integer and the annotation unknown + # it needs to support subgraph and tests + new_shape = cst(np.array([-1], dtype=np.int64)) + new_self = self.reshape(new_shape) + new_index = index.reshape(new_shape) + return var(new_self, new_index, op="Compress") + + # dtype is int + return var(self, index, axis=0, op="Gather") if isinstance(index, int): # Use Gather instead. @@ -1097,15 +1132,23 @@ class Input(Var): Defines an input, a placeholder. 
:param name: input name or None if undefined + :param annotation: annotation if any is available """ - def __init__(self, name=None): + def __init__(self, name: str = None, annotation: Optional[type] = None): Var.__init__(self) self.name = name self._prefix = name or "I" + self._annotation = annotation def __repr__(self): - return f"{self.__class__.__name__}({self.name!r})" + if self.annotation is None: + return f"{self.__class__.__name__}({self.name!r})" + return f"{self.__class__.__name__}({self.name!r}, {self._annotation})" + + @property + def annotation(self): + return self._annotation class Cst(Var): From 81d48e78ec347d69a2f1a3c82ff62387fdf5a1a1 Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Wed, 21 Jun 2023 23:50:45 +0200 Subject: [PATCH 08/17] fix one type issue --- onnx_array_api/npx/npx_types.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/onnx_array_api/npx/npx_types.py b/onnx_array_api/npx/npx_types.py index 77cc564..beef919 100644 --- a/onnx_array_api/npx/npx_types.py +++ b/onnx_array_api/npx/npx_types.py @@ -70,7 +70,7 @@ def __eq__(self, dt: "DType") -> bool: if dt in ElemType.numpy_map: dti = ElemType.numpy_map[dt] return self.code_ == dti.code_ - if issubclass(dt, ElemType): + if isinstance(dt, type) and issubclass(dt, ElemType): return self.code_ == dt.dtype.code_ try: dti = np_dtype_to_tensor_dtype(dt) From 3a0c5b2298f5d2ebc1a7d299d22e51f01bc44da1 Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Thu, 22 Jun 2023 00:09:00 +0200 Subject: [PATCH 09/17] issue with windows --- _unittests/ut_array_api/test_onnx_numpy.py | 2 +- onnx_array_api/npx/npx_var.py | 9 +++++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/_unittests/ut_array_api/test_onnx_numpy.py b/_unittests/ut_array_api/test_onnx_numpy.py index 2ed1274..0c60f1d 100644 --- a/_unittests/ut_array_api/test_onnx_numpy.py +++ b/_unittests/ut_array_api/test_onnx_numpy.py @@ -19,7 +19,7 @@ def test_zeros(self): a = xp.absolute(mat) self.assertEqualArray(np.absolute(mat.numpy()), a.numpy()) - def test_arange(self): + def test_arange_default(self): a = EagerTensor(np.array([0], dtype=np.int64)) b = EagerTensor(np.array([2], dtype=np.int64)) mat = xp.arange(a, b) diff --git a/onnx_array_api/npx/npx_var.py b/onnx_array_api/npx/npx_var.py index 07b61e4..90022c6 100644 --- a/onnx_array_api/npx/npx_var.py +++ b/onnx_array_api/npx/npx_var.py @@ -985,7 +985,9 @@ def __getitem__(self, index: Any) -> "Var": if index.annotation is None: dtype_bool = True elif issubclass(index.annotation, TensorType): - if index.annotation.supports_dtype(DType(TensorProto.INT64)): + if index.annotation.supports_dtype( + DType(TensorProto.INT64) + ) or index.annotation.supports_dtype(DType(TensorProto.INT32)): dtype_bool = False elif index.annotation.supports_dtype(DType(TensorProto.BOOL)): dtype_bool = True @@ -1144,7 +1146,10 @@ def __init__(self, name: str = None, annotation: Optional[type] = None): def __repr__(self): if self.annotation is None: return f"{self.__class__.__name__}({self.name!r})" - return f"{self.__class__.__name__}({self.name!r}, {self._annotation})" + return ( + f"{self.__class__.__name__}({self.name!r}, " + f"{self._annotation.__name__!r})" + ) @property def annotation(self): From 5da275ed426df578994c98a52109849d04ef5dfd Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Thu, 22 Jun 2023 00:46:03 +0200 Subject: [PATCH 10/17] set --- _unittests/ut_array_api/test_onnx_numpy.py | 16 ++++++- _unittests/ut_npx/test_npx.py | 2 - .../test_documentation_examples.py | 0 .../test_profiling.py | 0 
onnx_array_api/array_api/onnx_numpy.py | 9 ++++ onnx_array_api/npx/npx_functions.py | 44 +++++++++++++++++-- onnx_array_api/npx/npx_graph_builder.py | 5 ++- onnx_array_api/npx/npx_types.py | 2 +- 8 files changed, 69 insertions(+), 9 deletions(-) rename _unittests/{ut__main => ut_xrun_doc}/test_documentation_examples.py (100%) rename _unittests/{ut__main => ut_xrun_doc}/test_profiling.py (100%) diff --git a/_unittests/ut_array_api/test_onnx_numpy.py b/_unittests/ut_array_api/test_onnx_numpy.py index 0c60f1d..6eebceb 100644 --- a/_unittests/ut_array_api/test_onnx_numpy.py +++ b/_unittests/ut_array_api/test_onnx_numpy.py @@ -68,7 +68,21 @@ def test_full_bool(self): self.assertNotEmpty(matnp[0, 0]) self.assertEqualArray(matnp, np.full((4, 5), False)) + def test_arange_int00a(self): + a = EagerTensor(np.array([0], dtype=np.int64)) + b = EagerTensor(np.array([0], dtype=np.int64)) + mat = xp.arange(a, b) + matnp = mat.numpy() + self.assertEqual(matnp.shape, (2,)) + self.assertEqualArray(matnp, np.arange(0, 0)) + + def test_arange_int00(self): + mat = xp.arange(0, 0) + matnp = mat.numpy() + self.assertEqual(matnp.shape, (2,)) + self.assertEqualArray(matnp, np.arange(0, 0)) + if __name__ == "__main__": - TestOnnxNumpy().test_arange_step() + # TestOnnxNumpy().test_arange_int00() unittest.main(verbosity=2) diff --git a/_unittests/ut_npx/test_npx.py b/_unittests/ut_npx/test_npx.py index e7dbaa2..1a4fcdb 100644 --- a/_unittests/ut_npx/test_npx.py +++ b/_unittests/ut_npx/test_npx.py @@ -2564,13 +2564,11 @@ def test_numpy_all_empty_axis_1(self): got = ref.run(None, {"A": data}) self.assertEqualArray(y, got[0]) - # @unittest.skipIf(True, reason="Fails to follow Array API") def test_get_item_b(self): a = EagerNumpyTensor(np.array([True], dtype=np.bool_)) i = a[0] self.assertEqualArray(i.numpy(), a.numpy()[0]) - # @unittest.skipIf(True, reason="Fails to follow Array API") def test_get_item_i8(self): a = EagerNumpyTensor(np.array([5, 6], dtype=np.int8)) i = a[0] diff --git a/_unittests/ut__main/test_documentation_examples.py b/_unittests/ut_xrun_doc/test_documentation_examples.py similarity index 100% rename from _unittests/ut__main/test_documentation_examples.py rename to _unittests/ut_xrun_doc/test_documentation_examples.py diff --git a/_unittests/ut__main/test_profiling.py b/_unittests/ut_xrun_doc/test_profiling.py similarity index 100% rename from _unittests/ut__main/test_profiling.py rename to _unittests/ut_xrun_doc/test_profiling.py diff --git a/onnx_array_api/array_api/onnx_numpy.py b/onnx_array_api/array_api/onnx_numpy.py index 5e61890..c049dba 100644 --- a/onnx_array_api/array_api/onnx_numpy.py +++ b/onnx_array_api/array_api/onnx_numpy.py @@ -75,10 +75,19 @@ def arange( ) -> TensorType[ElemType.numerics, "T"]: if isinstance(start_or_stop, int): start_or_stop = EagerNumpyTensor(np.array([start_or_stop], dtype=np.int64)) + if isinstance(start_or_stop, float): + start_or_stop = EagerNumpyTensor(np.array([start_or_stop], dtype=np.float64)) + if isinstance(stop_or_step, int): stop_or_step = EagerNumpyTensor(np.array([stop_or_step], dtype=np.int64)) + if isinstance(stop_or_step, float): + stop_or_step = EagerNumpyTensor(np.array([stop_or_step], dtype=np.float64)) + if isinstance(step, int): step = EagerNumpyTensor(np.array([step], dtype=np.int64)) + if isinstance(step, float): + step = EagerNumpyTensor(np.array([step], dtype=np.float64)) + return generic_arange(start_or_stop, stop_or_step, step, dtype=dtype) diff --git a/onnx_array_api/npx/npx_functions.py b/onnx_array_api/npx/npx_functions.py index 
e134de0..8113a58 100644 --- a/onnx_array_api/npx/npx_functions.py +++ b/onnx_array_api/npx/npx_functions.py @@ -100,12 +100,50 @@ def amin( """ return var(x, op="ArgMin", axis=axis, keepdims=keepdims) + numerics = { + ElemType.int16, + ElemType.int32, + ElemType.int64, + ElemType.float32, + ElemType.float64, + } + @npxapi_inline def arange( - start_or_stop: TensorType[ElemType.int64, "I", (1,)], - stop_or_step: OptTensorType[ElemType.int64, "I", (1,)] = None, - step: OptTensorType[ElemType.int64, "I", (1,)] = None, + start_or_stop: TensorType[ + { + ElemType.int16, + ElemType.int32, + ElemType.int64, + ElemType.float32, + ElemType.float64, + }, + "I", + (1,), + ], + stop_or_step: OptTensorType[ + { + ElemType.int16, + ElemType.int32, + ElemType.int64, + ElemType.float32, + ElemType.float64, + }, + "I", + (1,), + ] = None, + step: OptTensorType[ + { + ElemType.int16, + ElemType.int32, + ElemType.int64, + ElemType.float32, + ElemType.float64, + }, + "I", + (1,), + ] = None, dtype: OptParType[DType] = None, ) -> TensorType[ElemType.numerics, "T"]: "See :func:`numpy.arccos`." diff --git a/onnx_array_api/npx/npx_graph_builder.py b/onnx_array_api/npx/npx_graph_builder.py index 511b349..563406c 100644 --- a/onnx_array_api/npx/npx_graph_builder.py +++ b/onnx_array_api/npx/npx_graph_builder.py @@ -332,13 +332,14 @@ def _io( if is_input: raise RuntimeError( f"tensor_type cannot be None for name={name!r} and " - f"input or output {index}." + f"input or output {index!r}." ) tensor_type = TensorType["undefined"] if len(tensor_type.dtypes) != 1: raise RuntimeError( f"tensor_type is not specific enough ({str(tensor_type)} " - f"or its full representation {tensor_type!r})." + f"or its full representation {tensor_type!r}, " + f"is_input={is_input}, index={index})." ) if tensor_type.shape is None: type_proto = TypeProto() diff --git a/onnx_array_api/npx/npx_types.py b/onnx_array_api/npx/npx_types.py index beef919..8ef66d2 100644 --- a/onnx_array_api/npx/npx_types.py +++ b/onnx_array_api/npx/npx_types.py @@ -543,7 +543,7 @@ def type_name(cls) -> str: newt = f"{cls.main_name}[{set_name}, {cls.name!r}]" else: newt = f"{cls.main_name}[{set_name}]" - if "<" in newt or "{" in newt: + if "<" in newt: raise NameError(f"Name is wrong {newt!r}.") return newt From b46e7d2a6bc644e6b4fa93167d09e1f2ca5bcfe8 Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Thu, 22 Jun 2023 00:46:32 +0200 Subject: [PATCH 11/17] remove unnecessary code --- onnx_array_api/npx/npx_functions.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/onnx_array_api/npx/npx_functions.py b/onnx_array_api/npx/npx_functions.py index 8113a58..27147c4 100644 --- a/onnx_array_api/npx/npx_functions.py +++ b/onnx_array_api/npx/npx_functions.py @@ -100,14 +100,6 @@ def amin( """ return var(x, op="ArgMin", axis=axis, keepdims=keepdims) - numerics = { - ElemType.int16, - ElemType.int32, - ElemType.int64, - ElemType.float32, - ElemType.float64, - } - @npxapi_inline def arange( From 02a912d0597faa6861f536c82ceadbab69785b5d Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Thu, 22 Jun 2023 12:13:25 +0200 Subject: [PATCH 12/17] improvments --- _unittests/ut_array_api/test_onnx_numpy.py | 2 +- onnx_array_api/npx/npx_graph_builder.py | 36 +++++++++++++++++--- onnx_array_api/npx/npx_numpy_tensors.py | 3 +- onnx_array_api/npx/npx_types.py | 38 ++++++++++++---------- 4 files changed, 55 insertions(+), 24 deletions(-) diff --git a/_unittests/ut_array_api/test_onnx_numpy.py b/_unittests/ut_array_api/test_onnx_numpy.py index 6eebceb..870a701 100644 --- 
a/_unittests/ut_array_api/test_onnx_numpy.py +++ b/_unittests/ut_array_api/test_onnx_numpy.py @@ -84,5 +84,5 @@ def test_arange_int00(self): if __name__ == "__main__": - # TestOnnxNumpy().test_arange_int00() + TestOnnxNumpy().test_arange_int00() unittest.main(verbosity=2) diff --git a/onnx_array_api/npx/npx_graph_builder.py b/onnx_array_api/npx/npx_graph_builder.py index 563406c..3da2280 100644 --- a/onnx_array_api/npx/npx_graph_builder.py +++ b/onnx_array_api/npx/npx_graph_builder.py @@ -271,7 +271,7 @@ def make_node( self.nodes_.append(node) def _io( - self, index: int, name: str, tensor_type: Optional[type], is_input: bool + self, index: int, name: str, tensor_type: type, is_input: bool ) -> ValueInfoProto: """ Converts an input or output into :class:`onnx.ValueInfoProto`. @@ -335,16 +335,42 @@ def _io( f"input or output {index!r}." ) tensor_type = TensorType["undefined"] - if len(tensor_type.dtypes) != 1: + + dtype_code = None + if len(tensor_type.dtypes) == 1: + dtype_code = tensor_type.dtypes[0].dtype + else: + # Case when the constraints is too broad. + # We use the input type if available. + if index < len(self.inputs_): + use = self.inputs_[index] + else: + use = None + c_name = tensor_type.name + # const = self.constraints[c_name] + for i in range(len(self.inputs_)): + name = self.inputs_[i].name + if ( + name in self.constraints + and self.constraints[name].name == c_name + ): + use = self.inputs_[i] + if use is not None: + dtype_code = DType(use.type.tensor_type.elem_type) + + if dtype_code is None: raise RuntimeError( f"tensor_type is not specific enough ({str(tensor_type)} " f"or its full representation {tensor_type!r}, " - f"is_input={is_input}, index={index})." + f"is_input={is_input}, index={index}/{len(self.inputs_)}, " + f"self.constraints={self.constraints!r}, " + f"self.inputs_={self.inputs_})." ) + if tensor_type.shape is None: type_proto = TypeProto() tensor_type_proto = type_proto.tensor_type - tensor_type_proto.elem_type = tensor_type.dtypes[0].dtype.code + tensor_type_proto.elem_type = dtype_code.code value_info_proto = ValueInfoProto() value_info_proto.name = name # tensor_type_proto.shape.dim.extend([]) @@ -355,7 +381,7 @@ def _io( # with fixed rank. This can be changed here and in methods # `make_key`. shape = [None for _ in tensor_type.shape] - info = make_tensor_value_info(name, tensor_type.dtypes[0].dtype.code, shape) + info = make_tensor_value_info(name, dtype_code.code, shape) # check_value_info fails if the shape is left undefined check_value_info(info, self.check_context) return info diff --git a/onnx_array_api/npx/npx_numpy_tensors.py b/onnx_array_api/npx/npx_numpy_tensors.py index d0dc056..4338b90 100644 --- a/onnx_array_api/npx/npx_numpy_tensors.py +++ b/onnx_array_api/npx/npx_numpy_tensors.py @@ -131,7 +131,8 @@ def tensor_type_dims(self) -> TensorType: Different keys usually means same ONNX graph but different input shapes. 
""" - return TensorType[self.dtype, self.dims] + dt = self.dtype + return TensorType[dt, self.dims, f"xi{dt.code}"] @classmethod def create_function(cls: Any, input_names: List[str], onx: ModelProto) -> Callable: diff --git a/onnx_array_api/npx/npx_types.py b/onnx_array_api/npx/npx_types.py index 8ef66d2..d0602a2 100644 --- a/onnx_array_api/npx/npx_types.py +++ b/onnx_array_api/npx/npx_types.py @@ -113,23 +113,23 @@ class ElemTypeCstInner(WrapperType): __slots__ = [] - undefined = DType(0) - bool_ = DType(9) - int8 = DType(3) - int16 = DType(5) - int32 = DType(6) - int64 = DType(7) - uint8 = DType(2) - uint16 = DType(4) - uint32 = DType(12) - uint64 = DType(13) - float16 = DType(10) - float32 = DType(1) - float64 = DType(11) - bfloat16 = DType(16) - complex64 = DType(14) - complex128 = DType(15) - str_ = DType(8) + undefined = DType(TensorProto.UNDEFINED) # 0 + bool_ = DType(TensorProto.BOOL) # 9 + int8 = DType(TensorProto.INT8) # 3 + int16 = DType(TensorProto.INT16) # 5 + int32 = DType(TensorProto.INT32) # 6 + int64 = DType(TensorProto.INT64) # 7 + uint8 = DType(TensorProto.UINT8) # 2 + uint16 = DType(TensorProto.UINT16) # 4 + uint32 = DType(TensorProto.UINT32) # 12 + uint64 = DType(TensorProto.UINT64) # 13 + float16 = DType(TensorProto.FLOAT16) # 10 + float32 = DType(TensorProto.FLOAT) # 1 + float64 = DType(TensorProto.DOUBLE) # 11 + bfloat16 = DType(TensorProto.BFLOAT16) # 16 + complex64 = DType(TensorProto.COMPLEX64) # 14 + complex128 = DType(TensorProto.COMPLEX128) # 15 + str_ = DType(TensorProto.STRING) # 8 class ElemTypeCstSet(ElemTypeCstInner): @@ -513,6 +513,10 @@ def __class_getitem__(cls, *args): ) if "<" in newt.__name__: raise NameError(f"Name is wrong {newt.__name__!r}.") + if newt.name is None: + raise RuntimeError( + f"A constraint needs a name but none is given: args={args}." 
+ ) return newt @classmethod From 3b834fe8c2fd2b6a45a4260aa6560a842c237a2c Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Thu, 22 Jun 2023 14:43:24 +0200 Subject: [PATCH 13/17] fix names --- _unittests/ut_npx/test_npx.py | 42 ++++++++++++------------- onnx_array_api/npx/npx_graph_builder.py | 2 +- onnx_array_api/npx/npx_types.py | 2 +- 3 files changed, 23 insertions(+), 23 deletions(-) diff --git a/_unittests/ut_npx/test_npx.py b/_unittests/ut_npx/test_npx.py index 1a4fcdb..7a5b33a 100644 --- a/_unittests/ut_npx/test_npx.py +++ b/_unittests/ut_npx/test_npx.py @@ -126,62 +126,62 @@ def test_shape_inference(self): self.assertEqual(output.type.tensor_type.elem_type, TensorProto.FLOAT) def test_tensor(self): - dt = TensorType["float32"] + dt = TensorType["float32", "F32"] self.assertEqual(len(dt.dtypes), 1) self.assertEqual(dt.dtypes[0].dtype, ElemType.float32) self.assertEmpty(dt.shape) - self.assertEqual(dt.type_name(), "TensorType['float32']") + self.assertEqual(dt.type_name(), "TensorType['float32', 'F32']") - dt = TensorType["float32"] + dt = TensorType["float32", "F32"] self.assertEqual(len(dt.dtypes), 1) self.assertEqual(dt.dtypes[0].dtype, ElemType.float32) - self.assertEqual(dt.type_name(), "TensorType['float32']") + self.assertEqual(dt.type_name(), "TensorType['float32', 'F32']") - dt = TensorType[np.float32] + dt = TensorType[np.float32, "F32"] self.assertEqual(len(dt.dtypes), 1) self.assertEqual(dt.dtypes[0].dtype, ElemType.float32) - self.assertEqual(dt.type_name(), "TensorType['float32']") + self.assertEqual(dt.type_name(), "TensorType['float32', 'F32']") self.assertEmpty(dt.shape) - dt = TensorType[np.str_] + dt = TensorType[np.str_, "TEXT"] self.assertEqual(len(dt.dtypes), 1) self.assertEqual(dt.dtypes[0].dtype, ElemType.str_) - self.assertEqual(dt.type_name(), "TensorType[strings]") + self.assertEqual(dt.type_name(), "TensorType[strings, 'TEXT']") self.assertEmpty(dt.shape) self.assertRaise(lambda: TensorType[None], TypeError) self.assertRaise(lambda: TensorType[{np.float32, np.str_}], TypeError) def test_opt_tensor(self): - dt = OptTensorType["float32"] + dt = OptTensorType["float32", "F32"] self.assertEqual(len(dt.dtypes), 1) self.assertEqual(dt.dtypes[0].dtype, ElemType.float32) self.assertEmpty(dt.shape) - self.assertEqual(dt.type_name(), "OptTensorType['float32']") + self.assertEqual(dt.type_name(), "OptTensorType['float32', 'F32']") - dt = OptTensorType["float32"] + dt = OptTensorType["float32", "F32"] self.assertEqual(len(dt.dtypes), 1) self.assertEqual(dt.dtypes[0].dtype, ElemType.float32) - self.assertEqual(dt.type_name(), "OptTensorType['float32']") + self.assertEqual(dt.type_name(), "OptTensorType['float32', 'F32']") - dt = OptTensorType[np.float32] + dt = OptTensorType[np.float32, "F32"] self.assertEqual(len(dt.dtypes), 1) self.assertEqual(dt.dtypes[0].dtype, ElemType.float32) - self.assertEqual(dt.type_name(), "OptTensorType['float32']") + self.assertEqual(dt.type_name(), "OptTensorType['float32', 'F32']") self.assertEmpty(dt.shape) - dt = OptTensorType[np.str_] + dt = OptTensorType[np.str_, "TEXT"] self.assertEqual(len(dt.dtypes), 1) self.assertEqual(dt.dtypes[0].dtype, ElemType.str_) - self.assertEqual(dt.type_name(), "OptTensorType[strings]") + self.assertEqual(dt.type_name(), "OptTensorType[strings, 'TEXT']") self.assertEmpty(dt.shape) self.assertRaise(lambda: TensorType[None], TypeError) self.assertRaise(lambda: TensorType[{np.float32, np.str_}], TypeError) def test_superset(self): - t1 = TensorType[ElemType.numerics] - t2 = TensorType[ElemType.float64] + 
From d368be517ea80d0eb0489115a4cc1f9809f0d8d5 Mon Sep 17 00:00:00 2001
From: Xavier Dupre
Date: Thu, 22 Jun 2023 15:02:19 +0200
Subject: [PATCH 14/17] fix missing name

---
 onnx_array_api/npx/npx_numpy_tensors.py | 2 +-
 onnx_array_api/ort/ort_tensors.py       | 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/onnx_array_api/npx/npx_numpy_tensors.py b/onnx_array_api/npx/npx_numpy_tensors.py
index 4338b90..dc87237 100644
--- a/onnx_array_api/npx/npx_numpy_tensors.py
+++ b/onnx_array_api/npx/npx_numpy_tensors.py
@@ -132,7 +132,7 @@ def tensor_type_dims(self) -> TensorType:
         input shapes.
         """
         dt = self.dtype
-        return TensorType[dt, self.dims, f"xi{dt.code}"]
+        return TensorType[dt, self.dims, f"xnp{dt.code}"]

     @classmethod
     def create_function(cls: Any, input_names: List[str], onx: ModelProto) -> Callable:
diff --git a/onnx_array_api/ort/ort_tensors.py b/onnx_array_api/ort/ort_tensors.py
index db9d4d5..3b7a5c8 100644
--- a/onnx_array_api/ort/ort_tensors.py
+++ b/onnx_array_api/ort/ort_tensors.py
@@ -191,7 +191,8 @@ def tensor_type_dims(self) -> TensorType:
         Different keys usually means same ONNX graph but different
         input shapes.
         """
-        return TensorType[self.dtype, self.dims]
+        dt = self.dtype
+        return TensorType[dt, self.dims, f"xort{dt.code}"]

     @classmethod
     def create_function(cls: Any, input_names: List[str], onx: ModelProto) -> Callable:
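At this point tensor_type_dims is still a property; the patch only makes the constraint name explicit and backend-specific, presumably so the numpy backend ("xnp" prefix) and the onnxruntime backend ("xort" prefix) cannot produce colliding cache keys for the same dtype and dims. A sketch (hypothetical session; the exact repr may differ):

    import numpy as np
    from onnx_array_api.npx.npx_numpy_tensors import EagerNumpyTensor

    v = EagerNumpyTensor(np.array([1.0], dtype=np.float32))
    # The cache key now embeds "xnp<dtype code>" for this backend,
    # "xort<dtype code>" for the onnxruntime one.
    print(v.tensor_type_dims.type_name())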
""" - return TensorType[self.dtype, self.dims] + dt = self.dtype + return TensorType[dt, self.dims, f"xort{dt.code}"] @classmethod def create_function(cls: Any, input_names: List[str], onx: ModelProto) -> Callable: From 65dbe3a755516caeeebeb1b808d0642bc1082388 Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Fri, 23 Jun 2023 00:47:39 +0200 Subject: [PATCH 15/17] fix arange --- _unittests/ut_array_api/test_onnx_numpy.py | 4 ++-- onnx_array_api/npx/npx_graph_builder.py | 1 - onnx_array_api/npx/npx_jit_eager.py | 6 +++--- onnx_array_api/npx/npx_numpy_tensors.py | 7 ++++--- onnx_array_api/ort/ort_tensors.py | 7 ++++--- 5 files changed, 13 insertions(+), 12 deletions(-) diff --git a/_unittests/ut_array_api/test_onnx_numpy.py b/_unittests/ut_array_api/test_onnx_numpy.py index 870a701..007965c 100644 --- a/_unittests/ut_array_api/test_onnx_numpy.py +++ b/_unittests/ut_array_api/test_onnx_numpy.py @@ -73,13 +73,13 @@ def test_arange_int00a(self): b = EagerTensor(np.array([0], dtype=np.int64)) mat = xp.arange(a, b) matnp = mat.numpy() - self.assertEqual(matnp.shape, (2,)) + self.assertEqual(matnp.shape, (0,)) self.assertEqualArray(matnp, np.arange(0, 0)) def test_arange_int00(self): mat = xp.arange(0, 0) matnp = mat.numpy() - self.assertEqual(matnp.shape, (2,)) + self.assertEqual(matnp.shape, (0,)) self.assertEqualArray(matnp, np.arange(0, 0)) diff --git a/onnx_array_api/npx/npx_graph_builder.py b/onnx_array_api/npx/npx_graph_builder.py index bd0627e..396cf39 100644 --- a/onnx_array_api/npx/npx_graph_builder.py +++ b/onnx_array_api/npx/npx_graph_builder.py @@ -347,7 +347,6 @@ def _io( else: use = None c_name = tensor_type.name - # const = self.constraints[c_name] for i in range(len(self.inputs_)): name = self.inputs_[i].name if ( diff --git a/onnx_array_api/npx/npx_jit_eager.py b/onnx_array_api/npx/npx_jit_eager.py index 74b8e68..58ffff6 100644 --- a/onnx_array_api/npx/npx_jit_eager.py +++ b/onnx_array_api/npx/npx_jit_eager.py @@ -235,7 +235,7 @@ def to_jit(self, *values, **kwargs): if isinstance(v, (EagerTensor, JitTensor)) and ( i >= len(annot_values) or issubclass(annot_values[i], TensorType) ): - constraints[iname] = v.tensor_type_dims + constraints[iname] = v.tensor_type_dims(annot_values[i].name) elif ( v is None and i < len(annot_values) @@ -278,14 +278,14 @@ def to_jit(self, *values, **kwargs): ) and i not in self.input_to_kwargs_ ): - constraints[iname] = v.tensor_type_dims + constraints[iname] = v.tensor_type_dims(iname) else: new_kwargs[iname] = v else: names = [f"x{i}" for i in range(len(values))] new_kwargs = {} constraints = { - iname: v.tensor_type_dims + iname: v.tensor_type_dims(iname) for i, (v, iname) in enumerate(zip(values, names)) if isinstance(v, (EagerTensor, JitTensor)) } diff --git a/onnx_array_api/npx/npx_numpy_tensors.py b/onnx_array_api/npx/npx_numpy_tensors.py index dc87237..5a41cc8 100644 --- a/onnx_array_api/npx/npx_numpy_tensors.py +++ b/onnx_array_api/npx/npx_numpy_tensors.py @@ -122,17 +122,18 @@ def shape(self) -> Tuple[int, ...]: "Returns the shape of the tensor." return self._tensor.shape - @property - def tensor_type_dims(self) -> TensorType: + def tensor_type_dims(self, name: str) -> TensorType: """ Returns the tensor type of this tensor. This property is used to define a key used to cache a jitted function. Same keys keys means same ONNX graph. Different keys usually means same ONNX graph but different input shapes. 
From 95bbeae92dcccc279001b5c807f72a3d6b8db764 Mon Sep 17 00:00:00 2001
From: Xavier Dupre
Date: Fri, 23 Jun 2023 01:08:21 +0200
Subject: [PATCH 16/17] fix arange

---
 onnx_array_api/array_api/onnx_numpy.py | 27 ++++++++++++++++++++------
 1 file changed, 21 insertions(+), 6 deletions(-)

diff --git a/onnx_array_api/array_api/onnx_numpy.py b/onnx_array_api/array_api/onnx_numpy.py
index c049dba..9aab6f8 100644
--- a/onnx_array_api/array_api/onnx_numpy.py
+++ b/onnx_array_api/array_api/onnx_numpy.py
@@ -3,6 +3,7 @@
 """
 from typing import Any, Optional
 import numpy as np
+from onnx import TensorProto
 from ..npx.npx_functions import (
     all,
     abs,
@@ -73,21 +74,35 @@ def arange(
     step: OptTensorType[ElemType.int64, "I", (1,)] = None,
     dtype: OptParType[DType] = None,
 ) -> TensorType[ElemType.numerics, "T"]:
+    use_float = any(
+        map(lambda x: isinstance(x, float), [start_or_stop, stop_or_step, step])
+    )
     if isinstance(start_or_stop, int):
-        start_or_stop = EagerNumpyTensor(np.array([start_or_stop], dtype=np.int64))
-    if isinstance(start_or_stop, float):
+        start_or_stop = EagerNumpyTensor(
+            np.array([start_or_stop], dtype=np.float64 if use_float else np.int64)
+        )
+    elif isinstance(start_or_stop, float):
         start_or_stop = EagerNumpyTensor(np.array([start_or_stop], dtype=np.float64))
+        assert use_float
     if isinstance(stop_or_step, int):
-        stop_or_step = EagerNumpyTensor(np.array([stop_or_step], dtype=np.int64))
-    if isinstance(stop_or_step, float):
+        stop_or_step = EagerNumpyTensor(
+            np.array([stop_or_step], dtype=np.float64 if use_float else np.int64)
+        )
+    elif isinstance(stop_or_step, float):
         stop_or_step = EagerNumpyTensor(np.array([stop_or_step], dtype=np.float64))
+        assert use_float
     if isinstance(step, int):
-        step = EagerNumpyTensor(np.array([step], dtype=np.int64))
-    if isinstance(step, float):
+        step = EagerNumpyTensor(
+            np.array([step], dtype=np.float64 if use_float else np.int64)
+        )
+    elif isinstance(step, float):
         step = EagerNumpyTensor(np.array([step], dtype=np.float64))
+        assert use_float
+    if dtype is None and use_float:
+        dtype = DType(TensorProto.DOUBLE)
     return generic_arange(start_or_stop, stop_or_step, step, dtype=dtype)
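The intended semantics: a single Python float among start, stop and step promotes every scalar bound to float64 and, when no dtype is given, switches the default output type to DOUBLE. A sketch of the behaviour (hypothetical session; the xp import path mirrors the unit tests and is an assumption here):

    import numpy as np
    from onnx_array_api.array_api import onnx_numpy as xp

    # All-integer bounds keep the int64 path.
    print(xp.arange(0, 5).numpy())         # expected: [0 1 2 3 4]
    # One float bound promotes the whole range to float64.
    print(xp.arange(0, 2.5, 0.5).numpy())  # expected to match np.arange(0, 2.5, 0.5)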
From f981678b7773d63be69155571db78f1e01a027c4 Mon Sep 17 00:00:00 2001
From: Xavier Dupre
Date: Fri, 23 Jun 2023 01:21:22 +0200
Subject: [PATCH 17/17] fix unit test for windows

---
 _unittests/ut_array_api/test_onnx_numpy.py | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/_unittests/ut_array_api/test_onnx_numpy.py b/_unittests/ut_array_api/test_onnx_numpy.py
index 007965c..23852c7 100644
--- a/_unittests/ut_array_api/test_onnx_numpy.py
+++ b/_unittests/ut_array_api/test_onnx_numpy.py
@@ -1,3 +1,4 @@
+import sys
 import unittest
 import numpy as np
 from onnx_array_api.ext_test_case import ExtTestCase
@@ -74,13 +75,19 @@ def test_arange_int00a(self):
         mat = xp.arange(a, b)
         matnp = mat.numpy()
         self.assertEqual(matnp.shape, (0,))
-        self.assertEqualArray(matnp, np.arange(0, 0))
+        expected = np.arange(0, 0)
+        if sys.platform == "win32":
+            expected = expected.astype(np.int64)
+        self.assertEqualArray(matnp, expected)

     def test_arange_int00(self):
         mat = xp.arange(0, 0)
         matnp = mat.numpy()
         self.assertEqual(matnp.shape, (0,))
-        self.assertEqualArray(matnp, np.arange(0, 0))
+        expected = np.arange(0, 0)
+        if sys.platform == "win32":
+            expected = expected.astype(np.int64)
+        self.assertEqualArray(matnp, expected)


 if __name__ == "__main__":