From ebf8016628992505fd038d1df67cadbb3246f227 Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Fri, 23 Jun 2023 12:57:17 +0200 Subject: [PATCH 01/20] Test array-api-tests and add ones_like --- _unittests/test_array_api.sh | 4 +- .../ut_array_api/test_hypothesis_array_api.py | 114 ++++++++++++++++++ _unittests/ut_array_api/test_onnx_numpy.py | 9 +- _unittests/ut_npx/test_npx.py | 5 +- onnx_array_api/array_api/_onnx_common.py | 2 + onnx_array_api/array_api/onnx_numpy.py | 2 + onnx_array_api/npx/npx_functions.py | 28 ++++- onnx_array_api/npx/npx_var.py | 5 +- 8 files changed, 156 insertions(+), 13 deletions(-) create mode 100644 _unittests/ut_array_api/test_hypothesis_array_api.py diff --git a/_unittests/test_array_api.sh b/_unittests/test_array_api.sh index 089aa3b..abab39b 100644 --- a/_unittests/test_array_api.sh +++ b/_unittests/test_array_api.sh @@ -1,4 +1,4 @@ export ARRAY_API_TESTS_MODULE=onnx_array_api.array_api.onnx_numpy -pytest ../array-api-tests/array_api_tests/test_creation_functions.py::test_arange || exit 1 +pytest -v -rxXfE ../array-api-tests/array_api_tests/test_creation_functions.py::test_ones_like || exit 1 # pytest ../array-api-tests/array_api_tests/test_creation_functions.py --help -pytest ../array-api-tests/array_api_tests/test_creation_functions.py --hypothesis-explain --skips-file=_unittests/onnx-numpy-skips.txt || exit 1 +pytest -v -rxXfE ../array-api-tests/array_api_tests/test_creation_functions.py --hypothesis-explain --skips-file=_unittests/onnx-numpy-skips.txt || exit 1 diff --git a/_unittests/ut_array_api/test_hypothesis_array_api.py b/_unittests/ut_array_api/test_hypothesis_array_api.py new file mode 100644 index 0000000..f9a51f9 --- /dev/null +++ b/_unittests/ut_array_api/test_hypothesis_array_api.py @@ -0,0 +1,114 @@ +import unittest +from os import getenv +from functools import reduce +from operator import mul +import numpy as np +from numpy import array_api as xp +from hypothesis import given +from onnx_array_api.ext_test_case import ExtTestCase +from onnx_array_api.array_api import onnx_numpy as onxp +from hypothesis import strategies +from hypothesis.extra import array_api + + +def prod(seq): + return reduce(mul, seq, 1) + + +@strategies.composite +def array_api_kwargs(draw, **kw): + result = {} + for k, strat in kw.items(): + if draw(strategies.booleans()): + result[k] = draw(strat) + return result + + +def shapes(xp, **kw): + kw.setdefault("min_dims", 0) + kw.setdefault("min_side", 0) + + def sh(x): + return x + + return xp.array_shapes(**kw).filter( + lambda shape: prod(i for i in sh(shape) if i) + < TestHypothesisArraysApis.MAX_ARRAY_SIZE + ) + + +class TestHypothesisArraysApis(ExtTestCase): + MAX_ARRAY_SIZE = 10000 + VERSION = "2021.12" + + @classmethod + def setUpClass(cls): + api_version = getenv( + "ARRAY_API_TESTS_VERSION", + getattr(xp, "__array_api_version__", TestHypothesisArraysApis.VERSION), + ) + cls.xps = array_api.make_strategies_namespace(xp, api_version=api_version) + api_version = getenv( + "ARRAY_API_TESTS_VERSION", + getattr(onxp, "__array_api_version__", TestHypothesisArraysApis.VERSION), + ) + cls.onxps = array_api.make_strategies_namespace(onxp, api_version=api_version) + + def test_strategies(self): + self.assertNotEmpty(self.xps) + self.assertNotEmpty(self.onxps) + + def test_scalar_strategies(self): + dtypes = dict( + integer_dtypes=self.xps.integer_dtypes(), + uinteger_dtypes=self.xps.unsigned_integer_dtypes(), + floating_dtypes=self.xps.floating_dtypes(), + numeric_dtypes=self.xps.numeric_dtypes(), + 
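+            # every entry of this dict is a hypothesis strategy drawing
+            # a random dtype from one category of the array API namespace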
boolean_dtypes=self.xps.boolean_dtypes(), + scalar_dtypes=self.xps.scalar_dtypes(), + ) + + dtypes_onnx = dict( + integer_dtypes=self.onxps.integer_dtypes(), + uinteger_dtypes=self.onxps.unsigned_integer_dtypes(), + floating_dtypes=self.onxps.floating_dtypes(), + numeric_dtypes=self.onxps.numeric_dtypes(), + boolean_dtypes=self.onxps.boolean_dtypes(), + scalar_dtypes=self.onxps.scalar_dtypes(), + ) + + for k, vnp in dtypes.items(): + vonxp = dtypes_onnx[k] + anp = self.xps.arrays(dtype=vnp, shape=shapes(self.xps)) + aonxp = self.onxps.arrays(dtype=vnp, shape=shapes(self.onxps)) + self.assertNotEmpty(anp) + + args_np = [] + + @given( + x=self.xps.arrays(dtype=dtypes["integer_dtypes"], shape=shapes(self.xps)), + kw=array_api_kwargs(dtype=strategies.none() | self.xps.scalar_dtypes()), + ) + def fct(x, kw): + args_np.append((x, kw)) + + fct() + self.assertEqual(len(args_np), 100) + + args_onxp = [] + + @given( + x=self.onxps.arrays( + dtype=dtypes_onnx["integer_dtypes"], shape=shapes(self.onxps) + ), + kw=array_api_kwargs(dtype=strategies.none() | self.onxps.scalar_dtypes()), + ) + def fctonx(x, kw): + args_onxp.append((x, kw)) + + fctonx() + self.assertEqual(len(args_onxp), len(args_np)) + + +if __name__ == "__main__": + unittest.main(verbosity=2) diff --git a/_unittests/ut_array_api/test_onnx_numpy.py b/_unittests/ut_array_api/test_onnx_numpy.py index 23852c7..709e13c 100644 --- a/_unittests/ut_array_api/test_onnx_numpy.py +++ b/_unittests/ut_array_api/test_onnx_numpy.py @@ -52,6 +52,13 @@ def test_ones_none(self): self.assertNotEmpty(matnp[0, 0]) self.assertEqualArray(matnp, np.ones((4, 5))) + def test_ones_like(self): + x = np.array([5, 6], dtype=np.int8) + y = np.ones_like(x) + a = EagerTensor(x) + b = xp.ones_like(a) + self.assertEqualArray(y, b.numpy()) + def test_full(self): c = EagerTensor(np.array([4, 5], dtype=np.int64)) mat = xp.full(c, fill_value=5, dtype=xp.int64) @@ -91,5 +98,5 @@ def test_arange_int00(self): if __name__ == "__main__": - TestOnnxNumpy().test_arange_int00() + # TestOnnxNumpy().test_arange_int00() unittest.main(verbosity=2) diff --git a/_unittests/ut_npx/test_npx.py b/_unittests/ut_npx/test_npx.py index 7a5b33a..bea35da 100644 --- a/_unittests/ut_npx/test_npx.py +++ b/_unittests/ut_npx/test_npx.py @@ -1184,7 +1184,7 @@ def test_shape_t(self): self.assertEqualArray(z, got[0]) def test_astype(self): - f = absolute_inline(copy_inline(Input("A")).astype(np.float32)) + f = absolute_inline(copy_inline(Input("A")).astype(DType(TensorProto.FLOAT))) self.assertIsInstance(f, Var) onx = f.to_onnx(constraints={"A": Float64[None]}) x = np.array([[-5, 6]], dtype=np.float64) @@ -1204,7 +1204,7 @@ def test_astype_dtype(self): self.assertEqualArray(z, got[0]) def test_astype_int(self): - f = absolute_inline(copy_inline(Input("A")).astype(1)) + f = absolute_inline(copy_inline(Input("A")).astype(DType(1))) self.assertIsInstance(f, Var) onx = f.to_onnx(constraints={"A": Float64[None]}) x = np.array([[-5, 6]], dtype=np.float64) @@ -2576,5 +2576,4 @@ def test_get_item_i8(self): if __name__ == "__main__": - TestNpx().test_filter() unittest.main(verbosity=2) diff --git a/onnx_array_api/array_api/_onnx_common.py b/onnx_array_api/array_api/_onnx_common.py index f832b72..8bbab9a 100644 --- a/onnx_array_api/array_api/_onnx_common.py +++ b/onnx_array_api/array_api/_onnx_common.py @@ -54,6 +54,8 @@ def template_asarray( else: raise RuntimeError(f"Unexpected type {type(a)} for the first input.") if dtype is not None: + if not isinstance(dtype, DType): + raise TypeError(f"dtype must be a DType 
not {type(dtype)}.") vt = v.astype(dtype) else: vt = v diff --git a/onnx_array_api/array_api/onnx_numpy.py b/onnx_array_api/array_api/onnx_numpy.py index 9aab6f8..4d9084a 100644 --- a/onnx_array_api/array_api/onnx_numpy.py +++ b/onnx_array_api/array_api/onnx_numpy.py @@ -13,6 +13,7 @@ isdtype, isfinite, isnan, + ones_like, reshape, take, ) @@ -47,6 +48,7 @@ "isfinite", "isnan", "ones", + "ones_like", "reshape", "take", "zeros", diff --git a/onnx_array_api/npx/npx_functions.py b/onnx_array_api/npx/npx_functions.py index 27147c4..0995af7 100644 --- a/onnx_array_api/npx/npx_functions.py +++ b/onnx_array_api/npx/npx_functions.py @@ -4,7 +4,6 @@ from onnx import FunctionProto, ModelProto, NodeProto, TensorProto from onnx.helper import make_tensor, tensor_dtype_to_np_dtype from onnx.numpy_helper import from_array -from .._helpers import np_dtype_to_tensor_dtype from .npx_constants import FUNCTION_DOMAIN from .npx_core_api import cst, make_tuple, npxapi_inline, npxapi_no_inline, var from .npx_types import ( @@ -225,7 +224,7 @@ def arctanh( @npxapi_inline def astype( - a: TensorType[ElemType.numerics, "T1"], dtype: OptParType[DType] = 1 + a: TensorType[ElemType.numerics, "T1"], dtype: ParType[DType] = 1 ) -> TensorType[ElemType.numerics, "T2"]: """ Cast an array. @@ -234,8 +233,9 @@ def astype( raise TypeError( f"dtype is an attribute, it cannot be a Variable of type {type(dtype)}." ) - to = np_dtype_to_tensor_dtype(dtype) - return var(a, op="Cast", to=to) + if not isinstance(dtype, DType): + raise TypeError(f"dtype must of type DType, not {type(DType)}.") + return var(a, op="Cast", to=to.code) @npxapi_inline @@ -549,6 +549,26 @@ def ones( ) +@npxapi_inline +def ones_like( + x: TensorType[ElemType.allowed, "T"], + dtype: OptParType[DType] = None, +) -> TensorType[ElemType.numerics, "T"]: + """ + Implements :func:`numpy.zeros`. 
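+
+    More precisely it mirrors :func:`numpy.ones_like`: a constant of
+    shape ``x.shape`` filled with 1 is built with ``ConstantOfShape``
+    and, when ``dtype`` is None, ``CastLike`` aligns the result with
+    the input dtype. A minimal usage sketch through the eager numpy
+    namespace (imports as in the unit tests of this series)::
+
+        import numpy as np
+        from onnx_array_api.npx.npx_numpy_tensors import EagerNumpyTensor
+        from onnx_array_api.array_api import onnx_numpy as xp
+
+        a = EagerNumpyTensor(np.array([5, 6], dtype=np.int8))
+        b = xp.ones_like(a)  # ones with the same shape and dtype as a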
+ """ + o = make_tensor( + name="one", + data_type=TensorProto.INT64 if dtype is None else dtype.code, + dims=[1], + vals=[1], + ) + v = var(x.shape, value=o, op="ConstantOfShape") + if dtype is None: + return var(v, x, op="CastLike") + return v + + @npxapi_inline def pad( x: TensorType[ElemType.numerics, "T"], diff --git a/onnx_array_api/npx/npx_var.py b/onnx_array_api/npx/npx_var.py index 90022c6..3341e46 100644 --- a/onnx_array_api/npx/npx_var.py +++ b/onnx_array_api/npx/npx_var.py @@ -1,7 +1,6 @@ from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np from onnx import FunctionProto, ModelProto, NodeProto, TensorProto -from .._helpers import np_dtype_to_tensor_dtype from .npx_array_api import BaseArrayApi, ArrayApiError from .npx_constants import DEFAULT_OPSETS, ONNX_DOMAIN from .npx_types import ( @@ -847,8 +846,8 @@ def astype(self, dtype) -> "Var": if isinstance(dtype, Var): return var(self.self_var, dtype, op="CastLike") - if not isinstance(dtype, int): - dtype = np_dtype_to_tensor_dtype(dtype) + if not isinstance(dtype, DType): + raise TypeError(f"dtype cannot be {type(dtype)}.") return var(self.self_var, op="Cast", to=dtype) @property From 762c37cb3d91dcb755363ba4db37d89259ccace4 Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Fri, 23 Jun 2023 13:20:59 +0200 Subject: [PATCH 02/20] ruff --- _unittests/ut_array_api/test_hypothesis_array_api.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/_unittests/ut_array_api/test_hypothesis_array_api.py b/_unittests/ut_array_api/test_hypothesis_array_api.py index f9a51f9..a1ab203 100644 --- a/_unittests/ut_array_api/test_hypothesis_array_api.py +++ b/_unittests/ut_array_api/test_hypothesis_array_api.py @@ -2,7 +2,6 @@ from os import getenv from functools import reduce from operator import mul -import numpy as np from numpy import array_api as xp from hypothesis import given from onnx_array_api.ext_test_case import ExtTestCase @@ -80,8 +79,9 @@ def test_scalar_strategies(self): for k, vnp in dtypes.items(): vonxp = dtypes_onnx[k] anp = self.xps.arrays(dtype=vnp, shape=shapes(self.xps)) - aonxp = self.onxps.arrays(dtype=vnp, shape=shapes(self.onxps)) + aonxp = self.onxps.arrays(dtype=vonxp, shape=shapes(self.onxps)) self.assertNotEmpty(anp) + self.assertNotEmpty(aonxp) args_np = [] From cd90b337b5339a3ecc49ab6379c03296b0cd50ee Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Sat, 24 Jun 2023 11:23:26 +0200 Subject: [PATCH 03/20] requirements --- requirements-dev.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements-dev.txt b/requirements-dev.txt index cc2105e..07fd7c3 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -3,6 +3,7 @@ black coverage flake8 furo +hypothesis isort joblib lightgbm From 10522cf16482ef77b5dd3ee7a5bf6a7fc8dcace9 Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Sat, 24 Jun 2023 12:07:44 +0200 Subject: [PATCH 04/20] */ --- .../ut_array_api/test_hypothesis_array_api.py | 17 +- _unittests/ut_npx/test_npx.py | 4 +- _unittests/ut_ort/test_ort_tensor.py | 5 +- onnx_array_api/array_api/onnx_numpy.py | 8 + onnx_array_api/npx/npx_functions.py | 159 +++++++++++++----- 5 files changed, 141 insertions(+), 52 deletions(-) diff --git a/_unittests/ut_array_api/test_hypothesis_array_api.py b/_unittests/ut_array_api/test_hypothesis_array_api.py index a1ab203..4e50ac4 100644 --- a/_unittests/ut_array_api/test_hypothesis_array_api.py +++ b/_unittests/ut_array_api/test_hypothesis_array_api.py @@ -1,8 +1,8 @@ import unittest +import warnings from os import 
getenv from functools import reduce from operator import mul -from numpy import array_api as xp from hypothesis import given from onnx_array_api.ext_test_case import ExtTestCase from onnx_array_api.array_api import onnx_numpy as onxp @@ -42,6 +42,10 @@ class TestHypothesisArraysApis(ExtTestCase): @classmethod def setUpClass(cls): + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + from numpy import array_api as xp + api_version = getenv( "ARRAY_API_TESTS_VERSION", getattr(xp, "__array_api_version__", TestHypothesisArraysApis.VERSION), @@ -97,12 +101,11 @@ def fct(x, kw): args_onxp = [] - @given( - x=self.onxps.arrays( - dtype=dtypes_onnx["integer_dtypes"], shape=shapes(self.onxps) - ), - kw=array_api_kwargs(dtype=strategies.none() | self.onxps.scalar_dtypes()), - ) + xshape = shapes(self.onxps) + xx = self.onxps.arrays(dtype=dtypes_onnx["integer_dtypes"], shape=xshape) + kw = array_api_kwargs(dtype=strategies.none() | self.onxps.scalar_dtypes()) + + @given(x=xx, kw=kw) def fctonx(x, kw): args_onxp.append((x, kw)) diff --git a/_unittests/ut_npx/test_npx.py b/_unittests/ut_npx/test_npx.py index bea35da..a47309e 100644 --- a/_unittests/ut_npx/test_npx.py +++ b/_unittests/ut_npx/test_npx.py @@ -1509,7 +1509,7 @@ def test_hstack(self): self.assertEqualArray(z, got[0]) def test_identity(self): - f = identity_inline(2, dtype=np.float64) + f = identity_inline(n=2, dtype=np.float64) onx = f.to_onnx(constraints={(0, False): Float64[None]}) self.assertIn('name: "dtype"', str(onx)) z = np.identity(2).astype(np.float64) @@ -1518,7 +1518,7 @@ def test_identity(self): self.assertEqualArray(z, got[0]) def test_identity_uint8(self): - f = identity_inline(2, dtype=np.uint8) + f = identity_inline(n=2, dtype=np.uint8) onx = f.to_onnx(constraints={(0, False): Float64[None]}) self.assertIn('name: "dtype"', str(onx)) z = np.identity(2).astype(np.uint8) diff --git a/_unittests/ut_ort/test_ort_tensor.py b/_unittests/ut_ort/test_ort_tensor.py index b673557..a2c8f15 100644 --- a/_unittests/ut_ort/test_ort_tensor.py +++ b/_unittests/ut_ort/test_ort_tensor.py @@ -2,6 +2,7 @@ from contextlib import redirect_stdout from io import StringIO import numpy as np +from onnx import TensorProto from onnx.defs import onnx_opset_version from onnx.reference import ReferenceEvaluator from onnxruntime import InferenceSession @@ -193,7 +194,7 @@ def impl(xa, xb): raise AssertionError(f"Function is not using argument:\n{onx}") def test_astype(self): - f = absolute_inline(copy_inline(Input("A")).astype(np.float32)) + f = absolute_inline(copy_inline(Input("A")).astype(DType(TensorProto.FLOAT))) onx = f.to_onnx(constraints={"A": Float64[None]}) x = np.array([[-5, 6]], dtype=np.float64) z = np.abs(x.astype(np.float32)) @@ -204,7 +205,7 @@ def test_astype(self): self.assertEqualArray(z, got[0]) def test_astype0(self): - f = absolute_inline(copy_inline(Input("A")).astype(np.float32)) + f = absolute_inline(copy_inline(Input("A")).astype(DType(TensorProto.FLOAT))) onx = f.to_onnx(constraints={"A": Float64[None]}) x = np.array(-5, dtype=np.float64) z = np.abs(x.astype(np.float32)) diff --git a/onnx_array_api/array_api/onnx_numpy.py b/onnx_array_api/array_api/onnx_numpy.py index 4d9084a..f2d9645 100644 --- a/onnx_array_api/array_api/onnx_numpy.py +++ b/onnx_array_api/array_api/onnx_numpy.py @@ -57,6 +57,8 @@ def asarray( a: Any, + /, + *, dtype: Optional[DType] = None, order: Optional[str] = None, like: Any = None, @@ -72,8 +74,10 @@ def asarray( def arange( start_or_stop: TensorType[ElemType.int64, "I", (1,)], + /, 
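+    # a single positional value is the stop (start defaults to 0), two
+    # values are start/stop, three are start/stop/step, as in numpy.arange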
stop_or_step: OptTensorType[ElemType.int64, "I", (1,)] = None, step: OptTensorType[ElemType.int64, "I", (1,)] = None, + *, dtype: OptParType[DType] = None, ) -> TensorType[ElemType.numerics, "T"]: use_float = any( @@ -110,6 +114,7 @@ def arange( def ones( shape: TensorType[ElemType.int64, "I", (None,)], + *, dtype: OptParType[DType] = None, order: OptParType[str] = "C", ) -> TensorType[ElemType.numerics, "T"]: @@ -128,6 +133,7 @@ def ones( def empty( shape: TensorType[ElemType.int64, "I", (None,)], + *, dtype: OptParType[DType] = None, order: OptParType[str] = "C", ) -> TensorType[ElemType.numerics, "T"]: @@ -139,6 +145,7 @@ def empty( def zeros( shape: TensorType[ElemType.int64, "I", (None,)], + *, dtype: OptParType[DType] = None, order: OptParType[str] = "C", ) -> TensorType[ElemType.numerics, "T"]: @@ -158,6 +165,7 @@ def zeros( def full( shape: TensorType[ElemType.int64, "I", (None,)], fill_value: ParType[Scalar] = None, + *, dtype: OptParType[DType] = None, order: OptParType[str] = "C", ) -> TensorType[ElemType.numerics, "T"]: diff --git a/onnx_array_api/npx/npx_functions.py b/onnx_array_api/npx/npx_functions.py index 0995af7..6e382ba 100644 --- a/onnx_array_api/npx/npx_functions.py +++ b/onnx_array_api/npx/npx_functions.py @@ -29,14 +29,14 @@ def _cstv(x): @npxapi_inline -def abs(x: TensorType[ElemType.numerics, "T"]) -> TensorType[ElemType.numerics, "T"]: +def abs(x: TensorType[ElemType.numerics, "T"], /) -> TensorType[ElemType.numerics, "T"]: "See :func:`numpy.abs`." return var(x, op="Abs") @npxapi_inline def absolute( - x: TensorType[ElemType.numerics, "T"] + x: TensorType[ElemType.numerics, "T"], / ) -> TensorType[ElemType.numerics, "T"]: "See :func:`numpy.abs`." return var(x, op="Abs") @@ -45,6 +45,8 @@ def absolute( @npxapi_inline def all( x: TensorType[ElemType.bool_, "T"], + /, + *, axis: OptTensorType[ElemType.int64, "I"] = None, keepdims: ParType[int] = 0, ) -> TensorType[ElemType.bool_, "T"]: @@ -79,6 +81,8 @@ def all( @npxapi_inline def amax( x: TensorType[ElemType.numerics, "T"], + /, + *, axis: OptParType[int] = 0, keepdims: OptParType[int] = 0, ) -> TensorType[ElemType.numerics, "T"]: @@ -91,6 +95,8 @@ def amax( @npxapi_inline def amin( x: TensorType[ElemType.numerics, "T"], + /, + *, axis: OptParType[int] = 0, keepdims: OptParType[int] = 0, ) -> TensorType[ElemType.numerics, "T"]: @@ -113,6 +119,7 @@ def arange( "I", (1,), ], + /, stop_or_step: OptTensorType[ { ElemType.int16, @@ -135,6 +142,7 @@ def arange( "I", (1,), ] = None, + *, dtype: OptParType[DType] = None, ) -> TensorType[ElemType.numerics, "T"]: "See :func:`numpy.arccos`." @@ -157,14 +165,16 @@ def arange( @npxapi_inline -def arccos(x: TensorType[ElemType.numerics, "T"]) -> TensorType[ElemType.numerics, "T"]: +def arccos( + x: TensorType[ElemType.numerics, "T"], / +) -> TensorType[ElemType.numerics, "T"]: "See :func:`numpy.arccos`." return var(x, op="Acos") @npxapi_inline def arccosh( - x: TensorType[ElemType.numerics, "T"] + x: TensorType[ElemType.numerics, "T"], / ) -> TensorType[ElemType.numerics, "T"]: "See :func:`numpy.arccosh`." 
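     # the bare "/" makes x positional-only, matching the signatures
     # mandated by the array API specification for element-wise functions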
return var(x, op="Acosh") @@ -173,6 +183,8 @@ def arccosh( @npxapi_inline def argmax( x: TensorType[ElemType.numerics, "T"], + /, + *, axis: OptParType[int] = 0, keepdims: OptParType[int] = 0, ) -> TensorType[ElemType.numerics, "T"]: @@ -185,6 +197,8 @@ def argmax( @npxapi_inline def argmin( x: TensorType[ElemType.numerics, "T"], + /, + *, axis: OptParType[int] = 0, keepdims: OptParType[int] = 0, ) -> TensorType[ElemType.numerics, "T"]: @@ -195,14 +209,16 @@ def argmin( @npxapi_inline -def arcsin(x: TensorType[ElemType.numerics, "T"]) -> TensorType[ElemType.numerics, "T"]: +def arcsin( + x: TensorType[ElemType.numerics, "T"], / +) -> TensorType[ElemType.numerics, "T"]: "See :func:`numpy.arcsin`." return var(x, op="Asin") @npxapi_inline def arcsinh( - x: TensorType[ElemType.numerics, "T"] + x: TensorType[ElemType.numerics, "T"], / ) -> TensorType[ElemType.numerics, "T"]: "See :func:`numpy.arcsinh`." return var(x, op="Asinh") @@ -216,7 +232,7 @@ def arctan(x: TensorType[ElemType.numerics, "T"]) -> TensorType[ElemType.numeric @npxapi_inline def arctanh( - x: TensorType[ElemType.numerics, "T"] + x: TensorType[ElemType.numerics, "T"], / ) -> TensorType[ElemType.numerics, "T"]: "See :func:`numpy.arctanh`." return var(x, op="Atanh") @@ -224,7 +240,7 @@ def arctanh( @npxapi_inline def astype( - a: TensorType[ElemType.numerics, "T1"], dtype: ParType[DType] = 1 + a: TensorType[ElemType.numerics, "T1"], dtype: ParType[DType] = 1, / ) -> TensorType[ElemType.numerics, "T2"]: """ Cast an array. @@ -234,7 +250,16 @@ def astype( f"dtype is an attribute, it cannot be a Variable of type {type(dtype)}." ) if not isinstance(dtype, DType): - raise TypeError(f"dtype must of type DType, not {type(DType)}.") + if dtype is int: + to = DType(TensorProto.INT64) + elif dtype is float: + to = DType(TensorProto.FLOAT64) + elif dtype is bool: + to = DType(TensorProto.FLOAT64) + elif dtype is str: + to = DType(TensorProto.STRING) + else: + raise TypeError(f"dtype must of type DType, not {type(dtype)}-{dtype}.") return var(a, op="Cast", to=to.code) @@ -242,6 +267,8 @@ def astype( def cdist( xa: TensorType[ElemType.numerics, "T"], xb: TensorType[ElemType.numerics, "T"], + /, + *, metric: OptParType[str] = "euclidean", ) -> TensorType[ElemType.numerics, "T"]: """ @@ -251,7 +278,9 @@ def cdist( @npxapi_inline -def ceil(x: TensorType[ElemType.numerics, "T"]) -> TensorType[ElemType.numerics, "T"]: +def ceil( + x: TensorType[ElemType.numerics, "T"], / +) -> TensorType[ElemType.numerics, "T"]: "See :func:`numpy.ceil`." return var(x, op="Ceil") @@ -259,6 +288,7 @@ def ceil(x: TensorType[ElemType.numerics, "T"]) -> TensorType[ElemType.numerics, @npxapi_inline def clip( x: TensorType[ElemType.numerics, "T"], + /, a_min: TensorType[ElemType.numerics, "T"] = None, a_max: TensorType[ElemType.numerics, "T"] = None, ): @@ -277,6 +307,8 @@ def clip( def compress( condition: TensorType[ElemType.bool_, "B"], x: TensorType[ElemType.numerics, "T"], + /, + *, axis: OptParType[int] = None, ) -> TensorType[ElemType.numerics, "T"]: """ @@ -295,8 +327,12 @@ def compute( name: ParType[str] = None, ) -> TupleType[TensorType[ElemType.numerics, "T"]]: """ - Operator concat, handle :func:`numpy.vstack` and - :func:`numpy.hstack`. + Executes an onnx proto. 
+ + :param x: inputs + :param proto: proto to execute + :param name: model name + :return: outputs """ return var(*x, op=proto, name=name) @@ -315,13 +351,15 @@ def concat( @npxapi_inline -def cos(x: TensorType[ElemType.numerics, "T"]) -> TensorType[ElemType.numerics, "T"]: +def cos(x: TensorType[ElemType.numerics, "T"], /) -> TensorType[ElemType.numerics, "T"]: "See :func:`numpy.cos`." return var(x, op="Cos") @npxapi_inline -def cosh(x: TensorType[ElemType.numerics, "T"]) -> TensorType[ElemType.numerics, "T"]: +def cosh( + x: TensorType[ElemType.numerics, "T"], / +) -> TensorType[ElemType.numerics, "T"]: "See :func:`numpy.cosh`." return var(x, op="Cosh") @@ -329,6 +367,7 @@ def cosh(x: TensorType[ElemType.numerics, "T"]) -> TensorType[ElemType.numerics, @npxapi_inline def cumsum( x: TensorType[ElemType.numerics, "T"], + /, axis: OptTensorType[ElemType.int64, "I"] = None, ) -> TensorType[ElemType.numerics, "T"]: "See :func:`numpy.cumsum`." @@ -345,14 +384,14 @@ def cumsum( @npxapi_inline -def det(x: TensorType[ElemType.numerics, "T"]) -> TensorType[ElemType.numerics, "T"]: +def det(x: TensorType[ElemType.numerics, "T"], /) -> TensorType[ElemType.numerics, "T"]: "See :func:`numpy.linalg:det`." return var(x, op="Det") @npxapi_inline def dot( - a: TensorType[ElemType.numerics, "T"], b: TensorType[ElemType.numerics, "T"] + a: TensorType[ElemType.numerics, "T"], b: TensorType[ElemType.numerics, "T"], / ) -> TensorType[ElemType.numerics, "T"]: """ See :func:`numpy.dot` @@ -372,29 +411,31 @@ def einsum( @npxapi_inline def equal( - x: TensorType[ElemType.allowed, "T"], y: TensorType[ElemType.allowed, "T"] + x: TensorType[ElemType.allowed, "T"], y: TensorType[ElemType.allowed, "T"], / ) -> TensorType[ElemType.bool_, "T1"]: "See :func:`numpy.equal`." return var(x, y, op="Equal") @npxapi_inline -def erf(x: TensorType[ElemType.numerics, "T"]) -> TensorType[ElemType.numerics, "T"]: +def erf(x: TensorType[ElemType.numerics, "T"], /) -> TensorType[ElemType.numerics, "T"]: "See :func:`scipy.special.erf`." return var(x, op="Erf") @npxapi_inline -def exp(x: TensorType[ElemType.numerics, "T"]) -> TensorType[ElemType.numerics, "T"]: +def exp(x: TensorType[ElemType.numerics, "T"], /) -> TensorType[ElemType.numerics, "T"]: "See :func:`numpy.exp`." return var(x, op="Exp") @npxapi_inline def expand_dims( - x: TensorType[ElemType.numerics, "T"], axis: TensorType[ElemType.int64, "I"] + x: TensorType[ElemType.numerics, "T"], /, axis: TensorType[ElemType.int64, "I"] ) -> TensorType[ElemType.numerics, "T"]: - "See :func:`numpy.expand_dims`." + """ + See :func:`numpy.expand_dims`. + """ if isinstance(axis, int): axis = (axis,) if isinstance(axis, tuple): @@ -403,7 +444,9 @@ def expand_dims( @npxapi_inline -def expit(x: TensorType[ElemType.numerics, "T"]) -> TensorType[ElemType.numerics, "T"]: +def expit( + x: TensorType[ElemType.numerics, "T"], / +) -> TensorType[ElemType.numerics, "T"]: "See :func:`scipy.special.expit`." return var(x, op="Sigmoid") @@ -411,6 +454,8 @@ def expit(x: TensorType[ElemType.numerics, "T"]) -> TensorType[ElemType.numerics @npxapi_inline def full( shape: TensorType[ElemType.int64, "I", (None,)], + /, + *, dtype: OptParType[DType] = None, fill_value: ParType[Scalar] = None, order: OptParType[str] = "C", @@ -445,7 +490,9 @@ def full( @npxapi_inline -def floor(x: TensorType[ElemType.numerics, "T"]) -> TensorType[ElemType.numerics, "T"]: +def floor( + x: TensorType[ElemType.numerics, "T"], / +) -> TensorType[ElemType.numerics, "T"]: "See :func:`numpy.floor`." 
return var(x, op="Floor") @@ -461,14 +508,14 @@ def hstack( @npxapi_inline -def copy(x: TensorType[ElemType.allowed, "T"]) -> TensorType[ElemType.allowed, "T"]: +def copy(x: TensorType[ElemType.allowed, "T"], /) -> TensorType[ElemType.allowed, "T"]: "Makes a copy." return var(x, op="Identity") @npxapi_inline def identity( - n: ParType[int], dtype: OptParType[DType] = None + *, n: ParType[int], dtype: OptParType[DType] = None ) -> TensorType[ElemType.numerics, "T"]: "Makes a copy." model = var( @@ -497,25 +544,29 @@ def isdtype( @npxapi_inline -def isfinite(x: TensorType[ElemType.numerics, "T"]) -> TensorType[ElemType.bool_, "T1"]: +def isfinite( + x: TensorType[ElemType.numerics, "T"], / +) -> TensorType[ElemType.bool_, "T1"]: "See :func:`numpy.isfinite`." return var(x, op="IsInf") @npxapi_inline -def isnan(x: TensorType[ElemType.numerics, "T"]) -> TensorType[ElemType.bool_, "T1"]: +def isnan(x: TensorType[ElemType.numerics, "T"], /) -> TensorType[ElemType.bool_, "T1"]: "See :func:`numpy.isnan`." return var(x, op="IsNaN") @npxapi_inline -def log(x: TensorType[ElemType.numerics, "T"]) -> TensorType[ElemType.numerics, "T"]: +def log(x: TensorType[ElemType.numerics, "T"], /) -> TensorType[ElemType.numerics, "T"]: "See :func:`numpy.log`." return var(x, op="Log") @npxapi_inline -def log1p(x: TensorType[ElemType.numerics, "T"]) -> TensorType[ElemType.numerics, "T"]: +def log1p( + x: TensorType[ElemType.numerics, "T"], / +) -> TensorType[ElemType.numerics, "T"]: "See :func:`numpy.log1p`." x1 = var(x, var(cst(np.array([1])), x, op="CastLike"), op="Add") return var(x1, op="Log") @@ -523,7 +574,7 @@ def log1p(x: TensorType[ElemType.numerics, "T"]) -> TensorType[ElemType.numerics @npxapi_inline def matmul( - a: TensorType[ElemType.numerics, "T"], b: TensorType[ElemType.numerics, "T"] + a: TensorType[ElemType.numerics, "T"], b: TensorType[ElemType.numerics, "T"], / ) -> TensorType[ElemType.numerics, "T"]: "See :func:`numpy.matmul`." return var(a, b, op="MatMul") @@ -532,6 +583,8 @@ def matmul( @npxapi_inline def ones( shape: TensorType[ElemType.int64, "I", (None,)], + /, + *, dtype: OptParType[DType] = None, order: OptParType[str] = "C", ) -> TensorType[ElemType.numerics, "T"]: @@ -552,6 +605,8 @@ def ones( @npxapi_inline def ones_like( x: TensorType[ElemType.allowed, "T"], + /, + *, dtype: OptParType[DType] = None, ) -> TensorType[ElemType.numerics, "T"]: """ @@ -573,6 +628,7 @@ def ones_like( def pad( x: TensorType[ElemType.numerics, "T"], pads: TensorType[ElemType.int64, "I"], + /, constant_value: OptTensorType[ElemType.numerics, "T"] = None, axes: OptTensorType[ElemType.int64, "I"] = None, mode: ParType[str] = "constant", @@ -591,14 +647,16 @@ def pad( @npxapi_inline def reciprocal( - x: TensorType[ElemType.numerics, "T"] + x: TensorType[ElemType.numerics, "T"], / ) -> TensorType[ElemType.numerics, "T"]: "See :func:`numpy.reciprocal`." return var(x, op="Reciprocal") @npxapi_inline -def relu(x: TensorType[ElemType.numerics, "T"]) -> TensorType[ElemType.numerics, "T"]: +def relu( + x: TensorType[ElemType.numerics, "T"], / +) -> TensorType[ElemType.numerics, "T"]: "relu" return var(x, op="Relu") @@ -607,6 +665,7 @@ def relu(x: TensorType[ElemType.numerics, "T"]) -> TensorType[ElemType.numerics, def reshape( x: TensorType[ElemType.numerics, "T"], shape: TensorType[ElemType.int64, "I", (None,)], + /, ) -> TensorType[ElemType.numerics, "T"]: """ See :func:`numpy.reshape`. 
@@ -629,39 +688,47 @@ def reshape( @npxapi_inline -def round(x: TensorType[ElemType.numerics, "T"]) -> TensorType[ElemType.numerics, "T"]: +def round( + x: TensorType[ElemType.numerics, "T"], / +) -> TensorType[ElemType.numerics, "T"]: "See :func:`numpy.round`." return var(x, op="Round") @npxapi_inline def sigmoid( - x: TensorType[ElemType.numerics, "T"] + x: TensorType[ElemType.numerics, "T"], / ) -> TensorType[ElemType.numerics, "T"]: "See :func:`scipy.special.expit`." return var(x, op="Sigmoid") @npxapi_inline -def sign(x: TensorType[ElemType.numerics, "T"]) -> TensorType[ElemType.numerics, "T"]: +def sign( + x: TensorType[ElemType.numerics, "T"], / +) -> TensorType[ElemType.numerics, "T"]: "See :func:`numpy.sign`." return var(x, op="Sign") @npxapi_inline -def sin(x: TensorType[ElemType.numerics, "T"]) -> TensorType[ElemType.numerics, "T"]: +def sin(x: TensorType[ElemType.numerics, "T"], /) -> TensorType[ElemType.numerics, "T"]: "See :func:`numpy.sin`." return var(x, op="Sin") @npxapi_inline -def sinh(x: TensorType[ElemType.numerics, "T"]) -> TensorType[ElemType.numerics, "T"]: +def sinh( + x: TensorType[ElemType.numerics, "T"], / +) -> TensorType[ElemType.numerics, "T"]: "See :func:`numpy.sinh`." return var(x, op="Sinh") @npxapi_inline -def sqrt(x: TensorType[ElemType.numerics, "T"]) -> TensorType[ElemType.numerics, "T"]: +def sqrt( + x: TensorType[ElemType.numerics, "T"], / +) -> TensorType[ElemType.numerics, "T"]: "See :func:`numpy.sqrt`." return var(x, op="Sqrt") @@ -669,6 +736,7 @@ def sqrt(x: TensorType[ElemType.numerics, "T"]) -> TensorType[ElemType.numerics, @npxapi_inline def squeeze( x: TensorType[ElemType.numerics, "T"], + /, axis: OptTensorType[ElemType.int64, "I"] = None, ) -> TensorType[ElemType.numerics, "T"]: "See :func:`numpy.squeeze`." @@ -689,6 +757,8 @@ def squeeze( def take( data: TensorType[ElemType.numerics, "T"], indices: TensorType[ElemType.int64, "I"], + /, + *, axis: ParType[int] = 0, ) -> TensorType[ElemType.numerics, "T"]: "See :func:`numpy.take`." @@ -696,13 +766,15 @@ def take( @npxapi_inline -def tan(x: TensorType[ElemType.numerics, "T"]) -> TensorType[ElemType.numerics, "T"]: +def tan(x: TensorType[ElemType.numerics, "T"], /) -> TensorType[ElemType.numerics, "T"]: "See :func:`numpy.tan`." return var(x, op="Tan") @npxapi_inline -def tanh(x: TensorType[ElemType.numerics, "T"]) -> TensorType[ElemType.numerics, "T"]: +def tanh( + x: TensorType[ElemType.numerics, "T"], / +) -> TensorType[ElemType.numerics, "T"]: "See :func:`numpy.tanh`." return var(x, op="Tanh") @@ -711,6 +783,8 @@ def tanh(x: TensorType[ElemType.numerics, "T"]) -> TensorType[ElemType.numerics, def topk( x: TensorType[ElemType.numerics, "T"], k: TensorType[ElemType.int64, "I", (1,)], + /, + *, axis: OptParType[int] = -1, largest: OptParType[int] = 1, sorted: OptParType[int] = 1, @@ -721,7 +795,7 @@ def topk( @npxapi_inline def transpose( - x: TensorType[ElemType.numerics, "T"], perm: ParType[Tuple[int, ...]] = (1, 0) + x: TensorType[ElemType.numerics, "T"], /, *, perm: ParType[Tuple[int, ...]] = (1, 0) ) -> TensorType[ElemType.numerics, "T"]: "See :func:`numpy.transpose`." return var(x, op="Transpose", perm=list(perm)) @@ -754,6 +828,7 @@ def where( cond: TensorType[ElemType.bool_, "B"], x: TensorType[ElemType.numerics, "T"], y: TensorType[ElemType.numerics, "T"], + /, ) -> TensorType[ElemType.numerics, "T"]: "See :func:`numpy.where`." 
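     # cond, x and y follow onnx multidirectional broadcasting, which
     # matches the behaviour of numpy.where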
return var(cond, x, y, op="Where") @@ -762,6 +837,8 @@ def where( @npxapi_inline def zeros( shape: TensorType[ElemType.int64, "I", (None,)], + /, + *, dtype: OptParType[DType] = None, order: OptParType[str] = "C", ) -> TensorType[ElemType.numerics, "T"]: From 9296b47e46ef6731716fed6d916dacc5a43d5490 Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Sun, 25 Jun 2023 13:29:33 +0200 Subject: [PATCH 05/20] improvments --- .../ut_array_api/test_hypothesis_array_api.py | 3 +++ onnx_array_api/array_api/onnx_numpy.py | 2 -- onnx_array_api/npx/npx_functions.py | 6 +++-- onnx_array_api/npx/npx_jit_eager.py | 26 ++++++++++++++++--- onnx_array_api/npx/npx_numpy_tensors.py | 4 +++ 5 files changed, 34 insertions(+), 7 deletions(-) diff --git a/_unittests/ut_array_api/test_hypothesis_array_api.py b/_unittests/ut_array_api/test_hypothesis_array_api.py index 4e50ac4..8a854e0 100644 --- a/_unittests/ut_array_api/test_hypothesis_array_api.py +++ b/_unittests/ut_array_api/test_hypothesis_array_api.py @@ -114,4 +114,7 @@ def fctonx(x, kw): if __name__ == "__main__": + cl = TestHypothesisArraysApis() + cl.setUpClass() + cl.test_scalar_strategies() unittest.main(verbosity=2) diff --git a/onnx_array_api/array_api/onnx_numpy.py b/onnx_array_api/array_api/onnx_numpy.py index f2d9645..edd6e50 100644 --- a/onnx_array_api/array_api/onnx_numpy.py +++ b/onnx_array_api/array_api/onnx_numpy.py @@ -74,10 +74,8 @@ def asarray( def arange( start_or_stop: TensorType[ElemType.int64, "I", (1,)], - /, stop_or_step: OptTensorType[ElemType.int64, "I", (1,)] = None, step: OptTensorType[ElemType.int64, "I", (1,)] = None, - *, dtype: OptParType[DType] = None, ) -> TensorType[ElemType.numerics, "T"]: use_float = any( diff --git a/onnx_array_api/npx/npx_functions.py b/onnx_array_api/npx/npx_functions.py index 6e382ba..327ad88 100644 --- a/onnx_array_api/npx/npx_functions.py +++ b/onnx_array_api/npx/npx_functions.py @@ -119,7 +119,6 @@ def arange( "I", (1,), ], - /, stop_or_step: OptTensorType[ { ElemType.int16, @@ -142,7 +141,6 @@ def arange( "I", (1,), ] = None, - *, dtype: OptParType[DType] = None, ) -> TensorType[ElemType.numerics, "T"]: "See :func:`numpy.arccos`." @@ -683,6 +681,10 @@ def reshape( """ if isinstance(shape, int): shape = cst(np.array([shape], dtype=np.int64)) + return var(x, shape, op="Reshape") + if isinstance(shape, tuple) and len(shape) == 0: + shape = cst(np.array([-1], dtype=np.int64)) + return var(x, shape, op="Reshape") shape_reshaped = var(shape, cst(np.array([-1], dtype=np.int64)), op="Reshape") return var(x, shape_reshaped, op="Reshape") diff --git a/onnx_array_api/npx/npx_jit_eager.py b/onnx_array_api/npx/npx_jit_eager.py index 58ffff6..3cfae20 100644 --- a/onnx_array_api/npx/npx_jit_eager.py +++ b/onnx_array_api/npx/npx_jit_eager.py @@ -243,9 +243,22 @@ def to_jit(self, *values, **kwargs): ): constraints[iname] = annot_values[i] kwargs_to_input[iname] = i, annot_values[i] + elif ( + v is not None + and i < len(annot_values) + and issubclass(annot_values[i], TensorType) + ): + constraints[iname] = annot_values[i] + kwargs_to_input[iname] = i, annot_values[i] else: new_kwargs[iname] = v input_to_kwargs[i] = iname + if iname == "shape": + raise RuntimeError( + f"Inconsistency for function {self.f}, iname={iname!r}, " + f"i={i}, v={v!r}, annot_values={annot_values}." 
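+                        # "shape" is meant to stay a tensor input;
+                        # reaching this branch means it was about to be
+                        # demoted to a keyword argument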
+ ) + if self.input_to_kwargs_ is None: self.n_inputs_ = ( len(values) - len(input_to_kwargs) + len(kwargs_to_input) @@ -399,6 +412,13 @@ def move_input_to_kwargs( new_kwargs[self.input_to_kwargs_[i]] = v else: new_values.append(v) + if "shape" in new_kwargs: + raise RuntimeError( + f"Inconsistency for function {self.f}, " + f"values={values}, kwargs={kwargs}, ", + f"new_values={new_values}, new_kwargs={new_kwargs}, " + f"self.input_to_kwargs_={self.input_to_kwargs_}", + ) return tuple(new_values), new_kwargs def jit_call(self, *values, **kwargs): @@ -466,8 +486,8 @@ def jit_call(self, *values, **kwargs): raise RuntimeError( f"Unable to run function for key={key!r}, " f"types={[type(x) for x in values]}, " - f"dtypes={[x.dtype for x in values]}, " - f"shapes={[x.shape for x in values]}, " + f"dtypes={[getattr(x, 'dtype', type(x)) for x in values]}, " + f"shapes={[getattr(x, 'shape', len(x)) for x in values]}, " f"kwargs={kwargs}, " f"self.input_to_kwargs_={self.input_to_kwargs_}, " f"f={self.f} from module {self.f.__module__!r} " @@ -658,7 +678,7 @@ def __call__(self, *args, already_eager=False, **kwargs): tuple, slice, type, - np.ndarray, + # np.ndarray, DType, ), ), diff --git a/onnx_array_api/npx/npx_numpy_tensors.py b/onnx_array_api/npx/npx_numpy_tensors.py index 5a41cc8..675076f 100644 --- a/onnx_array_api/npx/npx_numpy_tensors.py +++ b/onnx_array_api/npx/npx_numpy_tensors.py @@ -76,6 +76,10 @@ def __repr__(self) -> str: "usual" return f"{self.__class__.__name__}({self._tensor!r})" + def __len__(self): + "usual" + return len(self._tensor) + def numpy(self): "Returns the array converted into a numpy array." return self._tensor From 9e2f971e5effc4e2d78ea5f522ac61f88636c2e2 Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Sun, 25 Jun 2023 14:35:03 +0200 Subject: [PATCH 06/20] better processing of tuple --- onnx_array_api/npx/npx_core_api.py | 4 ++-- onnx_array_api/npx/npx_jit_eager.py | 27 ++++++++++++++----------- onnx_array_api/npx/npx_numpy_tensors.py | 7 ++++++- 3 files changed, 23 insertions(+), 15 deletions(-) diff --git a/onnx_array_api/npx/npx_core_api.py b/onnx_array_api/npx/npx_core_api.py index 548a40a..9eb5861 100644 --- a/onnx_array_api/npx/npx_core_api.py +++ b/onnx_array_api/npx/npx_core_api.py @@ -74,7 +74,7 @@ def _process_parameter(fn, sig, k, v, new_pars, inline): parent_op=(fn.__module__, fn.__name__, 0), ) return - if isinstance(v, (int, float, str, tuple, DType)): + if isinstance(v, (int, float, str, DType)): if inline: new_pars[k] = v else: @@ -85,7 +85,7 @@ def _process_parameter(fn, sig, k, v, new_pars, inline): parent_op=(fn.__module__, fn.__name__, 0), ) return - if isinstance(v, (Cst, Var)): + if isinstance(v, (Cst, Var, tuple)): raise TypeError( f"Parameter {k!r} is a tensor ({type(v)}), it is not " f"supported for a named parameter." 
diff --git a/onnx_array_api/npx/npx_jit_eager.py b/onnx_array_api/npx/npx_jit_eager.py index 3cfae20..3e7f60b 100644 --- a/onnx_array_api/npx/npx_jit_eager.py +++ b/onnx_array_api/npx/npx_jit_eager.py @@ -574,6 +574,19 @@ class EagerOnnx(JitEager): :param ir_version: defines the IR version to use """ + allowed_input_types = ( + EagerTensor, + Cst, + int, + bool, + float, + tuple, + slice, + type, + # np.ndarray, + DType, + ) + def __init__( self, f: Callable, @@ -616,6 +629,7 @@ def _preprocess_constants(self, *args): new_args.append( self.tensor_class(np.array(list(n), dtype=np.int64)) ) + modified = True elif any(map(lambda t: isinstance(t, Var), n)): raise TypeError( f"Unexpected types in tuple " @@ -669,18 +683,7 @@ def __call__(self, *args, already_eager=False, **kwargs): lambda t: t is not None and not isinstance( t, - ( - EagerTensor, - Cst, - int, - bool, - float, - tuple, - slice, - type, - # np.ndarray, - DType, - ), + EagerOnnx.allowed_input_types, ), args, ) diff --git a/onnx_array_api/npx/npx_numpy_tensors.py b/onnx_array_api/npx/npx_numpy_tensors.py index 675076f..7c960a8 100644 --- a/onnx_array_api/npx/npx_numpy_tensors.py +++ b/onnx_array_api/npx/npx_numpy_tensors.py @@ -42,7 +42,12 @@ def run(self, *inputs: List["NumpyTensor"]) -> List["NumpyTensor"]: ) feeds = {} for name, inp in zip(self.input_names, inputs): - feeds[name] = None if inp is None else inp.value + if inp is None: + feeds[name] = None + continue + if not isinstance(inp, EagerTensor): + raise TypeError(f"Unexpected type {type(inp)} for input {name!r}.") + feeds[name] = inp.value res = self.ref.run(None, feeds) return list(map(self.tensor_class, res)) From e8ac2f41cb8e9c3109333918fed8253d5b907161 Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Sun, 25 Jun 2023 14:56:08 +0200 Subject: [PATCH 07/20] improvments --- onnx_array_api/npx/npx_core_api.py | 20 +++++++++++++++++ onnx_array_api/npx/npx_jit_eager.py | 2 +- onnx_array_api/npx/npx_numpy_tensors.py | 30 ++++++++++++++++++++----- onnx_array_api/ort/ort_tensors.py | 6 +++-- 4 files changed, 49 insertions(+), 9 deletions(-) diff --git a/onnx_array_api/npx/npx_core_api.py b/onnx_array_api/npx/npx_core_api.py index 9eb5861..1249273 100644 --- a/onnx_array_api/npx/npx_core_api.py +++ b/onnx_array_api/npx/npx_core_api.py @@ -8,6 +8,15 @@ from .npx_types import DType, ElemType, OptParType, ParType, TupleType from .npx_var import Cst, Input, ManyIdentity, Par, Var +# list of function arguments the API can receive as tuple. +_arg_name_as_tuple = {"perm"} + + +class args_tuple(tuple): + """Overwrites a tuple to make the distinction later in the code.""" + + pass + def cst(*args, **kwargs): """ @@ -85,6 +94,17 @@ def _process_parameter(fn, sig, k, v, new_pars, inline): parent_op=(fn.__module__, fn.__name__, 0), ) return + if isinstance(v, tuple) and k in _arg_name_as_tuple: + if inline: + new_pars[k] = args_tuple(v) + else: + new_pars[k] = Par( + k, + dtype=ParType[type(v)], + value=args_tuple, + parent_op=(fn.__module__, fn.__name__, 0), + ) + return if isinstance(v, (Cst, Var, tuple)): raise TypeError( f"Parameter {k!r} is a tensor ({type(v)}), it is not " diff --git a/onnx_array_api/npx/npx_jit_eager.py b/onnx_array_api/npx/npx_jit_eager.py index 3e7f60b..0cb7e10 100644 --- a/onnx_array_api/npx/npx_jit_eager.py +++ b/onnx_array_api/npx/npx_jit_eager.py @@ -349,7 +349,7 @@ def to_jit(self, *values, **kwargs): f"and inputs={onx.graph.input}." 
) - exe = self.tensor_class.create_function(names, onx) + exe = self.tensor_class.create_function(names, onx, f=self.f) self.info("-", "to_jit") return onx, exe diff --git a/onnx_array_api/npx/npx_numpy_tensors.py b/onnx_array_api/npx/npx_numpy_tensors.py index 7c960a8..80f530a 100644 --- a/onnx_array_api/npx/npx_numpy_tensors.py +++ b/onnx_array_api/npx/npx_numpy_tensors.py @@ -21,12 +21,24 @@ class Evaluator: """ Wraps class :class:`onnx.reference.ReferenceEvaluator` to have a signature closer to python function. + + :param tensor_class: class tensor such as :class:`NumpyTensor` + :param input_names: input names + :param onx: onnx model + :param f: unused except in error messages """ - def __init__(self, tensor_class: type, input_names: List[str], onx: ModelProto): + def __init__( + self, + tensor_class: type, + input_names: List[str], + onx: ModelProto, + f: Callable, + ): self.ref = ReferenceEvaluator(onx, new_ops=[ConstantOfShape]) self.input_names = input_names self.tensor_class = tensor_class + self._f = f def run(self, *inputs: List["NumpyTensor"]) -> List["NumpyTensor"]: """ @@ -38,15 +50,19 @@ def run(self, *inputs: List["NumpyTensor"]) -> List["NumpyTensor"]: if len(inputs) != len(self.input_names): raise ValueError( f"Expected {len(self.input_names)} inputs but got {len(inputs)}, " - f"self.input_names={self.input_names}, inputs={inputs}." + f"self.input_names={self.input_names}, " + f"inputs={inputs}, f={self._f}." ) feeds = {} for name, inp in zip(self.input_names, inputs): if inp is None: feeds[name] = None continue - if not isinstance(inp, EagerTensor): - raise TypeError(f"Unexpected type {type(inp)} for input {name!r}.") + if not isinstance(inp, (EagerTensor, JitTensor)): + raise TypeError( + f"Unexpected type {type(inp)} for input {name!r}, " + f"inp={inp!r}, f={self._f}." + ) feeds[name] = inp.value res = self.ref.run(None, feeds) return list(map(self.tensor_class, res)) @@ -145,7 +161,9 @@ def tensor_type_dims(self, name: str) -> TensorType: return TensorType[dt, self.dims, name] @classmethod - def create_function(cls: Any, input_names: List[str], onx: ModelProto) -> Callable: + def create_function( + cls: Any, input_names: List[str], onx: ModelProto, f: Callable + ) -> Callable: """ Creates a python function calling the onnx backend used by this class. @@ -153,7 +171,7 @@ def create_function(cls: Any, input_names: List[str], onx: ModelProto) -> Callab :param onx: onnx model :return: python function """ - return cls.Evaluator(cls, input_names, onx) + return cls.Evaluator(cls, input_names, onx, f=f) @classmethod def get_opsets(cls, opsets): diff --git a/onnx_array_api/ort/ort_tensors.py b/onnx_array_api/ort/ort_tensors.py index f4f447d..021ddb8 100644 --- a/onnx_array_api/ort/ort_tensors.py +++ b/onnx_array_api/ort/ort_tensors.py @@ -196,7 +196,9 @@ def tensor_type_dims(self, name: str) -> TensorType: return TensorType[dt, self.dims, name] @classmethod - def create_function(cls: Any, input_names: List[str], onx: ModelProto) -> Callable: + def create_function( + cls: Any, input_names: List[str], onx: ModelProto, f: Callable + ) -> Callable: """ Creates a python function calling the onnx backend used by this class. 
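         The extra parameter ``f`` keeps a reference to the original
         python function and is unused except in error messages.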
@@ -204,7 +206,7 @@ def create_function(cls: Any, input_names: List[str], onx: ModelProto) -> Callab :param onx: onnx model :return: python function """ - return cls.Evaluator(cls, input_names, onx) + return cls.Evaluator(cls, input_names, onx, f=f) class OrtCommon: From 65b35936e4a250ce2d5624ef81b8091a3fa9a745 Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Mon, 26 Jun 2023 09:48:06 +0200 Subject: [PATCH 08/20] add missing argument --- onnx_array_api/ort/ort_tensors.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/onnx_array_api/ort/ort_tensors.py b/onnx_array_api/ort/ort_tensors.py index 021ddb8..e0435ab 100644 --- a/onnx_array_api/ort/ort_tensors.py +++ b/onnx_array_api/ort/ort_tensors.py @@ -68,9 +68,20 @@ class Evaluator: """ Wraps class :class:`onnxruntime.InferenceSession` to have a signature closer to python function. + + :param tensor_class: class tensor such as :class:`NumpyTensor` + :param input_names: input names + :param onx: onnx model + :param f: unused except in error messages """ - def __init__(self, tensor_class: type, input_names: List[str], onx: ModelProto): + def __init__( + self, + tensor_class: type, + input_names: List[str], + onx: ModelProto, + f: Callable = None, + ): try: self.ref = InferenceSession( onx.SerializeToString(), @@ -102,6 +113,7 @@ def __init__(self, tensor_class: type, input_names: List[str], onx: ModelProto): self.tensor_class = tensor_class self.output_names = [output.name for output in self.ref._outputs_meta] self.run_options = RunOptions() + self._f = f def run(self, *inputs: List["OrtTensor"]) -> List["OrtTensor"]: """ @@ -113,7 +125,7 @@ def run(self, *inputs: List["OrtTensor"]) -> List["OrtTensor"]: if len(inputs) != len(self.input_names): raise ValueError( f"Expected {len(self.input_names)} inputs but got " - f"len(inputs)={len(inputs)}." + f"len(inputs)={len(inputs)}, f={self._f}." ) feeds = {} for name, inp in zip(self.input_names, inputs): From 27faed2a88318eec12ecead727423d6fa8de12de Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Mon, 26 Jun 2023 10:19:34 +0200 Subject: [PATCH 09/20] fix one issue --- _unittests/ut_npx/test_sklearn_array_api.py | 5 +-- onnx_array_api/npx/npx_functions.py | 3 +- onnx_array_api/npx/npx_jit_eager.py | 39 ++++++++++++++++----- 3 files changed, 35 insertions(+), 12 deletions(-) diff --git a/_unittests/ut_npx/test_sklearn_array_api.py b/_unittests/ut_npx/test_sklearn_array_api.py index 79120a9..aaf2a4f 100644 --- a/_unittests/ut_npx/test_sklearn_array_api.py +++ b/_unittests/ut_npx/test_sklearn_array_api.py @@ -34,6 +34,7 @@ def test_sklearn_array_api_linear_discriminant(self): if __name__ == "__main__": - # import logging - # logging.basicConfig(level=logging.DEBUG) + import logging + + logging.basicConfig(level=logging.DEBUG) unittest.main(verbosity=2) diff --git a/onnx_array_api/npx/npx_functions.py b/onnx_array_api/npx/npx_functions.py index 327ad88..d49d400 100644 --- a/onnx_array_api/npx/npx_functions.py +++ b/onnx_array_api/npx/npx_functions.py @@ -238,7 +238,8 @@ def arctanh( @npxapi_inline def astype( - a: TensorType[ElemType.numerics, "T1"], dtype: ParType[DType] = 1, / + a: TensorType[ElemType.numerics, "T1"], + dtype: ParType[DType] = 1, ) -> TensorType[ElemType.numerics, "T2"]: """ Cast an array. 
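     The target is given as a ``DType`` wrapping an onnx element type,
     for instance ``astype(a, DType(TensorProto.FLOAT))`` as in the
     unit tests; plain python scalar types (int, float, bool, str) are
     remapped to onnx element types first.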
diff --git a/onnx_array_api/npx/npx_jit_eager.py b/onnx_array_api/npx/npx_jit_eager.py index 0cb7e10..369f3a3 100644 --- a/onnx_array_api/npx/npx_jit_eager.py +++ b/onnx_array_api/npx/npx_jit_eager.py @@ -50,7 +50,14 @@ def __init__( self.kwargs_to_input_ = None self.method_name_ = None - def info(self, prefix: Optional[str] = None, method_name: Optional[str] = None): + def info( + self, + prefix: Optional[str] = None, + method_name: Optional[str] = None, + already_eager: Optional[bool] = None, + args: Optional[List[Any]] = None, + kwargs: Optional[Dict[str, Any]] = None, + ): """ Logs a status. """ @@ -58,7 +65,7 @@ def info(self, prefix: Optional[str] = None, method_name: Optional[str] = None): logger.info("") return logger.info( - "%s [%s.%s] nx=%d ni=%d ikw=%d kwi=%d f=%s.%s cl=%s me=%s", + "%s [%s.%s] nx=%d ni=%d ikw=%d kwi=%d f=%s.%s cl=%s me=%s ae=%s", prefix, self.__class__.__name__, method_name[:6], @@ -70,7 +77,14 @@ def info(self, prefix: Optional[str] = None, method_name: Optional[str] = None): self.f.__name__, self.tensor_class.__name__, self.method_name_ or "", + "" if already_eager is None else (1 if already_eager else 0), ) + if args is not None or kwargs is not None: + logger.debug( + "---- [%s] [%s]", + "" if args is None else str(args), + "" if kwargs is None else str(kwargs), + ) def status(self, me: str) -> str: """ @@ -214,7 +228,7 @@ def to_jit(self, *values, **kwargs): The onnx graph built by the function defines the input types and the expected number of dimensions. """ - self.info("+", "to_jit") + self.info("+", "to_jit", args=values, kwargs=kwargs) annotations = self.f.__annotations__ if len(annotations) > 0: input_to_kwargs = {} @@ -323,6 +337,7 @@ def to_jit(self, *values, **kwargs): else: kwargs = kwargs.copy() kwargs.update(new_kwargs) + self.info("=", "to_jit", args=inputs, kwargs=kwargs) try: var = self.f(*inputs, **kwargs) except TypeError as e: @@ -430,7 +445,7 @@ def jit_call(self, *values, **kwargs): indexed by the previous key. Finally, it executes the onnx graph and returns the result or the results in a tuple if there are several. """ - self.info("+", "jit_call") + self.info("+", "jit_call", args=values, kwargs=kwargs) if self.input_to_kwargs_ is None: # No jitting was ever called. try: @@ -462,7 +477,7 @@ def jit_call(self, *values, **kwargs): key = self.make_key(*values, **kwargs) if self.method_name_ is None and "method_name" in key: pos = list(key).index("method_name") - self.method_name_ = key[pos + 1] + self.method_name_ = key[pos + 2] if onx is not None: # First jitting. @@ -546,7 +561,7 @@ def __call__(self, *args, **kwargs): The method first wraps the inputs with `self.tensor_class` and converts them into python types just after. """ - self.info("+", "__call__") + self.info("+", "__call__", args=args, kwargs=kwargs) values = self.cast_to_tensor_class(args) res = self.jit_call(*values, **kwargs) res = self.cast_from_tensor_class(res) @@ -676,7 +691,9 @@ def __call__(self, *args, already_eager=False, **kwargs): EagerTensor and the returned outputs must be the same """ self.info() - self.info("+", "__call__") + self.info( + "+", "__call__", already_eager=already_eager, args=args, kwargs=kwargs + ) if already_eager: if any( map( @@ -703,7 +720,9 @@ def __call__(self, *args, already_eager=False, **kwargs): # The function was already converted into onnx # reuse it or create a new one for different types. 
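             # the cache key built by make_key combines the input
             # dtypes, shapes and keyword arguments, so any new
             # combination converts the function to onnx again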
res = self.jit_call(*values, **kwargs) - self.info("-", "1__call__") + self.info( + "-", "1__call__", already_eager=already_eager, args=args, kwargs=kwargs + ) else: # tries to call the version try: @@ -732,7 +751,9 @@ def __call__(self, *args, already_eager=False, **kwargs): # to be converted into onnx. res = self.jit_call(*values, **kwargs) self._eager_cache = True - self.info("-", "2__call__") + self.info( + "-", "2__call__", already_eager=already_eager, args=args, kwargs=kwargs + ) if already_eager: return tuple(res) return self.cast_from_tensor_class(res) From 5ef4f38e70dc531bb9f42d991612c28e627a5864 Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Mon, 26 Jun 2023 11:31:21 +0200 Subject: [PATCH 10/20] refactoring --- onnx_array_api/array_api/__init__.py | 37 +++- onnx_array_api/array_api/_onnx_common.py | 205 +++++++++++++++++++++- onnx_array_api/array_api/onnx_numpy.py | 206 +++-------------------- onnx_array_api/array_api/onnx_ort.py | 81 ++------- onnx_array_api/npx/npx_tensors.py | 9 + pyproject.toml | 3 +- 6 files changed, 287 insertions(+), 254 deletions(-) diff --git a/onnx_array_api/array_api/__init__.py b/onnx_array_api/array_api/__init__.py index cc64b8e..0ea7172 100644 --- a/onnx_array_api/array_api/__init__.py +++ b/onnx_array_api/array_api/__init__.py @@ -1,3 +1,5 @@ +from typing import Any, Callable, List, Dict +import warnings import numpy as np from onnx import TensorProto from .._helpers import np_dtype_to_tensor_dtype @@ -32,11 +34,37 @@ def _iinfo(dtype): return nres -def _finalize_array_api(module): +def array_api_wrap_function(f: Callable, TEagerTensor: type) -> Callable: + """ + Converts an eager function takeing EagerTensor into a function + available through an Array API. + + :param callable: function + :param TEagerTensor: EagerTensor class + :return: new function + """ + + def wrap(*args: List[Any], **kwargs: Dict[str, Any]) -> Any: + new_args = [] + for a in args: + if isinstance(a, np.ndarray): + b = TEagerTensor(a) + else: + b = a + new_args.append(b) + return f(TEagerTensor, *new_args, **kwargs) + + wrap.__doc__ = f.__doc__ + return wrap + + +def _finalize_array_api(module, function_names, TEagerTensor): """ Adds common attributes to Array API defined in this modules such as types. """ + from . 
import _onnx_common + module.float16 = DType(TensorProto.FLOAT16) module.float32 = DType(TensorProto.FLOAT) module.float64 = DType(TensorProto.DOUBLE) @@ -53,3 +81,10 @@ def _finalize_array_api(module): setattr(module, "str", DType(TensorProto.STRING)) setattr(module, "finfo", _finfo) setattr(module, "iinfo", _iinfo) + + for name in function_names: + f = getattr(_onnx_common, name, None) + if f is None: + warnings.warn(f"Function {name} is not available in {module}!r") + continue + setattr(module, name, array_api_wrap_function(f, TEagerTensor)) diff --git a/onnx_array_api/array_api/_onnx_common.py b/onnx_array_api/array_api/_onnx_common.py index 8bbab9a..38fc7d0 100644 --- a/onnx_array_api/array_api/_onnx_common.py +++ b/onnx_array_api/array_api/_onnx_common.py @@ -1,20 +1,61 @@ from typing import Any, Optional import numpy as np -from ..npx.npx_types import DType +from ..npx.npx_types import ( + DType, + ElemType, + OptParType, + OptTensorType, + ParType, + Scalar, + TensorType, +) +from ..npx.npx_tensors import EagerTensor from ..npx.npx_array_api import BaseArrayApi from ..npx.npx_functions import ( - copy as copy_inline, + abs as generic_abs, + absolute as generic_absolute, + all as generic_all, + arange as generic_arange, + astype as generic_astype, + copy as generic_copy, + equal as generic_equal, + full as generic_full, + isdtype as generic_isdtype, + isfinite as generic_isfinite, + isnan as generic_isnan, + ones as generic_ones, + ones_like as generic_ones_like, + reshape as generic_reshape, + take as generic_take, + zeros as generic_zeros, ) -def template_asarray( +def abs(TEagerTensor: type, *args, **kwargs): + return generic_abs(*args, **kwargs) + + +def absolute(TEagerTensor: type, *args, **kwargs): + return generic_absolute(*args, **kwargs) + + +def all(TEagerTensor: type, *args, **kwargs): + return generic_all(*args, **kwargs) + + +def asarray( TEagerTensor: type, a: Any, + /, + *, dtype: Optional[DType] = None, order: Optional[str] = None, like: Any = None, copy: bool = False, -) -> Any: +) -> EagerTensor: + """ + Converts anything into an array. + """ """ Converts anything into an array. 
""" @@ -60,3 +101,159 @@ def template_asarray( else: vt = v return vt + + +def arange( + TEagerTensor: type, + start_or_stop: EagerTensor[TensorType[ElemType.int64, "I", (1,)]], + stop_or_step: EagerTensor[OptTensorType[ElemType.int64, "I", (1,)]] = None, + step: EagerTensor[OptTensorType[ElemType.int64, "I", (1,)]] = None, + dtype: OptParType[DType] = None, +) -> EagerTensor[TensorType[ElemType.numerics, "T"]]: + use_float = any( + map(lambda x: isinstance(x, float), [start_or_stop, stop_or_step, step]) + ) + if isinstance(start_or_stop, int): + start_or_stop = TEagerTensor( + np.array([start_or_stop], dtype=np.float64 if use_float else np.int64) + ) + elif isinstance(start_or_stop, float): + start_or_stop = TEagerTensor(np.array([start_or_stop], dtype=np.float64)) + assert use_float + + if isinstance(stop_or_step, int): + stop_or_step = TEagerTensor( + np.array([stop_or_step], dtype=np.float64 if use_float else np.int64) + ) + elif isinstance(stop_or_step, float): + stop_or_step = TEagerTensor(np.array([stop_or_step], dtype=np.float64)) + assert use_float + + if isinstance(step, int): + step = TEagerTensor( + np.array([step], dtype=np.float64 if use_float else np.int64) + ) + elif isinstance(step, float): + step = TEagerTensor(np.array([step], dtype=np.float64)) + assert use_float + + if dtype is None and use_float: + dtype = DType(TensorProto.DOUBLE) + return generic_arange(start_or_stop, stop_or_step, step, dtype=dtype) + + +def astype(TEagerTensor: type, *args, **kwargs): + return generic_astype(*args, **kwargs) + + +def copy(TEagerTensor: type, *args, **kwargs): + return generic_copy(*args, **kwargs) + + +def empty( + TEagerTensor: type, + shape: EagerTensor[TensorType[ElemType.int64, "I", (None,)]], + *, + dtype: OptParType[DType] = None, + order: OptParType[str] = "C", +) -> EagerTensor[TensorType[ElemType.numerics, "T"]]: + raise RuntimeError( + "ONNX assumes there is no inplace implementation. " + "empty function is only used in that case." 
+ ) + + +def equal(TEagerTensor: type, *args, **kwargs): + return generic_equal(*args, **kwargs) + + +def full( + TEagerTensor: type, + shape: EagerTensor[TensorType[ElemType.int64, "I", (None,)]], + fill_value: ParType[Scalar] = None, + *, + dtype: OptParType[DType] = None, + order: OptParType[str] = "C", +) -> EagerTensor[TensorType[ElemType.numerics, "T"]]: + if fill_value is None: + raise TypeError("fill_value cannot be None") + value = fill_value + if isinstance(shape, tuple): + return generic_full( + TEagerTensor(np.array(shape, dtype=np.int64)), + fill_value=value, + dtype=dtype, + order=order, + ) + if isinstance(shape, int): + return generic_full( + TEagerTensor(np.array([shape], dtype=np.int64)), + fill_value=value, + dtype=dtype, + order=order, + ) + return generic_full(shape, fill_value=value, dtype=dtype, order=order) + + +def isdtype(TEagerTensor: type, *args, **kwargs): + return generic_isdtype(*args, **kwargs) + + +def isfinite(TEagerTensor: type, *args, **kwargs): + return generic_isfinite(*args, **kwargs) + + +def isnan(TEagerTensor: type, *args, **kwargs): + return generic_isnan(*args, **kwargs) + + +def ones( + TEagerTensor: type, + shape: EagerTensor[TensorType[ElemType.int64, "I", (None,)]], + *, + dtype: OptParType[DType] = None, + order: OptParType[str] = "C", +) -> EagerTensor[TensorType[ElemType.numerics, "T"]]: + if isinstance(shape, tuple): + return generic_ones( + TEagerTensor(np.array(shape, dtype=np.int64)), dtype=dtype, order=order + ) + if isinstance(shape, int): + return generic_ones( + TEagerTensor(np.array([shape], dtype=np.int64)), + dtype=dtype, + order=order, + ) + return generic_ones(shape, dtype=dtype, order=order) + + +def ones_like(TEagerTensor: type, *args, **kwargs): + return generic_ones_like(*args, **kwargs) + + +def reshape(TEagerTensor: type, *args, **kwargs): + return generic_reshape(*args, **kwargs) + + +def take(TEagerTensor: type, *args, **kwargs): + return generic_take(*args, **kwargs) + + +def zeros( + TEagerTensor: type, + shape: EagerTensor[TensorType[ElemType.int64, "I", (None,)]], + *, + dtype: OptParType[DType] = None, + order: OptParType[str] = "C", +) -> EagerTensor[TensorType[ElemType.numerics, "T"]]: + if isinstance(shape, tuple): + return generic_zeros( + TEagerTensor(np.array(shape, dtype=np.int64)), dtype=dtype, order=order + ) + if isinstance(shape, int): + return generic_zeros( + TEagerTensor(np.array([shape], dtype=np.int64)), + dtype=dtype, + order=order, + ) + return generic_zeros(shape, dtype=dtype, order=order) diff --git a/onnx_array_api/array_api/onnx_numpy.py b/onnx_array_api/array_api/onnx_numpy.py index edd6e50..14741f0 100644 --- a/onnx_array_api/array_api/onnx_numpy.py +++ b/onnx_array_api/array_api/onnx_numpy.py @@ -1,191 +1,9 @@ """ Array API valid for an :class:`EagerNumpyTensor`. """ -from typing import Any, Optional -import numpy as np -from onnx import TensorProto -from ..npx.npx_functions import ( - all, - abs, - absolute, - astype, - equal, - isdtype, - isfinite, - isnan, - ones_like, - reshape, - take, -) -from ..npx.npx_functions import arange as generic_arange -from ..npx.npx_functions import full as generic_full -from ..npx.npx_functions import ones as generic_ones -from ..npx.npx_functions import zeros as generic_zeros from ..npx.npx_numpy_tensors import EagerNumpyTensor -from ..npx.npx_types import ( - DType, - ElemType, - TensorType, - OptParType, - OptTensorType, - ParType, - Scalar, -) -from ._onnx_common import template_asarray from . 
import _finalize_array_api -__all__ = [ - "abs", - "absolute", - "all", - "arange", - "asarray", - "astype", - "empty", - "equal", - "full", - "isdtype", - "isfinite", - "isnan", - "ones", - "ones_like", - "reshape", - "take", - "zeros", -] - - -def asarray( - a: Any, - /, - *, - dtype: Optional[DType] = None, - order: Optional[str] = None, - like: Any = None, - copy: bool = False, -) -> EagerNumpyTensor: - """ - Converts anything into an array. - """ - return template_asarray( - EagerNumpyTensor, a, dtype=dtype, order=order, like=like, copy=copy - ) - - -def arange( - start_or_stop: TensorType[ElemType.int64, "I", (1,)], - stop_or_step: OptTensorType[ElemType.int64, "I", (1,)] = None, - step: OptTensorType[ElemType.int64, "I", (1,)] = None, - dtype: OptParType[DType] = None, -) -> TensorType[ElemType.numerics, "T"]: - use_float = any( - map(lambda x: isinstance(x, float), [start_or_stop, stop_or_step, step]) - ) - if isinstance(start_or_stop, int): - start_or_stop = EagerNumpyTensor( - np.array([start_or_stop], dtype=np.float64 if use_float else np.int64) - ) - elif isinstance(start_or_stop, float): - start_or_stop = EagerNumpyTensor(np.array([start_or_stop], dtype=np.float64)) - assert use_float - - if isinstance(stop_or_step, int): - stop_or_step = EagerNumpyTensor( - np.array([stop_or_step], dtype=np.float64 if use_float else np.int64) - ) - elif isinstance(stop_or_step, float): - stop_or_step = EagerNumpyTensor(np.array([stop_or_step], dtype=np.float64)) - assert use_float - - if isinstance(step, int): - step = EagerNumpyTensor( - np.array([step], dtype=np.float64 if use_float else np.int64) - ) - elif isinstance(step, float): - step = EagerNumpyTensor(np.array([step], dtype=np.float64)) - assert use_float - - if dtype is None and use_float: - dtype = DType(TensorProto.DOUBLE) - return generic_arange(start_or_stop, stop_or_step, step, dtype=dtype) - - -def ones( - shape: TensorType[ElemType.int64, "I", (None,)], - *, - dtype: OptParType[DType] = None, - order: OptParType[str] = "C", -) -> TensorType[ElemType.numerics, "T"]: - if isinstance(shape, tuple): - return generic_ones( - EagerNumpyTensor(np.array(shape, dtype=np.int64)), dtype=dtype, order=order - ) - if isinstance(shape, int): - return generic_ones( - EagerNumpyTensor(np.array([shape], dtype=np.int64)), - dtype=dtype, - order=order, - ) - return generic_ones(shape, dtype=dtype, order=order) - - -def empty( - shape: TensorType[ElemType.int64, "I", (None,)], - *, - dtype: OptParType[DType] = None, - order: OptParType[str] = "C", -) -> TensorType[ElemType.numerics, "T"]: - raise RuntimeError( - "ONNX assumes there is no inplace implementation. " - "empty function is only used in that case." 
- ) - - -def zeros( - shape: TensorType[ElemType.int64, "I", (None,)], - *, - dtype: OptParType[DType] = None, - order: OptParType[str] = "C", -) -> TensorType[ElemType.numerics, "T"]: - if isinstance(shape, tuple): - return generic_zeros( - EagerNumpyTensor(np.array(shape, dtype=np.int64)), dtype=dtype, order=order - ) - if isinstance(shape, int): - return generic_zeros( - EagerNumpyTensor(np.array([shape], dtype=np.int64)), - dtype=dtype, - order=order, - ) - return generic_zeros(shape, dtype=dtype, order=order) - - -def full( - shape: TensorType[ElemType.int64, "I", (None,)], - fill_value: ParType[Scalar] = None, - *, - dtype: OptParType[DType] = None, - order: OptParType[str] = "C", -) -> TensorType[ElemType.numerics, "T"]: - if fill_value is None: - raise TypeError("fill_value cannot be None") - value = fill_value - if isinstance(shape, tuple): - return generic_full( - EagerNumpyTensor(np.array(shape, dtype=np.int64)), - fill_value=value, - dtype=dtype, - order=order, - ) - if isinstance(shape, int): - return generic_full( - EagerNumpyTensor(np.array([shape], dtype=np.int64)), - fill_value=value, - dtype=dtype, - order=order, - ) - return generic_full(shape, fill_value=value, dtype=dtype, order=order) - def _finalize(): """ @@ -194,7 +12,29 @@ def _finalize(): """ from . import onnx_numpy - _finalize_array_api(onnx_numpy) + _finalize_array_api( + onnx_numpy, + [ + "abs", + "absolute", + "all", + "arange", + "asarray", + "astype", + "empty", + "equal", + "full", + "isdtype", + "isfinite", + "isnan", + "ones", + "ones_like", + "reshape", + "take", + "zeros", + ], + EagerNumpyTensor, + ) _finalize() diff --git a/onnx_array_api/array_api/onnx_ort.py b/onnx_array_api/array_api/onnx_ort.py index 56f6444..eac55f5 100644 --- a/onnx_array_api/array_api/onnx_ort.py +++ b/onnx_array_api/array_api/onnx_ort.py @@ -1,72 +1,9 @@ """ Array API valid for an :class:`EagerOrtTensor`. """ -from typing import Optional, Any -import numpy as np -from onnx import TensorProto from ..ort.ort_tensors import EagerOrtTensor -from ..npx.npx_functions import ( - all, - abs, - absolute, - astype, - equal, - isdtype, - isnan, - isfinite, - reshape, - take, -) -from ..npx.npx_types import DType, ElemType, TensorType, OptParType -from ..npx.npx_functions import zeros as generic_zeros -from ._onnx_common import template_asarray from . import _finalize_array_api -__all__ = [ - "all", - "abs", - "absolute", - "asarray", - "astype", - "equal", - "isdtype", - "isfinite", - "isnan", - "reshape", - "take", -] - - -def asarray( - a: Any, - dtype: Optional[DType] = None, - order: Optional[str] = None, - like: Any = None, - copy: bool = False, -) -> EagerOrtTensor: - """ - Converts anything into an array. - """ - return template_asarray( - EagerOrtTensor, a, dtype=dtype, order=order, like=like, copy=copy - ) - - -def zeros( - shape: TensorType[ElemType.int64, "I", (None,)], - dtype: OptParType[DType] = DType(TensorProto.FLOAT), - order: OptParType[str] = "C", -) -> TensorType[ElemType.numerics, "T"]: - if isinstance(shape, tuple): - return generic_zeros( - EagerOrtTensor(np.array(shape, dtype=np.int64)), dtype=dtype, order=order - ) - if isinstance(shape, int): - return generic_zeros( - EagerOrtTensor(np.array([shape], dtype=np.int64)), dtype=dtype, order=order - ) - return generic_zeros(shape, dtype=dtype, order=order) - def _finalize(): """ @@ -75,7 +12,23 @@ def _finalize(): """ from . 
import onnx_ort
 
-    _finalize_array_api(onnx_ort)
+    _finalize_array_api(
+        onnx_ort,
+        [
+            "all",
+            "abs",
+            "absolute",
+            "asarray",
+            "astype",
+            "equal",
+            "isdtype",
+            "isfinite",
+            "isnan",
+            "reshape",
+            "take",
+        ],
+        EagerOrtTensor,
+    )
 
 
 _finalize()
diff --git a/onnx_array_api/npx/npx_tensors.py b/onnx_array_api/npx/npx_tensors.py
index 9286ae2..b07e2d4 100644
--- a/onnx_array_api/npx/npx_tensors.py
+++ b/onnx_array_api/npx/npx_tensors.py
@@ -20,6 +20,15 @@ class EagerTensor(BaseArrayApi):
     :class:`BaseArrayApi`.
     """
 
+    @classmethod
+    def __class_getitem__(cls, tensor_type: type):
+        """
+        Returns tensor_type.
+        """
+        if not issubclass(tensor_type, TensorType):
+            raise TypeError(f"Unexpected type {tensor_type!r}.")
+        return tensor_type
+
     def __iter__(self):
         """
         The :epkg:`Array API` does not define this function (2022/12).
diff --git a/pyproject.toml b/pyproject.toml
index 9ef84cc..60043b5 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -30,8 +30,7 @@ max-complexity = 10
 [tool.ruff.per-file-ignores]
 "_doc/examples/plot_first_example.py" = ["E402", "F811"]
 "_doc/examples/plot_onnxruntime.py" = ["E402", "F811"]
-"onnx_array_api/array_api/onnx_numpy.py" = ["F821"]
-"onnx_array_api/array_api/onnx_ort.py" = ["F821"]
+"onnx_array_api/array_api/_onnx_common.py" = ["F821"]
 "onnx_array_api/npx/__init__.py" = ["F401", "F403"]
 "onnx_array_api/npx/npx_functions.py" = ["F821"]
 "onnx_array_api/npx/npx_functions_test.py" = ["F821"]
From da31640af74a9d4ac4add3b77dc4b7887201b0c2 Mon Sep 17 00:00:00 2001
From: Xavier Dupre
Date: Mon, 26 Jun 2023 12:06:41 +0200
Subject: [PATCH 11/20] documentation

---
 _doc/index.rst                             |  1 +
 _doc/techdetails.rst                       | 78 ++++++++++++++++++++++
 _unittests/ut_array_api/test_array_apis.py |  2 +-
 onnx_array_api/array_api/onnx_ort.py       |  1 +
 4 files changed, 81 insertions(+), 1 deletion(-)
 create mode 100644 _doc/techdetails.rst

diff --git a/_doc/index.rst b/_doc/index.rst
index bd87c3b..ceb2eb2 100644
--- a/_doc/index.rst
+++ b/_doc/index.rst
@@ -33,6 +33,7 @@ well as to execute it.
 
     tutorial/index
     api/index
+    techdetails
    auto_examples/index
 
     ../CHANGELOGS
diff --git a/_doc/techdetails.rst b/_doc/techdetails.rst
new file mode 100644
index 0000000..5bfb2f5
--- /dev/null
+++ b/_doc/techdetails.rst
@@ -0,0 +1,78 @@
+
+Technical details
+=================
+
+Implementing the full array API is not always easy with :epkg:`onnx`.
+Python is not strongly typed and many different types can be used
+to represent a value. Argument *axis* can be an integer or a tuple
+(see `min from Array API
+`
+for example). On the other side, the *axes* argument of `ReduceMin from ONNX
+`_
+is a tensor.
+
+Performance
++++++++++++
+
+The Array API must work in eager mode: for every operation,
+it generates an ONNX graph and executes it with a specific
+backend. It can be :epkg:`numpy`, :epkg:`onnxruntime` or any other
+backend. The generation of every graph takes a significant amount of
+time, so it must be avoided: these graphs are cached. But a cached
+graph can be reused only as long as nothing but the inputs - in the
+ONNX sense - changes. If a parameter changes, a new graph must be
+generated and cached. Method :meth:`JitEager.make_key` generates a
+unique key based on the inputs it receives and on the signature of
+the function to call. If the key is the same, a cached ONNX graph
+can be reused on the next call.
+
+However, eager mode - running a small single ONNX graph for every
+operation - is not the most efficient one. At the same time, the design
+must allow merging every needed operation into a bigger graph.
+Bigger graphs can be more easily optimized by the backend.
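+
+As an illustration, here is a minimal sketch of that caching strategy -
+hypothetical code, not the actual implementation of
+:meth:`JitEager.make_key`:
+
+::
+
+    from typing import Any, Dict, Tuple
+
+    _cache: Dict[Tuple, "ModelProto"] = {}
+
+    def make_key(*inputs: Any, **params: Any) -> Tuple:
+        # Inputs only contribute their type and rank: two calls with
+        # different values but identical signatures share a graph.
+        key: Tuple = tuple((v.dtype, len(v.shape)) for v in inputs)
+        # Parameters are frozen inside the graph, so their values are
+        # part of the key: a new value means a new graph.
+        key += tuple(sorted(params.items()))
+        return key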
+
+Input vs parameter
+++++++++++++++++++
+
+An input is a tensor or array; a parameter is any other type.
+Following ONNX semantics, an input is variable while a parameter is
+frozen and cannot be changed: it is a constant. A good design would be
+to consider any named argument (`**kwargs`) a parameter and
+any positional argument (`*args`) a tensor. But the Array API does not
+follow that design. Function `astype
+`_
+takes two inputs. Operator `Cast
+`_
+takes one input and a frozen parameter `to`.
+And Python allows `astype(x, dtype)` as well as `astype(x, dtype=dtype)`
+unless the signature enforces one style over the other.
+There may be ambiguities from time to time.
+Besides, from the ONNX point of view, argument *dtype* should be named.
+
+Tensor type
++++++++++++
+
+An :class:`EagerTensor` must be used to represent any tensor.
+This class also defines the backend to use:
+`EagerNumpyTensor` for :epkg:`numpy`, `EagerOrtTensor`
+for :epkg:`onnxruntime`. Since the Array API is new,
+existing packages do not fully support it even when they
+support part of it (:epkg:`scikit-learn`). Some numpy arrays
+may still be used.
+
+Inplace
++++++++
+
+ONNX has no notion of inplace computation. Therefore something
+like `coefs[:, 1] = 1` is not valid unless some code is written
+to create another tensor. The current design supports some of these
+patterns by storing every call to `__setitem__`. The user sees `coefs`
+but the framework sees that `coefs` holds a reference to another
+tensor; that is the one the framework needs to use. However, since
+`__setitem__` is usually chosen for efficiency, it becomes less than
+efficient with this design and should be avoided. This assumption may
+hold when the backend relies on CPU but not on GPU.
+A function such as `empty
+`_ should be avoided as it
+has to be followed by calls to `__setitem__`.
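+
+As an illustration - a minimal, hypothetical sketch rather than the
+library's actual code - the functional way to express `coefs[:, 1] = 1`
+without inplace mutation is to rebuild the tensor, which is what an
+ONNX graph has to do anyway:
+
+::
+
+    import numpy as np
+
+    def set_column(coefs, col, value):
+        # Build a boolean mask selecting the column and recombine both
+        # tensors: this maps to ONNX operators (Range, Equal, Where)
+        # instead of an inplace write.
+        mask = np.arange(coefs.shape[1]) == col
+        return np.where(mask, np.full_like(coefs, value), coefs)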
diff --git a/_unittests/ut_array_api/test_array_apis.py b/_unittests/ut_array_api/test_array_apis.py index 9a8dd7c..51ce9e6 100644 --- a/_unittests/ut_array_api/test_array_apis.py +++ b/_unittests/ut_array_api/test_array_apis.py @@ -18,7 +18,7 @@ def test_zeros_numpy_1(self): def test_zeros_ort_1(self): c = xpo.zeros(1) d = c.numpy() - self.assertEqualArray(np.array([0], dtype=np.float32), d) + self.assertEqualArray(np.array([0], dtype=np.float64), d) def test_ffinfo(self): dt = np.float32 diff --git a/onnx_array_api/array_api/onnx_ort.py b/onnx_array_api/array_api/onnx_ort.py index eac55f5..e067cd7 100644 --- a/onnx_array_api/array_api/onnx_ort.py +++ b/onnx_array_api/array_api/onnx_ort.py @@ -26,6 +26,7 @@ def _finalize(): "isnan", "reshape", "take", + "zeros", ], EagerOrtTensor, ) From cfeb1ff07002b71743e4003afc3c927e5e48584c Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Wed, 28 Jun 2023 00:55:39 +0200 Subject: [PATCH 12/20] more coverage for the functions --- _unittests/ut_npx/test_sklearn_array_api.py | 6 +- .../ut_ort/test_sklearn_array_api_ort.py | 11 +-- onnx_array_api/array_api/__init__.py | 27 ++++++++ onnx_array_api/array_api/_onnx_common.py | 15 +++++ onnx_array_api/array_api/onnx_numpy.py | 24 +------ onnx_array_api/array_api/onnx_ort.py | 19 +----- onnx_array_api/npx/npx_functions.py | 67 +++++++++++++++++-- 7 files changed, 116 insertions(+), 53 deletions(-) diff --git a/_unittests/ut_npx/test_sklearn_array_api.py b/_unittests/ut_npx/test_sklearn_array_api.py index aaf2a4f..e0dce24 100644 --- a/_unittests/ut_npx/test_sklearn_array_api.py +++ b/_unittests/ut_npx/test_sklearn_array_api.py @@ -18,8 +18,10 @@ class TestSklearnArrayAPI(ExtTestCase): ) @ignore_warnings(DeprecationWarning) def test_sklearn_array_api_linear_discriminant(self): - X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) - y = np.array([1, 1, 1, 2, 2, 2]) + X = np.array( + [[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]], dtype=np.float64 + ) + y = np.array([1, 1, 1, 2, 2, 2], dtype=np.int64) ana = LinearDiscriminantAnalysis() ana.fit(X, y) expected = ana.predict(X) diff --git a/_unittests/ut_ort/test_sklearn_array_api_ort.py b/_unittests/ut_ort/test_sklearn_array_api_ort.py index 68e6725..e4692df 100644 --- a/_unittests/ut_ort/test_sklearn_array_api_ort.py +++ b/_unittests/ut_ort/test_sklearn_array_api_ort.py @@ -17,8 +17,10 @@ class TestSklearnArrayAPIOrt(ExtTestCase): reason="reshape ArrayAPI not followed", ) def test_sklearn_array_api_linear_discriminant(self): - X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) - y = np.array([1, 1, 1, 2, 2, 2]) + X = np.array( + [[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]], dtype=np.float64 + ) + y = np.array([1, 1, 1, 2, 2, 2], dtype=np.int64) ana = LinearDiscriminantAnalysis() ana.fit(X, y) expected = ana.predict(X) @@ -34,6 +36,7 @@ def test_sklearn_array_api_linear_discriminant(self): if __name__ == "__main__": - # import logging - # logging.basicConfig(level=logging.DEBUG) + import logging + + logging.basicConfig(level=logging.DEBUG) unittest.main(verbosity=2) diff --git a/onnx_array_api/array_api/__init__.py b/onnx_array_api/array_api/__init__.py index 0ea7172..7c6b056 100644 --- a/onnx_array_api/array_api/__init__.py +++ b/onnx_array_api/array_api/__init__.py @@ -6,6 +6,30 @@ from ..npx.npx_types import DType +supported_functions = [ + "abs", + "absolute", + "all", + "any", + "arange", + "asarray", + "astype", + "empty", + "equal", + "full", + "isdtype", + "isfinite", + "isinf", + "isnan", + "ones", + 
"ones_like", + "reshape", + "sum", + "take", + "zeros", +] + + def _finfo(dtype): """ Similar to :class:`numpy.finfo`. @@ -82,6 +106,9 @@ def _finalize_array_api(module, function_names, TEagerTensor): setattr(module, "finfo", _finfo) setattr(module, "iinfo", _iinfo) + if function_names is None: + function_names = supported_functions + for name in function_names: f = getattr(_onnx_common, name, None) if f is None: diff --git a/onnx_array_api/array_api/_onnx_common.py b/onnx_array_api/array_api/_onnx_common.py index 38fc7d0..66b7641 100644 --- a/onnx_array_api/array_api/_onnx_common.py +++ b/onnx_array_api/array_api/_onnx_common.py @@ -15,6 +15,7 @@ abs as generic_abs, absolute as generic_absolute, all as generic_all, + any as generic_any, arange as generic_arange, astype as generic_astype, copy as generic_copy, @@ -22,10 +23,12 @@ full as generic_full, isdtype as generic_isdtype, isfinite as generic_isfinite, + isinf as generic_isinf, isnan as generic_isnan, ones as generic_ones, ones_like as generic_ones_like, reshape as generic_reshape, + sum as generic_sum, take as generic_take, zeros as generic_zeros, ) @@ -43,6 +46,10 @@ def all(TEagerTensor: type, *args, **kwargs): return generic_all(*args, **kwargs) +def any(TEagerTensor: type, *args, **kwargs): + return generic_any(*args, **kwargs) + + def asarray( TEagerTensor: type, a: Any, @@ -203,6 +210,10 @@ def isfinite(TEagerTensor: type, *args, **kwargs): return generic_isfinite(*args, **kwargs) +def isinf(TEagerTensor: type, *args, **kwargs): + return generic_isinf(*args, **kwargs) + + def isnan(TEagerTensor: type, *args, **kwargs): return generic_isnan(*args, **kwargs) @@ -235,6 +246,10 @@ def reshape(TEagerTensor: type, *args, **kwargs): return generic_reshape(*args, **kwargs) +def sum(TEagerTensor: type, *args, **kwargs): + return generic_sum(*args, **kwargs) + + def take(TEagerTensor: type, *args, **kwargs): return generic_take(*args, **kwargs) diff --git a/onnx_array_api/array_api/onnx_numpy.py b/onnx_array_api/array_api/onnx_numpy.py index 14741f0..cf39774 100644 --- a/onnx_array_api/array_api/onnx_numpy.py +++ b/onnx_array_api/array_api/onnx_numpy.py @@ -12,29 +12,7 @@ def _finalize(): """ from . import onnx_numpy - _finalize_array_api( - onnx_numpy, - [ - "abs", - "absolute", - "all", - "arange", - "asarray", - "astype", - "empty", - "equal", - "full", - "isdtype", - "isfinite", - "isnan", - "ones", - "ones_like", - "reshape", - "take", - "zeros", - ], - EagerNumpyTensor, - ) + _finalize_array_api(onnx_numpy, None, EagerNumpyTensor) _finalize() diff --git a/onnx_array_api/array_api/onnx_ort.py b/onnx_array_api/array_api/onnx_ort.py index e067cd7..e1427e1 100644 --- a/onnx_array_api/array_api/onnx_ort.py +++ b/onnx_array_api/array_api/onnx_ort.py @@ -12,24 +12,7 @@ def _finalize(): """ from . import onnx_ort - _finalize_array_api( - onnx_ort, - [ - "all", - "abs", - "absolute", - "asarray", - "astype", - "equal", - "isdtype", - "isfinite", - "isnan", - "reshape", - "take", - "zeros", - ], - EagerOrtTensor, - ) + _finalize_array_api(onnx_ort, None, EagerOrtTensor) _finalize() diff --git a/onnx_array_api/npx/npx_functions.py b/onnx_array_api/npx/npx_functions.py index d49d400..94de749 100644 --- a/onnx_array_api/npx/npx_functions.py +++ b/onnx_array_api/npx/npx_functions.py @@ -54,12 +54,6 @@ def all( See :func:`numpy.all`. If input x is empty, the answer is True. 
""" - # size = var(x, op="Size") - # empty = var(size, cst(np.array(0, dtype=np.int64)), op="Equal") - - # z = make_tensor_value_info("Z", TensorProto.BOOL, [1]) - # g1 = make_graph([make_node("Constant", [], ["Z"], value_bool=[True])], [], [z]) - xi = var(x, op="Cast", to=TensorProto.INT64) if axis is None: @@ -106,6 +100,35 @@ def amin( return var(x, op="ArgMin", axis=axis, keepdims=keepdims) +@npxapi_inline +def any( + x: TensorType[ElemType.bool_, "T"], + /, + *, + axis: OptTensorType[ElemType.int64, "I"] = None, + keepdims: ParType[int] = 0, +) -> TensorType[ElemType.bool_, "T"]: + """ + See :func:`numpy.any`. + """ + xi = var(x, op="Cast", to=TensorProto.INT64) + + if axis is None: + new_shape = cst(np.array([-1], dtype=np.int64)) + xifl = var(xi, new_shape, op="Reshape") + # in case xifl is empty, we need to add one element + one = cst(np.array([0], dtype=np.int64)) + xifl1 = var(xifl, one, op="Concat", axis=0) + red = xifl1.max(keepdims=keepdims) + else: + if isinstance(axis, int): + axis = [axis] + if isinstance(axis, (tuple, list)): + axis = cst(np.array(axis, dtype=np.int64)) + red = xi.max(axis, keepdims=keepdims) + return var(red, cst(1), op="Equal") + + @npxapi_inline def arange( start_or_stop: TensorType[ @@ -550,6 +573,12 @@ def isfinite( return var(x, op="IsInf") +@npxapi_inline +def isinf(x: TensorType[ElemType.numerics, "T"], /) -> TensorType[ElemType.bool_, "T1"]: + "See :func:`numpy.isnan`." + return var(x, op="IsInf") + + @npxapi_inline def isnan(x: TensorType[ElemType.numerics, "T"], /) -> TensorType[ElemType.bool_, "T1"]: "See :func:`numpy.isnan`." @@ -756,6 +785,32 @@ def squeeze( return var(x, axis, op="Squeeze") +@npxapi_inline +def sum( + x: TensorType[ElemType.numerics, "T"], + /, + axis: OptTensorType[ElemType.int64, "I"] = None, + *, + dtype: OptParType[DType] = None, + keepdims: ParType[int] = 0, +) -> TensorType[ElemType.numerics, "T"]: + "See :func:`numpy.sum`." 
+    if axis is None:
+        m1 = cst(np.array([-1], dtype=np.int64))
+        flat = var(x, m1, op="Reshape")
+        axis = cst(np.array([0], dtype=np.int64))
+        res = var(flat, axis, op="ReduceSum", keepdims=keepdims)
+    else:
+        if isinstance(axis, int):
+            axis = [axis]
+        if isinstance(axis, (tuple, list)):
+            axis = cst(np.array(axis, dtype=np.int64))
+        res = var(x, axis, op="ReduceSum", keepdims=keepdims)
+    if dtype is None:
+        return res
+    return var(res, op="Cast", to=dtype.code)
+
+
 @npxapi_inline
 def take(
     data: TensorType[ElemType.numerics, "T"],
From 4b35eac45c5a011952b8e5ee17b46d855c328c2a Mon Sep 17 00:00:00 2001
From: Xavier Dupre
Date: Wed, 28 Jun 2023 01:12:07 +0200
Subject: [PATCH 13/20] refactoring

---
 _unittests/ut_npx/test_sklearn_array_api.py   | 26 ++++++-
 _unittests/ut_ort/test_ort_tensor.py          |  1 -
 .../ut_ort/test_sklearn_array_api_ort.py      | 26 ++++++-
 onnx_array_api/array_api/__init__.py          | 10 ++-
 onnx_array_api/array_api/_onnx_common.py      | 74 +------------------
 5 files changed, 60 insertions(+), 77 deletions(-)

diff --git a/_unittests/ut_npx/test_sklearn_array_api.py b/_unittests/ut_npx/test_sklearn_array_api.py
index e0dce24..083c009 100644
--- a/_unittests/ut_npx/test_sklearn_array_api.py
+++ b/_unittests/ut_npx/test_sklearn_array_api.py
@@ -34,9 +34,31 @@ def test_sklearn_array_api_linear_discriminant(self):
             got = ana.predict(new_x)
         self.assertEqualArray(expected, got.numpy())
 
+    @unittest.skipIf(
+        Version(sklearn_version) <= Version("1.2.2"),
+        reason="reshape ArrayAPI not followed",
+    )
+    @ignore_warnings(DeprecationWarning)
+    def test_sklearn_array_api_linear_discriminant_float32(self):
+        X = np.array(
+            [[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]], dtype=np.float32
+        )
+        y = np.array([1, 1, 1, 2, 2, 2], dtype=np.int64)
+        ana = LinearDiscriminantAnalysis()
+        ana.fit(X, y)
+        expected = ana.predict(X)
+
+        new_x = EagerNumpyTensor(X)
+        self.assertStartsWith("EagerNumpyTensor(array([[", repr(new_x))
+        with config_context(array_api_dispatch=True):
+            # It fails if scikit-learn <= 1.2.2 because the ArrayAPI
+            # is not strictly applied.
+ got = ana.predict(new_x) + self.assertEqualArray(expected, got.numpy()) + if __name__ == "__main__": - import logging + # import logging - logging.basicConfig(level=logging.DEBUG) + # logging.basicConfig(level=logging.DEBUG) unittest.main(verbosity=2) diff --git a/_unittests/ut_ort/test_ort_tensor.py b/_unittests/ut_ort/test_ort_tensor.py index a2c8f15..cb4377d 100644 --- a/_unittests/ut_ort/test_ort_tensor.py +++ b/_unittests/ut_ort/test_ort_tensor.py @@ -238,5 +238,4 @@ def impl(A): if __name__ == "__main__": - # TestNpx().test_eager_numpy() unittest.main(verbosity=2) diff --git a/_unittests/ut_ort/test_sklearn_array_api_ort.py b/_unittests/ut_ort/test_sklearn_array_api_ort.py index e4692df..330f74b 100644 --- a/_unittests/ut_ort/test_sklearn_array_api_ort.py +++ b/_unittests/ut_ort/test_sklearn_array_api_ort.py @@ -34,9 +34,31 @@ def test_sklearn_array_api_linear_discriminant(self): got = ana.predict(new_x) self.assertEqualArray(expected, got.numpy()) + @unittest.skipIf( + Version(sklearn_version) <= Version("1.2.2"), + reason="reshape ArrayAPI not followed", + ) + def test_sklearn_array_api_linear_discriminant_float32(self): + X = np.array( + [[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]], dtype=np.float32 + ) + y = np.array([1, 1, 1, 2, 2, 2], dtype=np.int64) + ana = LinearDiscriminantAnalysis() + ana.fit(X, y) + expected = ana.predict(X) + + new_x = EagerOrtTensor(OrtTensor.from_array(X)) + self.assertEqual(new_x.device_name, "Cpu") + self.assertStartsWith( + "EagerOrtTensor(OrtTensor.from_array(array([[", repr(new_x) + ) + with config_context(array_api_dispatch=True): + got = ana.predict(new_x) + self.assertEqualArray(expected, got.numpy()) + if __name__ == "__main__": - import logging + # import logging - logging.basicConfig(level=logging.DEBUG) + # logging.basicConfig(level=logging.DEBUG) unittest.main(verbosity=2) diff --git a/onnx_array_api/array_api/__init__.py b/onnx_array_api/array_api/__init__.py index 7c6b056..e1e09b8 100644 --- a/onnx_array_api/array_api/__init__.py +++ b/onnx_array_api/array_api/__init__.py @@ -4,6 +4,7 @@ from onnx import TensorProto from .._helpers import np_dtype_to_tensor_dtype from ..npx.npx_types import DType +from ..npx import npx_functions supported_functions = [ @@ -112,6 +113,11 @@ def _finalize_array_api(module, function_names, TEagerTensor): for name in function_names: f = getattr(_onnx_common, name, None) if f is None: - warnings.warn(f"Function {name} is not available in {module}!r") - continue + f2 = getattr(npx_functions, name, None) + if f2 is None: + warnings.warn(f"Function {name!r} is not available in {module!r}.") + continue + f = lambda TEagerTensor, *args, _f=f2, **kwargs: _f( # noqa: E731 + *args, **kwargs + ) setattr(module, name, array_api_wrap_function(f, TEagerTensor)) diff --git a/onnx_array_api/array_api/_onnx_common.py b/onnx_array_api/array_api/_onnx_common.py index 66b7641..8f71455 100644 --- a/onnx_array_api/array_api/_onnx_common.py +++ b/onnx_array_api/array_api/_onnx_common.py @@ -13,43 +13,21 @@ from ..npx.npx_array_api import BaseArrayApi from ..npx.npx_functions import ( abs as generic_abs, - absolute as generic_absolute, - all as generic_all, - any as generic_any, arange as generic_arange, - astype as generic_astype, - copy as generic_copy, - equal as generic_equal, full as generic_full, - isdtype as generic_isdtype, - isfinite as generic_isfinite, - isinf as generic_isinf, - isnan as generic_isnan, ones as generic_ones, - ones_like as generic_ones_like, - reshape as generic_reshape, - sum as generic_sum, 
-    take as generic_take,
     zeros as generic_zeros,
 )
 
+# Functions that need no specific implementation do not have to be
+# defined here: they are automatically added in
+# :mod:`onnx_array_api.array_api`. A new function still needs
+# to be added to `onnx_array_api.array_api.supported_functions`.
 def abs(TEagerTensor: type, *args, **kwargs):
     return generic_abs(*args, **kwargs)
 
 
-def absolute(TEagerTensor: type, *args, **kwargs):
-    return generic_absolute(*args, **kwargs)
-
-
-def all(TEagerTensor: type, *args, **kwargs):
-    return generic_all(*args, **kwargs)
-
-
-def any(TEagerTensor: type, *args, **kwargs):
-    return generic_any(*args, **kwargs)
-
-
 def asarray(
     TEagerTensor: type,
     a: Any,
@@ -149,14 +127,6 @@ def arange(
     return generic_arange(start_or_stop, stop_or_step, step, dtype=dtype)
 
 
-def astype(TEagerTensor: type, *args, **kwargs):
-    return generic_astype(*args, **kwargs)
-
-
-def copy(TEagerTensor: type, *args, **kwargs):
-    return generic_copy(*args, **kwargs)
-
-
 def empty(
     TEagerTensor: type,
     shape: EagerTensor[TensorType[ElemType.int64, "I", (None,)]],
@@ -170,10 +140,6 @@ def empty(
     )
 
 
-def equal(TEagerTensor: type, *args, **kwargs):
-    return generic_equal(*args, **kwargs)
-
-
 def full(
     TEagerTensor: type,
     shape: EagerTensor[TensorType[ElemType.int64, "I", (None,)]],
@@ -202,22 +168,6 @@ def full(
     return generic_full(shape, fill_value=value, dtype=dtype, order=order)
 
 
-def isdtype(TEagerTensor: type, *args, **kwargs):
-    return generic_isdtype(*args, **kwargs)
-
-
-def isfinite(TEagerTensor: type, *args, **kwargs):
-    return generic_isfinite(*args, **kwargs)
-
-
-def isinf(TEagerTensor: type, *args, **kwargs):
-    return generic_isinf(*args, **kwargs)
-
-
-def isnan(TEagerTensor: type, *args, **kwargs):
-    return generic_isnan(*args, **kwargs)
-
-
 def ones(
     TEagerTensor: type,
     shape: EagerTensor[TensorType[ElemType.int64, "I", (None,)]],
@@ -238,22 +188,6 @@ def ones(
     return generic_ones(shape, dtype=dtype, order=order)
 
 
-def ones_like(TEagerTensor: type, *args, **kwargs):
-    return generic_ones_like(*args, **kwargs)
-
-
-def reshape(TEagerTensor: type, *args, **kwargs):
-    return generic_reshape(*args, **kwargs)
-
-
-def sum(TEagerTensor: type, *args, **kwargs):
-    return generic_sum(*args, **kwargs)
-
-
-def take(TEagerTensor: type, *args, **kwargs):
-    return generic_take(*args, **kwargs)
-
-
 def zeros(
     TEagerTensor: type,
     shape: EagerTensor[TensorType[ElemType.int64, "I", (None,)]],
From ac40233df8952cb0407e436e2e4e4e15cf719547 Mon Sep 17 00:00:00 2001
From: Xavier Dupre
Date: Wed, 28 Jun 2023 02:03:33 +0200
Subject: [PATCH 14/20] documentation

---
 _doc/index.rst                          |  2 +-
 _doc/{techdetails.rst => tech/aapi.rst} | 33 +++++++++++++++++++++++--
 _doc/tech/index.rst                     |  7 ++++++
 onnx_array_api/npx/npx_array_api.py     |  4 ++-
 onnx_array_api/npx/npx_jit_eager.py     |  3 ++-
 onnx_array_api/npx/npx_tensors.py       |  3 ++-
 6 files changed, 46 insertions(+), 6 deletions(-)
 rename _doc/{techdetails.rst => tech/aapi.rst} (73%)
 create mode 100644 _doc/tech/index.rst

diff --git a/_doc/index.rst b/_doc/index.rst
index ceb2eb2..79afe2a 100644
--- a/_doc/index.rst
+++ b/_doc/index.rst
@@ -33,7 +33,7 @@ well as to execute it. 
tutorial/index
     api/index
-    techdetails
+    tech/index
     auto_examples/index
 
     ../CHANGELOGS
diff --git a/_doc/techdetails.rst b/_doc/tech/aapi.rst
similarity index 73%
rename from _doc/techdetails.rst
rename to _doc/tech/aapi.rst
index 5bfb2f5..860e544 100644
--- a/_doc/techdetails.rst
+++ b/_doc/tech/aapi.rst
@@ -1,6 +1,6 @@
 
-Technical details
-=================
+Difficulty to implement an Array API for ONNX
+================================================
 
 Implementing the full array API is not always easy with :epkg:`onnx`.
 Python is not strongly typed and many different types can be used
@@ -76,3 +76,32 @@ A function such as `empty
 `_ should be avoided as it
 has to be followed by calls to `__setitem__`.
+
+Eager or compilation
+++++++++++++++++++++
+
+Eager mode is what the Array API implies.
+Every function is converted into an ONNX graph based
+on its inputs, without any knowledge of how these inputs
+were obtained. This graph is then executed before going
+to the next call of a function from the API.
+The conversion of a machine learned model
+into ONNX implies gathering all these operations
+into a single graph. It means using a mode that records all the
+function calls in order to compile every tiny ONNX graph into a
+unique graph.
+
+Iterators and Reduction
++++++++++++++++++++++++
+
+An efficient implementation of function
+:func:`numpy.any` or :func:`numpy.all` returns
+as soon as the result is known. :func:`numpy.all` is
+false whenever the first false condition is met.
+Same goes for :func:`numpy.any`, which is true
+whenever the first true condition is met.
+There is no such operator in ONNX (opset <= 20) because
+it is unlikely to appear in a machine learned model.
+However, it is highly used when two results are
+compared in unit tests. The ONNX implementation is
+not efficient due to that reason but it only impacts
+the unit tests.
\ No newline at end of file
diff --git a/_doc/tech/index.rst b/_doc/tech/index.rst
new file mode 100644
index 0000000..dca7e11
--- /dev/null
+++ b/_doc/tech/index.rst
@@ -0,0 +1,7 @@
+Technical Details
+=================
+
+.. toctree::
+    :maxdepth: 2
+
+    aapi
\ No newline at end of file
diff --git a/onnx_array_api/npx/npx_array_api.py b/onnx_array_api/npx/npx_array_api.py
index 58968ae..142a892 100644
--- a/onnx_array_api/npx/npx_array_api.py
+++ b/onnx_array_api/npx/npx_array_api.py
@@ -61,7 +61,9 @@ def __rmul__(self, ov: "BaseArrayApi") -> "BaseArrayApi":
         return self.generic_method("__rmul__", ov)
 
     def __matmul__(self, ov: "BaseArrayApi") -> "BaseArrayApi":
-        return self.generic_method("__matmul__", ov)
+        res = self.generic_method("__matmul__", ov)
+        # TODO: It works with float32 but not float64. 
+        return res
 
     def __truediv__(self, ov: "BaseArrayApi") -> "BaseArrayApi":
         return self.generic_method("__truediv__", ov)
diff --git a/onnx_array_api/npx/npx_jit_eager.py b/onnx_array_api/npx/npx_jit_eager.py
index 369f3a3..1ea420f 100644
--- a/onnx_array_api/npx/npx_jit_eager.py
+++ b/onnx_array_api/npx/npx_jit_eager.py
@@ -65,7 +65,7 @@ def info(
         logger.info("")
         return
     logger.info(
-        "%s [%s.%s] nx=%d ni=%d ikw=%d kwi=%d f=%s.%s cl=%s me=%s ae=%s",
+        "%s [%s.%s] nx=%d ni=%d ikw=%d kwi=%d f=%s.%s cl=%s me=%s mekw=%s ae=%s",
         prefix,
         self.__class__.__name__,
         method_name[:6],
@@ -77,6 +77,7 @@ def info(
         self.f.__name__,
         self.tensor_class.__name__,
         self.method_name_ or "",
+        "" if kwargs is None else kwargs.get("method_name", ""),
         "" if already_eager is None else (1 if already_eager else 0),
     )
     if args is not None or kwargs is not None:
diff --git a/onnx_array_api/npx/npx_tensors.py b/onnx_array_api/npx/npx_tensors.py
index b07e2d4..8c954c2 100644
--- a/onnx_array_api/npx/npx_tensors.py
+++ b/onnx_array_api/npx/npx_tensors.py
@@ -141,7 +141,8 @@ def _generic_method_operator(self, method_name, *args: Any, **kwargs: Any) -> An
     new_args = []
     for a in args:
         if isinstance(a, np.ndarray):
-            new_args.append(self.__class__(a.astype(self.dtype.np_dtype)))
+            t = self.__class__(a.astype(self.dtype.np_dtype))
+            new_args.append(t)
         elif isinstance(a, (int, float, bool)):
             new_args.append(
                 self.__class__(np.array([a]).astype(self.dtype.np_dtype))
From d8425db167cbf3a51cc6ff9a8202163b77bee33d Mon Sep 17 00:00:00 2001
From: Xavier Dupre
Date: Wed, 28 Jun 2023 09:36:04 +0200
Subject: [PATCH 15/20] fix issue with garbage collector and OrtTensor
 initialized from numpy array

---
 _unittests/ut_array_api/test_onnx_ort.py | 37 +++++++++++++++++++
 onnx_array_api/npx/npx_jit_eager.py      | 46 ++++++++++++++----------
 onnx_array_api/ort/ort_tensors.py        | 20 +++++++++--
 3 files changed, 82 insertions(+), 21 deletions(-)

diff --git a/_unittests/ut_array_api/test_onnx_ort.py b/_unittests/ut_array_api/test_onnx_ort.py
index a10b0d0..5945834 100644
--- a/_unittests/ut_array_api/test_onnx_ort.py
+++ b/_unittests/ut_array_api/test_onnx_ort.py
@@ -2,6 +2,7 @@
 import numpy as np
 from onnx_array_api.ext_test_case import ExtTestCase
 from onnx_array_api.array_api import onnx_ort as xp
+from onnx_array_api.npx.npx_numpy_tensors import EagerNumpyTensor
 from onnx_array_api.ort.ort_tensors import EagerOrtTensor as EagerTensor
 
 
@@ -15,6 +16,42 @@ def test_abs(self):
         a = xp.absolute(mat)
         self.assertEqualArray(np.absolute(mat.numpy()), a.numpy())
 
+    def test_matmul(self):
+        for cls in [EagerTensor, EagerNumpyTensor]:
+            for dtype in (np.float32, np.float64):
+                X = cls(
+                    np.array(
+                        [[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]],
+                        dtype=dtype,
+                    )
+                )
+                coef = cls(np.array([[1e-13, 8]], dtype=dtype).T)
+                self.assertEqualArray(
+                    np.array([[1e-13, 8]], dtype=dtype), coef.numpy().T
+                )
+                expected = X.numpy() @ coef.numpy()
+                got = X @ coef
+                try:
+                    self.assertEqualArray(expected, got.numpy())
+                except AssertionError as e:
+                    raise AssertionError(
+                        f"Discrepancies (1) with cls={cls.__name__}, dtype={dtype}"
+                    ) from e
+
+                coef = np.array([[1e-13, 8]], dtype=dtype).T
+                expected = X.numpy() @ coef
+                got = X @ coef
+                try:
+                    self.assertEqualArray(expected, got.numpy())
+                except AssertionError as e:
+                    raise AssertionError(
+                        f"Discrepancies (2) with cls={cls.__name__}, dtype={dtype}"
+                    ) from e
+
 
 if __name__ == "__main__":
+    # import logging

+    # logging.basicConfig(level=logging.DEBUG)
+    # TestOnnxOrt().test_matmul()
     unittest.main(verbosity=2) 
diff --git a/onnx_array_api/npx/npx_jit_eager.py b/onnx_array_api/npx/npx_jit_eager.py
index 1ea420f..b49d7ce 100644
--- a/onnx_array_api/npx/npx_jit_eager.py
+++ b/onnx_array_api/npx/npx_jit_eager.py
@@ -1,9 +1,8 @@
 from inspect import signature
 from logging import getLogger
 from typing import Any, Callable, Dict, List, Optional, Tuple, Union
-
 import numpy as np
-
+from onnx import ModelProto
 from .npx_tensors import EagerTensor, JitTensor
 from .npx_types import DType, OptTensorType, TensorType
 from .npx_var import Cst, Input, Var
@@ -57,6 +56,8 @@ def info(
         already_eager: Optional[bool] = None,
         args: Optional[List[Any]] = None,
         kwargs: Optional[Dict[str, Any]] = None,
+        key: Optional[Tuple[Any, ...]] = None,
+        onx: Optional[ModelProto] = None,
     ):
         """
         Logs a status.
@@ -64,23 +65,29 @@ def info(
         if prefix is None:
             logger.info("")
             return
-        logger.info(
-            "%s [%s.%s] nx=%d ni=%d ikw=%d kwi=%d f=%s.%s cl=%s me=%s mekw=%s ae=%s",
-            prefix,
-            self.__class__.__name__,
-            method_name[:6],
-            len(self.onxs),
-            self.n_inputs_,
-            0 if self.input_to_kwargs_ is None else 1,
-            0 if self.kwargs_to_input_ is None else 1,
-            self.f.__module__,
-            self.f.__name__,
-            self.tensor_class.__name__,
-            self.method_name_ or "",
-            "" if kwargs is None else kwargs.get("method_name", ""),
-            "" if already_eager is None else (1 if already_eager else 0),
-        )
-        if args is not None or kwargs is not None:
+        if key is None:
+            logger.info(
+                "%s [%s.%s] nx=%d ni=%d ikw=%d kwi=%d f=%s.%s "
+                "cl=%s me=%s mekw=%s ae=%s",
+                prefix,
+                self.__class__.__name__,
+                method_name[:6],
+                len(self.onxs),
+                self.n_inputs_,
+                0 if self.input_to_kwargs_ is None else 1,
+                0 if self.kwargs_to_input_ is None else 1,
+                self.f.__module__,
+                self.f.__name__,
+                self.tensor_class.__name__,
+                self.method_name_ or "",
+                "" if kwargs is None else kwargs.get("method_name", ""),
+                "" if already_eager is None else (1 if already_eager else 0),
+            )
+        if method_name in ("jit_call", "jit_call_key") and (
+            args is not None or kwargs is not None
+        ):
+            if key is not None:
+                logger.debug("---- key=%s", key)
             logger.debug(
                 "---- [%s] [%s]",
                 "" if args is None else str(args),
@@ -476,6 +483,7 @@ def jit_call(self, *values, **kwargs):
 
         values, kwargs = self.move_input_to_kwargs(values, kwargs)
         key = self.make_key(*values, **kwargs)
+        self.info("=", "jit_call_key", key=key, args=values, kwargs=kwargs)
         if self.method_name_ is None and "method_name" in key:
             pos = list(key).index("method_name")
             self.method_name_ = key[pos + 2]
diff --git a/onnx_array_api/ort/ort_tensors.py b/onnx_array_api/ort/ort_tensors.py
index e0435ab..c78fccd 100644
--- a/onnx_array_api/ort/ort_tensors.py
+++ b/onnx_array_api/ort/ort_tensors.py
@@ -56,7 +56,7 @@ def from_array(
         """
         if device is None:
             device = OrtTensor.CPU
-        return OrtTensor(C_OrtValue.ortvalue_from_numpy(value, device))
+        return OrtTensor(C_OrtValue.ortvalue_from_numpy(value, device), _hold=value)
 
     def numpy(self) -> np.ndarray:
         """
@@ -73,6 +73,11 @@ class Evaluator:
     :param input_names: input names
     :param onx: onnx model
    :param f: unused except in error messages
+    :param _hold: :epkg:`onnxruntime` does not copy the data when it comes
+        from a numpy array on CPU, but it does not keep any reference
+        on it either. *_hold* is used to store the underlying numpy array
+        hosting the data of an OrtTensor when it was created from one.
+        It ensures the garbage collector does not delete that array.
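+        This is the usual keep-alive pattern: the wrapper of a foreign
+        memory buffer keeps a reference on the owner of that memory for
+        as long as the buffer may be read.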
""" def __init__( @@ -135,13 +140,24 @@ def run(self, *inputs: List["OrtTensor"]) -> List["OrtTensor"]: ) return list(map(inputs[0].__class__, res)) - def __init__(self, tensor: Union[C_OrtValue, "OrtTensor", np.ndarray]): + def __init__( + self, + tensor: Union[C_OrtValue, "OrtTensor", np.ndarray], + _hold: Optional[np.ndarray] = None, + ): if isinstance(tensor, C_OrtValue): self._tensor = tensor + self._hold = _hold elif isinstance(tensor, OrtTensor): self._tensor = tensor._tensor + self._hold = _hold elif isinstance(tensor, np.ndarray): + if _hold is not None: + raise RuntimeError( + "tensor cannot be a numpy array and _hold be not None." + ) self._tensor = C_OrtValue.ortvalue_from_numpy(tensor, OrtTensor.CPU) + self._hold = tensor else: raise ValueError(f"An OrtValue is expected not {type(tensor)}.") From 24b5911000e6f6d3f0160da1d2ac81d138442659 Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Wed, 28 Jun 2023 10:19:25 +0200 Subject: [PATCH 16/20] add a unit test for bfloat16 --- _doc/tech/aapi.rst | 8 +++++++- _unittests/ut_array_api/test_onnx_numpy.py | 18 +++++++++++++++++- 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/_doc/tech/aapi.rst b/_doc/tech/aapi.rst index 860e544..ddb0bbe 100644 --- a/_doc/tech/aapi.rst +++ b/_doc/tech/aapi.rst @@ -104,4 +104,10 @@ it is unlikely to appear in a machine learned model. However, it is highly used when two results are compared in unit tests. The ONNX implementation is not efficient due to that reason but it only impacts -the unit tests. \ No newline at end of file +the unit tests. + +Types ++++++ + +:epkg:`onnx` supports more types than :epkg:`numpy` does. +It is not always easy to deal with bfloat16 or float8 types. diff --git a/_unittests/ut_array_api/test_onnx_numpy.py b/_unittests/ut_array_api/test_onnx_numpy.py index 709e13c..859c802 100644 --- a/_unittests/ut_array_api/test_onnx_numpy.py +++ b/_unittests/ut_array_api/test_onnx_numpy.py @@ -1,8 +1,10 @@ import sys import unittest import numpy as np +from onnx import TensorProto from onnx_array_api.ext_test_case import ExtTestCase from onnx_array_api.array_api import onnx_numpy as xp +from onnx_array_api.npx.npx_types import DType from onnx_array_api.npx.npx_numpy_tensors import EagerNumpyTensor as EagerTensor @@ -96,7 +98,21 @@ def test_arange_int00(self): expected = expected.astype(np.int64) self.assertEqualArray(matnp, expected) + def test_ones_like_uint16(self): + x = EagerTensor(np.array(0, dtype=np.uint16)) + y = np.ones_like(x.numpy()) + z = xp.ones_like(x) + self.assertEqual(y.dtype, x.numpy().dtype) + self.assertEqual(x.dtype, z.dtype) + self.assertEqual(x.dtype, DType(TensorProto.UINT16)) + self.assertEqual(z.dtype, DType(TensorProto.UINT16)) + self.assertEqual(x.numpy().dtype, np.uint16) + self.assertEqual(z.numpy().dtype, np.uint16) + self.assertNotIn("bfloat16", str(z.numpy().dtype)) + expected = np.array(1, dtype=np.uint16) + self.assertEqualArray(expected, z.numpy()) + if __name__ == "__main__": - # TestOnnxNumpy().test_arange_int00() + # TestOnnxNumpy().test_ones_like() unittest.main(verbosity=2) From 36ff560e1be6cdfbe9b05110a583921feae5f9f9 Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Wed, 28 Jun 2023 12:11:07 +0200 Subject: [PATCH 17/20] disable one test for ones_like --- _unittests/onnx-numpy-skips.txt | 3 ++- _unittests/ut_array_api/test_onnx_numpy.py | 5 ++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/_unittests/onnx-numpy-skips.txt b/_unittests/onnx-numpy-skips.txt index dcb067c..fa97436 100644 --- 
a/_unittests/onnx-numpy-skips.txt +++ b/_unittests/onnx-numpy-skips.txt @@ -9,5 +9,6 @@ array_api_tests/test_creation_functions.py::test_eye array_api_tests/test_creation_functions.py::test_full_like array_api_tests/test_creation_functions.py::test_linspace array_api_tests/test_creation_functions.py::test_meshgrid -array_api_tests/test_creation_functions.py::test_ones_like +# Issue with CastLike and bfloat16 on onnx <= 1.15.0 +# array_api_tests/test_creation_functions.py::test_ones_like array_api_tests/test_creation_functions.py::test_zeros_like diff --git a/_unittests/ut_array_api/test_onnx_numpy.py b/_unittests/ut_array_api/test_onnx_numpy.py index 859c802..507544f 100644 --- a/_unittests/ut_array_api/test_onnx_numpy.py +++ b/_unittests/ut_array_api/test_onnx_numpy.py @@ -1,7 +1,8 @@ import sys import unittest +from packaging.version import Version import numpy as np -from onnx import TensorProto +from onnx import TensorProto, __version__ as onnx_ver from onnx_array_api.ext_test_case import ExtTestCase from onnx_array_api.array_api import onnx_numpy as xp from onnx_array_api.npx.npx_types import DType @@ -98,6 +99,8 @@ def test_arange_int00(self): expected = expected.astype(np.int64) self.assertEqualArray(matnp, expected) + @unittest.skipIf(Version(onnx_ver) >= Version("1.15.0"), + reason="Reference implementation of CastLike is bugged.") def test_ones_like_uint16(self): x = EagerTensor(np.array(0, dtype=np.uint16)) y = np.ones_like(x.numpy()) From 4a45588aa1f04e3d64fe763b76f45cd18872a2f6 Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Wed, 28 Jun 2023 12:12:37 +0200 Subject: [PATCH 18/20] lint --- _unittests/ut_array_api/test_onnx_numpy.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/_unittests/ut_array_api/test_onnx_numpy.py b/_unittests/ut_array_api/test_onnx_numpy.py index 507544f..b603f21 100644 --- a/_unittests/ut_array_api/test_onnx_numpy.py +++ b/_unittests/ut_array_api/test_onnx_numpy.py @@ -99,8 +99,10 @@ def test_arange_int00(self): expected = expected.astype(np.int64) self.assertEqualArray(matnp, expected) - @unittest.skipIf(Version(onnx_ver) >= Version("1.15.0"), - reason="Reference implementation of CastLike is bugged.") + @unittest.skipIf( + Version(onnx_ver) >= Version("1.15.0"), + reason="Reference implementation of CastLike is bugged.", + ) def test_ones_like_uint16(self): x = EagerTensor(np.array(0, dtype=np.uint16)) y = np.ones_like(x.numpy()) From 06bc9a779685273df0ccbe5c8d7bf364d3b4c1a9 Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Wed, 28 Jun 2023 13:14:57 +0200 Subject: [PATCH 19/20] fix skipif --- _unittests/ut_array_api/test_onnx_numpy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/_unittests/ut_array_api/test_onnx_numpy.py b/_unittests/ut_array_api/test_onnx_numpy.py index b603f21..9e3efb7 100644 --- a/_unittests/ut_array_api/test_onnx_numpy.py +++ b/_unittests/ut_array_api/test_onnx_numpy.py @@ -100,7 +100,7 @@ def test_arange_int00(self): self.assertEqualArray(matnp, expected) @unittest.skipIf( - Version(onnx_ver) >= Version("1.15.0"), + Version(onnx_ver) < Version("1.15.0"), reason="Reference implementation of CastLike is bugged.", ) def test_ones_like_uint16(self): From adb345d9229de327039ed0a4a634ea025d1b8387 Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Wed, 28 Jun 2023 13:26:34 +0200 Subject: [PATCH 20/20] api --- _unittests/onnx-numpy-skips.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/_unittests/onnx-numpy-skips.txt b/_unittests/onnx-numpy-skips.txt index 
fa97436..9a04400 100644 --- a/_unittests/onnx-numpy-skips.txt +++ b/_unittests/onnx-numpy-skips.txt @@ -1,7 +1,7 @@ # API failures # see https://github.com/data-apis/array-api-tests/blob/master/numpy-skips.txt array_api_tests/test_creation_functions.py::test_asarray_scalars -# array_api_tests/test_creation_functions.py::test_arange +array_api_tests/test_creation_functions.py::test_arange array_api_tests/test_creation_functions.py::test_asarray_arrays array_api_tests/test_creation_functions.py::test_empty array_api_tests/test_creation_functions.py::test_empty_like