Adds graph API to the tutorial #58

Merged: 1 commit, merged on Dec 26, 2023
59 changes: 59 additions & 0 deletions _doc/tutorial/graph_api.rst
@@ -0,0 +1,59 @@
.. _l-graph-api:

=================================
GraphBuilder: common API for ONNX
=================================

This is a very common way to build an ONNX graph. Two steps are
tedious when building a graph by hand: giving a unique name to every
intermediate result and converting numpy arrays into ONNX tensors.
A *graph builder*, implemented here by class
:class:`GraphBuilder <onnx_array_api.graph_api.GraphBuilder>`,
makes these two frequent tasks easier.

.. runpython::
:showcode:

import numpy as np
from onnx_array_api.graph_api import GraphBuilder
from onnx_array_api.plotting.text_plot import onnx_simple_text_plot

g = GraphBuilder()
g.make_tensor_input("X", np.float32, (None, None))
g.make_tensor_input("Y", np.float32, (None, None))
r1 = g.make_node("Sub", ["X", "Y"]) # the class chooses the output name
# and makes sure it is unique
init = g.make_initializer(np.array([2], dtype=np.int64)) # the class automatically
# converts the array into an ONNX tensor
r2 = g.make_node("Pow", [r1, init])
g.make_node("ReduceSum", [r2], outputs=["Z"]) # the output name is specified
# because the user wants to choose it
g.make_tensor_output("Z", np.float32, (None, None))

onx = g.to_onnx() # final conversion to onnx

print(onnx_simple_text_plot(onx))

A simpler version of the same code produces the same graph.

.. runpython::
:showcode:

import numpy as np
from onnx_array_api.graph_api import GraphBuilder
from onnx_array_api.plotting.text_plot import onnx_simple_text_plot

g = GraphBuilder()
g.make_tensor_input("X", np.float32, (None, None))
g.make_tensor_input("Y", np.float32, (None, None))
r1 = g.op.Sub("X", "Y") # the method name selects the operator to use;
# this shortcut works when there is no ambiguity about the
# number of outputs
r2 = g.op.Pow(r1, np.array([2], dtype=np.int64))
g.op.ReduceSum(r2, outputs=["Z"]) # the output name still has to be chosen by the user
g.make_tensor_output("Z", np.float32, (None, None))

onx = g.to_onnx()

print(onnx_simple_text_plot(onx))
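
The resulting model can be executed like any other ONNX model. Below is a
minimal sketch using :class:`onnx.reference.ReferenceEvaluator` (the choice of
runtime and the input values are illustrative, any ONNX runtime would do):

.. runpython::
:showcode:

import numpy as np
from onnx.reference import ReferenceEvaluator
from onnx_array_api.graph_api import GraphBuilder

g = GraphBuilder()
g.make_tensor_input("X", np.float32, (None, None))
g.make_tensor_input("Y", np.float32, (None, None))
r1 = g.op.Sub("X", "Y")
r2 = g.op.Pow(r1, np.array([2], dtype=np.int64))
g.op.ReduceSum(r2, outputs=["Z"])
g.make_tensor_output("Z", np.float32, (None, None))
onx = g.to_onnx()

# execute the graph on small inputs, the squared differences sum to 10
ref = ReferenceEvaluator(onx)
x = np.array([[1, 2], [3, 4]], dtype=np.float32)
y = np.array([[0, 1], [1, 2]], dtype=np.float32)
print(ref.run(None, {"X": x, "Y": y}))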
1 change: 1 addition & 0 deletions _doc/tutorial/index.rst
@@ -7,6 +7,7 @@ Tutorial
:maxdepth: 1

onnx_api
graph_api
light_api
numpy_api
benchmarks
70 changes: 48 additions & 22 deletions _doc/tutorial/onnx_api.rst
@@ -584,37 +584,31 @@ The second part modifies it.

onnx.save(gs.export_onnx(graph), "modified.onnx")

numpy API for onnx
++++++++++++++++++
Graph Builder API
+++++++++++++++++

See :ref:`l-numpy-api-onnx`. This API was introduced to create graphs
by using numpy API. If a function is defined only with numpy,
it should be possible to use the exact same code to create the
corresponding onnx graph. That's what this API tries to achieve.
It works with the exception of control flow. In that case, the function
produces different onnx graphs depending on the execution path.
See :ref:`l-graph-api`. This API is very similar to what *skl2onnx* implements.
It is still about adding nodes to a graph, but some tasks are automated, such as
naming the results or converting constants into ONNX classes.

.. runpython::
:showcode:

import numpy as np
from onnx_array_api.npx import jit_onnx
from onnx_array_api.graph_api import GraphBuilder
from onnx_array_api.plotting.text_plot import onnx_simple_text_plot

def l2_loss(x, y):
return ((x - y) ** 2).sum(keepdims=1)

jitted_myloss = jit_onnx(l2_loss)
dummy = np.array([0], dtype=np.float32)

# The function is executed. Only then a onnx graph is created.
# One is created depending on the input type.
jitted_myloss(dummy, dummy)
g = GraphBuilder()
g.make_tensor_input("X", np.float32, (None, None))
g.make_tensor_input("Y", np.float32, (None, None))
r1 = g.op.Sub("X", "Y")
r2 = g.op.Pow(r1, np.array([2], dtype=np.int64))
g.op.ReduceSum(r2, outputs=["Z"])
g.make_tensor_output("Z", np.float32, (None, None))

onx = g.to_onnx()

# get_onnx only works if it was executed once or at least with
# the same input type
model = jitted_myloss.get_onnx()
print(onnx_simple_text_plot(model))
print(onnx_simple_text_plot(onx))

Light API
+++++++++
@@ -647,3 +641,35 @@ There is no eager mode.
)

print(onnx_simple_text_plot(model))

numpy API for onnx
++++++++++++++++++

See :ref:`l-numpy-api-onnx`. This API was introduced to create graphs
with the numpy API. If a function is defined only with numpy,
it should be possible to use the exact same code to create the
corresponding ONNX graph. That's what this API tries to achieve.
It works except for control flow: in that case, the function
produces different ONNX graphs depending on the execution path.

.. runpython::
:showcode:

import numpy as np
from onnx_array_api.npx import jit_onnx
from onnx_array_api.plotting.text_plot import onnx_simple_text_plot

def l2_loss(x, y):
return ((x - y) ** 2).sum(keepdims=1)

jitted_myloss = jit_onnx(l2_loss)
dummy = np.array([0], dtype=np.float32)

# The function is executed. Only then is an ONNX graph created.
# A distinct graph is created for each input type.
jitted_myloss(dummy, dummy)

# get_onnx only works after the function was executed at least once
# with the same input type
model = jitted_myloss.get_onnx()
print(onnx_simple_text_plot(model))
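
# A hedged usage sketch (the inputs below are illustrative, not part of the
# original example): calling the jitted function executes the generated ONNX
# graph and returns the computed numpy result directly.
x = np.array([[1, 2], [3, 4]], dtype=np.float32)
y = np.array([[0, 1], [1, 2]], dtype=np.float32)
print(jitted_myloss(x, y))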
2 changes: 2 additions & 0 deletions onnx_array_api/graph_api/graph_builder.py
@@ -50,7 +50,9 @@ class Opset:
"Mul": 1,
"Log": 1,
"Or": 1,
"Pow": 1,
"Relu": 1,
"ReduceSum": 1,
"Reshape": 1,
"Shape": 1,
"Slice": 1,
24 changes: 9 additions & 15 deletions onnx_array_api/plotting/text_plot.py
@@ -184,9 +184,7 @@ def iterate(nodes, node, depth=0, true_false=""):
rows.extend(r)
return "\n".join(rows)

raise NotImplementedError( # pragma: no cover
f"Type {node.op_type!r} cannot be displayed."
)
raise NotImplementedError(f"Type {node.op_type!r} cannot be displayed.")


def _append_succ_pred(
@@ -403,7 +401,7 @@ def _find_sequence(node_name, known, done):
)

if not sequences:
raise RuntimeError( # pragma: no cover
raise RuntimeError(
"Unexpected empty sequence (len(possibles)=%d, "
"len(done)=%d, len(nodes)=%d). This is usually due to "
"a name used both as result name and node node. "
@@ -434,7 +432,7 @@ def _find_sequence(node_name, known, done):
best = k

if best is None:
raise RuntimeError( # pragma: no cover
raise RuntimeError(
f"Wrong implementation (len(sequence)={len(sequences)})."
)
if verbose:
@@ -453,7 +451,7 @@ def _find_sequence(node_name, known, done):
known |= set(v.output)

if len(new_nodes) != len(nodes):
raise RuntimeError( # pragma: no cover
raise RuntimeError(
"The returned new nodes are different. "
"len(nodes=%d) != %d=len(new_nodes). done=\n%r"
"\n%s\n----------\n%s"
@@ -486,7 +484,7 @@ def _find_sequence(node_name, known, done):
n0s = set(n.name for n in nodes)
n1s = set(n.name for n in new_nodes)
if n0s != n1s:
raise RuntimeError( # pragma: no cover
raise RuntimeError(
"The returned new nodes are different.\n"
"%r !=\n%r\ndone=\n%r"
"\n----------\n%s\n----------\n%s"
@@ -758,7 +756,7 @@ def str_node(indent, node):
try:
val = str(to_array(att.t).tolist())
except TypeError as e:
raise TypeError( # pragma: no cover
raise TypeError(
"Unable to display tensor type %r.\n%s"
% (att.type, str(att))
) from e
@@ -853,9 +851,7 @@ def str_node(indent, node):
if isinstance(att, str):
rows.append(f"attribute: {att!r}")
else:
raise NotImplementedError( # pragma: no cover
"Not yet introduced in onnx."
)
raise NotImplementedError("Not yet introduced in onnx.")

# initializer
if hasattr(model, "initializer"):
@@ -894,7 +890,7 @@ def str_node(indent, node):

try:
nodes = reorder_nodes_for_display(model.node, verbose=verbose)
except RuntimeError as e: # pragma: no cover
except RuntimeError as e:
if raise_exc:
raise e
else:
@@ -924,9 +920,7 @@ def str_node(indent, node):
indent = mi
if previous_indent is not None and indent < previous_indent:
if verbose:
print( # pragma: no cover
f"[onnx_simple_text_plot] break2 {node.op_type}"
)
print(f"[onnx_simple_text_plot] break2 {node.op_type}")
add_break = True
if not add_break and previous_out is not None:
if not (set(node.input) & previous_out):