
Move executorch_call_delegate to torch.ops.higher_order.executorch_call_delegate #1149
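In short, this PR moves the `executorch_call_delegate` higher-order operator out of the deprecated global `torch.ops` namespace and under `torch.ops.higher_order`, updating every call site, FileCheck pattern, and doc reference to match. A minimal sketch of what changes for callers (not code from the PR; assumes executorch is importable):

```python
# Sketch: the op object itself is unchanged; only the torch.ops path
# it resolves under moves.
import torch
from executorch.exir.delegate import executorch_call_delegate

# Old, deprecated resolution path (removed by this PR):
#   torch.ops.executorch_call_delegate
# New resolution path:
assert executorch_call_delegate is torch.ops.higher_order.executorch_call_delegate
```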


Closed · wants to merge 1 commit
4 changes: 2 additions & 2 deletions backends/xnnpack/test/models/inception_v3.py
@@ -37,7 +37,7 @@ def test_fp32_ic3(self):
             .to_edge()
             .check(list(self.all_operators))
             .partition()
-            .check(["torch.ops.executorch_call_delegate"])
+            .check(["torch.ops.higher_order.executorch_call_delegate"])
             .check_not(list(self.all_operators))
             .to_executorch()
             .serialize()
@@ -58,7 +58,7 @@ def test_qs8_ic3(self):
             .to_edge()
             .check(list(ops_after_quantization))
             .partition()
-            .check(["torch.ops.executorch_call_delegate"])
+            .check(["torch.ops.higher_order.executorch_call_delegate"])
             .check_not(list(ops_after_quantization))
             .to_executorch()
             .serialize()
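The XNNPACK test diffs that follow all apply the same one-line rename: after partitioning, the Tester pipeline asserts that the graph calls the delegate under its new name. A condensed sketch of the shared pattern (the Tester import path and helper name are assumptions for illustration, not code from this PR):

```python
# Condensed sketch of the repeated test pattern. Assumes executorch's XNNPACK
# Tester utility; `model`, `inputs`, and `ops` are caller-supplied placeholders.
from executorch.backends.xnnpack.test.tester import Tester

def assert_fully_delegated(model, inputs, ops):
    (
        Tester(model, inputs)
        .export()
        .to_edge()
        .check(list(ops))      # edge ops present before partitioning...
        .partition()
        .check(["torch.ops.higher_order.executorch_call_delegate"])
        .check_not(list(ops))  # ...and absorbed into the delegate afterwards
        .to_executorch()
        .serialize()
        .run_method()
    )
```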
4 changes: 2 additions & 2 deletions backends/xnnpack/test/models/inception_v4.py
@@ -35,7 +35,7 @@ def test_fp32_ic4(self):
             .to_edge()
             .check(list(self.all_operators))
             .partition()
-            .check(["torch.ops.executorch_call_delegate"])
+            .check(["torch.ops.higher_order.executorch_call_delegate"])
             .check_not(list(self.all_operators))
             .to_executorch()
             .serialize()
@@ -56,7 +56,7 @@ def test_qs8_ic4(self):
             .to_edge()
             .check(list(ops_after_quantization))
             .partition()
-            .check(["torch.ops.executorch_call_delegate"])
+            .check(["torch.ops.higher_order.executorch_call_delegate"])
             .check_not(list(ops_after_quantization))
             .to_executorch()
             .serialize()
4 changes: 2 additions & 2 deletions backends/xnnpack/test/models/mobilenet_v2.py
@@ -35,7 +35,7 @@ def test_fp32_mv2(self):
             .to_edge()
             .check(list(self.all_operators))
             .partition()
-            .check(["torch.ops.executorch_call_delegate"])
+            .check(["torch.ops.higher_order.executorch_call_delegate"])
             .check_not(list(self.all_operators))
             .to_executorch()
             .serialize()
@@ -56,7 +56,7 @@ def test_qs8_mv2(self):
             .to_edge()
             .check(list(ops_after_quantization))
             .partition()
-            .check(["torch.ops.executorch_call_delegate"])
+            .check(["torch.ops.higher_order.executorch_call_delegate"])
             .check_not(list(ops_after_quantization))
             .to_executorch()
             .serialize()
4 changes: 2 additions & 2 deletions backends/xnnpack/test/models/mobilenet_v3.py
@@ -37,7 +37,7 @@ def test_fp32_mv3(self):
             .to_edge()
             .check(list(self.all_operators))
             .partition()
-            .check(["torch.ops.executorch_call_delegate"])
+            .check(["torch.ops.higher_order.executorch_call_delegate"])
             .check_not(list(self.all_operators))
             .to_executorch()
             .serialize()
@@ -58,7 +58,7 @@ def test_qs8_mv3(self):
             .to_edge()
             .check(list(ops_after_quantization))
             .partition()
-            .check(["torch.ops.executorch_call_delegate"])
+            .check(["torch.ops.higher_order.executorch_call_delegate"])
             .check_not(list(ops_after_lowering))
             .to_executorch()
             .serialize()
2 changes: 1 addition & 1 deletion backends/xnnpack/test/models/torchvision_vit.py
@@ -53,7 +53,7 @@ def test_fp32_vit(self):
             .to_edge()
             .check(list(self.all_operators))
             .partition()
-            .check(["torch.ops.executorch_call_delegate"])
+            .check(["torch.ops.higher_order.executorch_call_delegate"])
             .check_not(list(lowerable_xnn_operators))
             .to_executorch()
             .serialize()
8 changes: 4 additions & 4 deletions backends/xnnpack/test/ops/add.py
@@ -34,7 +34,7 @@ def test_fp32_add(self):
             .to_edge()
             .check_count({"executorch_exir_dialects_edge__ops_aten_add_Tensor": 4})
             .partition()
-            .check_count({"torch.ops.executorch_call_delegate": 1})
+            .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
             .check_not(["executorch_exir_dialects_edge__ops_aten_add_Tensor"])
             .to_executorch()
             .serialize()
@@ -53,7 +53,7 @@ def test_qs8_add(self):
             .to_edge()
             .check_count({"executorch_exir_dialects_edge__ops_aten_add_Tensor": 4})
             .partition(Partition(partitioner=XnnpackQuantizedPartitioner))
-            .check_count({"torch.ops.executorch_call_delegate": 1})
+            .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
             .check_not(["executorch_exir_dialects_edge__ops_aten_add_Tensor"])
             .check_not(["torch.ops.quantized_decomposed"])
             .to_executorch()
@@ -80,7 +80,7 @@ def test_fp32_add_relu(self):
             .partition()
             .check_not(["executorch_exir_dialects_edge__ops_aten_add_Tensor"])
             .check_not(["executorch_exir_dialects_edge__ops_aten_relu_default"])
-            .check_count({"torch.ops.executorch_call_delegate": 1})
+            .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
             .to_executorch()
             .serialize()
             .run_method()
@@ -103,7 +103,7 @@ def test_qs8_add_relu(self):
             .check_not(["executorch_exir_dialects_edge__ops_aten_add_Tensor"])
             .check_not(["executorch_exir_dialects_edge__ops_aten_relu_default"])
             .check_not(["torch.ops.quantized_decomposed"])
-            .check_count({"torch.ops.executorch_call_delegate": 1})
+            .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
             .to_executorch()
             .serialize()
             .run_method()
6 changes: 3 additions & 3 deletions backends/xnnpack/test/ops/bilinear2d.py
@@ -86,7 +86,7 @@ def test_fp32_static_resize_bilinear2d(self):
             .check(self.ops)
             .partition()
             .check_not(self.ops)
-            .check_count({"torch.ops.executorch_call_delegate": 1})
+            .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
             .to_executorch()
             .serialize()
             .run_method()
@@ -102,7 +102,7 @@ def test_fp32_static_resize_bilinear2d_with_align_cornesr(self):
             .check(self.ops)
             .partition()
             .check_not(self.ops)
-            .check_count({"torch.ops.executorch_call_delegate": 1})
+            .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
             .to_executorch()
             .serialize()
             .run_method()
@@ -127,5 +127,5 @@ def test_fp32_static_resize_bilinear2d_antialiased(self):
                     "executorch_exir_dialects_edge__ops_aten__upsample_bilinear2d_aa_default": 2
                 }
             )
-            .check_not(["torch.ops.executorch_call_delegate"])
+            .check_not(["torch.ops.higher_order.executorch_call_delegate"])
         )
4 changes: 2 additions & 2 deletions backends/xnnpack/test/ops/cat.py
@@ -27,7 +27,7 @@ def test_fp32_cat(self):
             .to_edge()
             .check_count({"executorch_exir_dialects_edge__ops_aten_cat": 1})
             .partition()
-            .check_count({"torch.ops.executorch_call_delegate": 1})
+            .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
             .check_not(["executorch_exir_dialects_edge__ops_aten_cat"])
             .to_executorch()
             .serialize()
@@ -51,7 +51,7 @@ def test_fp32_cat_negative_dim(self):
             .to_edge()
             .check_count({"executorch_exir_dialects_edge__ops_aten_cat": 1})
             .partition()
-            .check_count({"torch.ops.executorch_call_delegate": 1})
+            .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
             .check_not(["executorch_exir_dialects_edge__ops_aten_cat"])
             .to_executorch()
             .serialize()
2 changes: 1 addition & 1 deletion backends/xnnpack/test/ops/conv1d.py
@@ -50,7 +50,7 @@ def test_conv1d(self):
             )
             .partition()
             .check_not(["executorch_exir_dialects_edge__ops_aten_convolution_default"])
-            .check_count({"torch.ops.executorch_call_delegate": 1})
+            .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
             .to_executorch()
             .serialize()
             .run_method()
2 changes: 1 addition & 1 deletion backends/xnnpack/test/ops/conv2d.py
@@ -76,7 +76,7 @@ def _test(
             )
             .partition()
             .check_not(["executorch_exir_dialects_edge__ops_aten_convolution_default"])
-            .check_count({"torch.ops.executorch_call_delegate": 1})
+            .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
             .to_executorch()
             .serialize()
             .run_method()
4 changes: 2 additions & 2 deletions docs/source/tutorial-xnnpack-delegate-lowering.md
@@ -48,12 +48,12 @@ GraphModule(

 def forward(self, arg314_1):
     lowered_module_0 = self.lowered_module_0
-    executorch_call_delegate = torch.ops.executorch_call_delegate(lowered_module_0, arg314_1); lowered_module_0 = arg314_1 = None
+    executorch_call_delegate = torch.ops.higher_order.executorch_call_delegate(lowered_module_0, arg314_1); lowered_module_0 = arg314_1 = None
     getitem = executorch_call_delegate[0]; executorch_call_delegate = None
     aten_view_copy_default = executorch_exir_dialects_edge__ops_aten_view_copy_default(getitem, [1, 1280]); getitem = None
     aten_clone_default = executorch_exir_dialects_edge__ops_aten_clone_default(aten_view_copy_default); aten_view_copy_default = None
     lowered_module_1 = self.lowered_module_1
-    executorch_call_delegate_1 = torch.ops.executorch_call_delegate(lowered_module_1, aten_clone_default); lowered_module_1 = aten_clone_default = None
+    executorch_call_delegate_1 = torch.ops.higher_order.executorch_call_delegate(lowered_module_1, aten_clone_default); lowered_module_1 = aten_clone_default = None
     getitem_1 = executorch_call_delegate_1[0]; executorch_call_delegate_1 = None
     return (getitem_1,)
 ```
@@ -471,7 +471,7 @@ def forward(self, x):
 print(exported_program.graph_module.lowered_module_0.original_module)

 ######################################################################
-# Notice that there is now a ``torch.ops.executorch_call_delegate`` node in the
+# Notice that there is now a ``torch.ops.higher_order.executorch_call_delegate`` node in the
 # graph, which is calling ``lowered_module_0``. Additionally, the contents of
 # ``lowered_module_0`` are the same as the ``lowered_module`` we created
 # previously.
@@ -513,7 +513,7 @@ def f(a, x, b):
 print(delegated_program.graph_module.lowered_module_1.original_module)

 ######################################################################
-# Notice that there are now 2 ``torch.ops.executorch_call_delegate`` nodes in the
+# Notice that there are now 2 ``torch.ops.higher_order.executorch_call_delegate`` nodes in the
 # graph, one containing the operations `add, mul` and the other containing the
 # operations `mul, add`.
 #
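Where the tutorial text above counts `executorch_call_delegate` nodes by eye, the same check can be done programmatically. A small sketch using plain torch.fx (`graph_module` is assumed to come from the to_backend flow shown above):

```python
# Sketch: count delegate calls in an FX graph, mirroring the "there are now 2
# nodes" observation in the tutorial text above.
import torch

def count_delegate_calls(graph_module: torch.fx.GraphModule) -> int:
    return sum(
        1
        for node in graph_module.graph.nodes
        if node.op == "call_function"
        and node.target is torch.ops.higher_order.executorch_call_delegate
    )
```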
2 changes: 1 addition & 1 deletion exir/backend/test/backend_with_compiler_demo.py
@@ -62,7 +62,7 @@ def forward(self, x):
     graph():
         %arg0_1 : [#users=2] = placeholder[target=arg0_1]
         %lowered_module_0 : [#users=1] = get_attr[target=lowered_module_0]
-        %executorch_call_delegate : [#users=1] = call_function[target=torch.ops.executorch_call_delegate](args = (%lowered_module_0, forward, %arg0_1), kwargs = {})
+        %executorch_call_delegate : [#users=1] = call_function[target=torch.ops.higher_order.executorch_call_delegate](args = (%lowered_module_0, forward, %arg0_1), kwargs = {})
         return [executorch_call_delegate]

     Args:
8 changes: 6 additions & 2 deletions exir/backend/test/test_backends.py
@@ -171,7 +171,9 @@ def forward(self, x):

         # Check that there exists a call_delegate, representing the call to the
         # delegated function
-        FileCheck().check("torch.ops.executorch_call_delegate").run(graph_module.code)
+        FileCheck().check("torch.ops.higher_order.executorch_call_delegate").run(
+            graph_module.code
+        )
         lowered_submodules = get_lowered_submodules(graph_module)
         self.assertEqual(len(lowered_submodules), 1)

@@ -386,7 +388,9 @@ def forward(self, x):

         # Check that there exists a call_delegate op, representing the call to the
         # delegated function
-        FileCheck().check("torch.ops.executorch_call_delegate").run(graph_module.code)
+        FileCheck().check("torch.ops.higher_order.executorch_call_delegate").run(
+            graph_module.code
+        )

         for node in graph_module.graph.nodes:
             if node.op == "call_function" and node.target == executorch_call_delegate:
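The FileCheck calls above are split across lines only because the longer op name pushes them past the formatter's line limit; the assertion itself is unchanged. For readers unfamiliar with the utility, a self-contained FileCheck sketch on a toy traced function (not code from this PR):

```python
# Self-contained FileCheck demo on a toy traced function. FileCheck asserts
# that its patterns appear, in order, in the generated Python source.
import torch
from torch.testing import FileCheck

def double(x: torch.Tensor) -> torch.Tensor:
    return x * 2

gm = torch.fx.symbolic_trace(double)
FileCheck().check("mul").run(gm.code)  # passes: "mul" appears in gm.code
```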
8 changes: 6 additions & 2 deletions exir/backend/test/test_backends_lifted.py
@@ -198,7 +198,9 @@ def forward(self, x):

         # Check that there exists a call_delegate, representing the call to the
         # delegated function
-        FileCheck().check("torch.ops.executorch_call_delegate").run(graph_module.code)
+        FileCheck().check("torch.ops.higher_order.executorch_call_delegate").run(
+            graph_module.code
+        )
         lowered_submodules = get_lowered_submodules(graph_module)
         self.assertEqual(len(lowered_submodules), 1)

@@ -415,7 +417,9 @@ def forward(self, x):

         # Check that there exists a call_delegate op, representing the call to the
         # delegated function
-        FileCheck().check("torch.ops.executorch_call_delegate").run(graph_module.code)
+        FileCheck().check("torch.ops.higher_order.executorch_call_delegate").run(
+            graph_module.code
+        )

         for node in graph_module.graph.nodes:
             if node.op == "call_function" and node.target == executorch_call_delegate:
4 changes: 1 addition & 3 deletions exir/delegate.py
@@ -24,9 +24,7 @@
 from torch.utils._pytree import tree_flatten


-executorch_call_delegate = HigherOrderOperator(
-    "executorch_call_delegate", _deprecated_global_ns=True
-)
+executorch_call_delegate = HigherOrderOperator("executorch_call_delegate")
 # pyre-ignore
 executorch_call_delegate.fallthrough(torch._C.DispatchKey.PythonDispatcher)
 # pyre-ignore
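This hunk is the substantive change: dropping `_deprecated_global_ns=True` stops registering the op at the top-level `torch.ops` namespace, leaving it reachable only via `torch.ops.higher_order`. A toy illustration of the registration mechanism (hypothetical op name, not part of ExecuTorch; on PyTorch releases contemporary with this PR a bare `HigherOrderOperator(...)` call also worked, while newer releases expect a subclass):

```python
# Toy illustration with a hypothetical op name; not part of ExecuTorch.
import torch
from torch._ops import HigherOrderOperator

class MyDelegateDemo(HigherOrderOperator):
    def __init__(self):
        super().__init__("my_delegate_demo")

my_delegate_demo = MyDelegateDemo()
# HigherOrderOperator instances resolve via torch.ops.higher_order.<name>:
assert torch.ops.higher_order.my_delegate_demo is my_delegate_demo
```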
8 changes: 4 additions & 4 deletions exir/tests/test_delegate.py
@@ -44,7 +44,7 @@ def g(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
         )

         def f(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
-            return torch.ops.executorch_call_delegate(lowered_module, x, y)
+            return torch.ops.higher_order.executorch_call_delegate(lowered_module, x, y)

         orig_res = f(*inputs)
         gm = exir.capture(
@@ -55,7 +55,7 @@ def f(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
             ),
         )
         FileCheck().check("lowered_module_0").check(
-            "torch.ops.executorch_call_delegate"
+            "torch.ops.higher_order.executorch_call_delegate"
         ).run(gm.exported_program.graph_module.code)
         self.assertTrue(torch.allclose(orig_res, gm(*inputs)))

@@ -77,7 +77,7 @@ def test_to_backend(self) -> None:
         # Check that there exists a call_delegate, representing the call to the
         # delegated function
         FileCheck().check("lowered_module_0").check(
-            "torch.ops.executorch_call_delegate"
+            "torch.ops.higher_order.executorch_call_delegate"
        ).run(graph_module.code)

         # Check that there does not exist an add node (from the non-delegated
@@ -90,7 +90,7 @@ def test_to_backend(self) -> None:
         for node in graph_module.graph.nodes:
             if (
                 node.op == "call_function"
-                and node.target == torch.ops.executorch_call_delegate
+                and node.target == torch.ops.higher_order.executorch_call_delegate
             ):
                 # Check that the first argument is the lowered backend module
                 # (which we got from a getattr)
2 changes: 1 addition & 1 deletion exir/tests/test_quant_lowering_custom_backend_pass.py
@@ -594,7 +594,7 @@ def test(self) -> None:
         # Check the toplevel graph
         FileCheck().check(
             "executorch_exir_dialects_edge__ops_quantized_decomposed_quantize_per_tensor_default"
-        ).check("torch.ops.executorch_call_delegate").check(
+        ).check("torch.ops.higher_order.executorch_call_delegate").check(
             "executorch_exir_dialects_edge__ops_quantized_decomposed_dequantize_per_tensor_default"
         ).check(
             "executorch_exir_dialects_edge__ops_quantized_decomposed_dequantize_per_tensor_default"
2 changes: 1 addition & 1 deletion exir/verification/arg_validator.py
@@ -18,7 +18,7 @@ class RunHigherOrderOperatorError(Exception):
     """
     Raised when an we try to run delegate or other HigherOrderOperator in a graph module.
     E.g., %executorch_call_delegate : [#users=1] = call_function[
-        target=torch.ops.executorch_call_delegate](args = (%lowered_module_0, %arg0_1), kwargs = {})
+        target=torch.ops.higher_order.executorch_call_delegate](args = (%lowered_module_0, %arg0_1), kwargs = {})
     """

     def __init__(self, message: str) -> None: