Skip to content

Commit 5c92ab9

Browse files
ydwu4 authored and facebook-github-bot committed
Move executorch_call_delegate to torch.ops.higher_order.executorch_call_delegate (#1149)
Summary: Pull Request resolved: #1149 Move torch.ops.executorch_call_delegate to torch.ops.higher_order.executorch_call_delegate. This is to better organize the namespace for all higher order operators. Reviewed By: angelayi Differential Revision: D51032881 fbshipit-source-id: 88b746119b47a2e0478471ac914ee107d66a9350
1 parent 8d68007 commit 5c92ab9

19 files changed

+44
-38
lines changed

backends/xnnpack/test/models/inception_v3.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,7 @@ def test_fp32_ic3(self):
3737
.to_edge()
3838
.check(list(self.all_operators))
3939
.partition()
40-
.check(["torch.ops.executorch_call_delegate"])
40+
.check(["torch.ops.higher_order.executorch_call_delegate"])
4141
.check_not(list(self.all_operators))
4242
.to_executorch()
4343
.serialize()
@@ -58,7 +58,7 @@ def test_qs8_ic3(self):
5858
.to_edge()
5959
.check(list(ops_after_quantization))
6060
.partition()
61-
.check(["torch.ops.executorch_call_delegate"])
61+
.check(["torch.ops.higher_order.executorch_call_delegate"])
6262
.check_not(list(ops_after_quantization))
6363
.to_executorch()
6464
.serialize()

backends/xnnpack/test/models/inception_v4.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,7 @@ def test_fp32_ic4(self):
3535
.to_edge()
3636
.check(list(self.all_operators))
3737
.partition()
38-
.check(["torch.ops.executorch_call_delegate"])
38+
.check(["torch.ops.higher_order.executorch_call_delegate"])
3939
.check_not(list(self.all_operators))
4040
.to_executorch()
4141
.serialize()
@@ -56,7 +56,7 @@ def test_qs8_ic4(self):
5656
.to_edge()
5757
.check(list(ops_after_quantization))
5858
.partition()
59-
.check(["torch.ops.executorch_call_delegate"])
59+
.check(["torch.ops.higher_order.executorch_call_delegate"])
6060
.check_not(list(ops_after_quantization))
6161
.to_executorch()
6262
.serialize()

backends/xnnpack/test/models/mobilenet_v2.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,7 @@ def test_fp32_mv2(self):
3535
.to_edge()
3636
.check(list(self.all_operators))
3737
.partition()
38-
.check(["torch.ops.executorch_call_delegate"])
38+
.check(["torch.ops.higher_order.executorch_call_delegate"])
3939
.check_not(list(self.all_operators))
4040
.to_executorch()
4141
.serialize()
@@ -56,7 +56,7 @@ def test_qs8_mv2(self):
5656
.to_edge()
5757
.check(list(ops_after_quantization))
5858
.partition()
59-
.check(["torch.ops.executorch_call_delegate"])
59+
.check(["torch.ops.higher_order.executorch_call_delegate"])
6060
.check_not(list(ops_after_quantization))
6161
.to_executorch()
6262
.serialize()

backends/xnnpack/test/models/mobilenet_v3.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,7 @@ def test_fp32_mv3(self):
3737
.to_edge()
3838
.check(list(self.all_operators))
3939
.partition()
40-
.check(["torch.ops.executorch_call_delegate"])
40+
.check(["torch.ops.higher_order.executorch_call_delegate"])
4141
.check_not(list(self.all_operators))
4242
.to_executorch()
4343
.serialize()
@@ -58,7 +58,7 @@ def test_qs8_mv3(self):
5858
.to_edge()
5959
.check(list(ops_after_quantization))
6060
.partition()
61-
.check(["torch.ops.executorch_call_delegate"])
61+
.check(["torch.ops.higher_order.executorch_call_delegate"])
6262
.check_not(list(ops_after_lowering))
6363
.to_executorch()
6464
.serialize()

backends/xnnpack/test/models/torchvision_vit.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -53,7 +53,7 @@ def test_fp32_vit(self):
5353
.to_edge()
5454
.check(list(self.all_operators))
5555
.partition()
56-
.check(["torch.ops.executorch_call_delegate"])
56+
.check(["torch.ops.higher_order.executorch_call_delegate"])
5757
.check_not(list(lowerable_xnn_operators))
5858
.to_executorch()
5959
.serialize()

backends/xnnpack/test/ops/add.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ def test_fp32_add(self):
3434
.to_edge()
3535
.check_count({"executorch_exir_dialects_edge__ops_aten_add_Tensor": 4})
3636
.partition()
37-
.check_count({"torch.ops.executorch_call_delegate": 1})
37+
.check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
3838
.check_not(["executorch_exir_dialects_edge__ops_aten_add_Tensor"])
3939
.to_executorch()
4040
.serialize()
@@ -53,7 +53,7 @@ def test_qs8_add(self):
5353
.to_edge()
5454
.check_count({"executorch_exir_dialects_edge__ops_aten_add_Tensor": 4})
5555
.partition(Partition(partitioner=XnnpackQuantizedPartitioner))
56-
.check_count({"torch.ops.executorch_call_delegate": 1})
56+
.check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
5757
.check_not(["executorch_exir_dialects_edge__ops_aten_add_Tensor"])
5858
.check_not(["torch.ops.quantized_decomposed"])
5959
.to_executorch()
@@ -80,7 +80,7 @@ def test_fp32_add_relu(self):
8080
.partition()
8181
.check_not(["executorch_exir_dialects_edge__ops_aten_add_Tensor"])
8282
.check_not(["executorch_exir_dialects_edge__ops_aten_relu_default"])
83-
.check_count({"torch.ops.executorch_call_delegate": 1})
83+
.check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
8484
.to_executorch()
8585
.serialize()
8686
.run_method()
@@ -103,7 +103,7 @@ def test_qs8_add_relu(self):
103103
.check_not(["executorch_exir_dialects_edge__ops_aten_add_Tensor"])
104104
.check_not(["executorch_exir_dialects_edge__ops_aten_relu_default"])
105105
.check_not(["torch.ops.quantized_decomposed"])
106-
.check_count({"torch.ops.executorch_call_delegate": 1})
106+
.check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
107107
.to_executorch()
108108
.serialize()
109109
.run_method()

backends/xnnpack/test/ops/bilinear2d.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -86,7 +86,7 @@ def test_fp32_static_resize_bilinear2d(self):
8686
.check(self.ops)
8787
.partition()
8888
.check_not(self.ops)
89-
.check_count({"torch.ops.executorch_call_delegate": 1})
89+
.check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
9090
.to_executorch()
9191
.serialize()
9292
.run_method()
@@ -102,7 +102,7 @@ def test_fp32_static_resize_bilinear2d_with_align_cornesr(self):
102102
.check(self.ops)
103103
.partition()
104104
.check_not(self.ops)
105-
.check_count({"torch.ops.executorch_call_delegate": 1})
105+
.check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
106106
.to_executorch()
107107
.serialize()
108108
.run_method()
@@ -127,5 +127,5 @@ def test_fp32_static_resize_bilinear2d_antialiased(self):
127127
"executorch_exir_dialects_edge__ops_aten__upsample_bilinear2d_aa_default": 2
128128
}
129129
)
130-
.check_not(["torch.ops.executorch_call_delegate"])
130+
.check_not(["torch.ops.higher_order.executorch_call_delegate"])
131131
)

backends/xnnpack/test/ops/cat.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@ def test_fp32_cat(self):
2727
.to_edge()
2828
.check_count({"executorch_exir_dialects_edge__ops_aten_cat": 1})
2929
.partition()
30-
.check_count({"torch.ops.executorch_call_delegate": 1})
30+
.check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
3131
.check_not(["executorch_exir_dialects_edge__ops_aten_cat"])
3232
.to_executorch()
3333
.serialize()
@@ -51,7 +51,7 @@ def test_fp32_cat_negative_dim(self):
5151
.to_edge()
5252
.check_count({"executorch_exir_dialects_edge__ops_aten_cat": 1})
5353
.partition()
54-
.check_count({"torch.ops.executorch_call_delegate": 1})
54+
.check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
5555
.check_not(["executorch_exir_dialects_edge__ops_aten_cat"])
5656
.to_executorch()
5757
.serialize()

backends/xnnpack/test/ops/conv1d.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -50,7 +50,7 @@ def test_conv1d(self):
5050
)
5151
.partition()
5252
.check_not(["executorch_exir_dialects_edge__ops_aten_convolution_default"])
53-
.check_count({"torch.ops.executorch_call_delegate": 1})
53+
.check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
5454
.to_executorch()
5555
.serialize()
5656
.run_method()

backends/xnnpack/test/ops/conv2d.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -76,7 +76,7 @@ def _test(
7676
)
7777
.partition()
7878
.check_not(["executorch_exir_dialects_edge__ops_aten_convolution_default"])
79-
.check_count({"torch.ops.executorch_call_delegate": 1})
79+
.check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
8080
.to_executorch()
8181
.serialize()
8282
.run_method()

docs/source/tutorial-xnnpack-delegate-lowering.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -48,12 +48,12 @@ GraphModule(
4848

4949
def forward(self, arg314_1):
5050
lowered_module_0 = self.lowered_module_0
51-
executorch_call_delegate = torch.ops.executorch_call_delegate(lowered_module_0, arg314_1); lowered_module_0 = arg314_1 = None
51+
executorch_call_delegate = torch.ops.higher_order.executorch_call_delegate(lowered_module_0, arg314_1); lowered_module_0 = arg314_1 = None
5252
getitem = executorch_call_delegate[0]; executorch_call_delegate = None
5353
aten_view_copy_default = executorch_exir_dialects_edge__ops_aten_view_copy_default(getitem, [1, 1280]); getitem = None
5454
aten_clone_default = executorch_exir_dialects_edge__ops_aten_clone_default(aten_view_copy_default); aten_view_copy_default = None
5555
lowered_module_1 = self.lowered_module_1
56-
executorch_call_delegate_1 = torch.ops.executorch_call_delegate(lowered_module_1, aten_clone_default); lowered_module_1 = aten_clone_default = None
56+
executorch_call_delegate_1 = torch.ops.higher_order.executorch_call_delegate(lowered_module_1, aten_clone_default); lowered_module_1 = aten_clone_default = None
5757
getitem_1 = executorch_call_delegate_1[0]; executorch_call_delegate_1 = None
5858
return (getitem_1,)
5959
```

docs/source/tutorials_source/export-to-executorch-tutorial.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -471,7 +471,7 @@ def forward(self, x):
471471
print(exported_program.graph_module.lowered_module_0.original_module)
472472

473473
######################################################################
474-
# Notice that there is now a ``torch.ops.executorch_call_delegate`` node in the
474+
# Notice that there is now a ``torch.ops.higher_order.executorch_call_delegate`` node in the
475475
# graph, which is calling ``lowered_module_0``. Additionally, the contents of
476476
# ``lowered_module_0`` are the same as the ``lowered_module`` we created
477477
# previously.
@@ -513,7 +513,7 @@ def f(a, x, b):
513513
print(delegated_program.graph_module.lowered_module_1.original_module)
514514

515515
######################################################################
516-
# Notice that there are now 2 ``torch.ops.executorch_call_delegate`` nodes in the
516+
# Notice that there are now 2 ``torch.ops.higher_order.executorch_call_delegate`` nodes in the
517517
# graph, one containing the operations `add, mul` and the other containing the
518518
# operations `mul, add`.
519519
#

exir/backend/test/backend_with_compiler_demo.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -62,7 +62,7 @@ def forward(self, x):
6262
graph():
6363
%arg0_1 : [#users=2] = placeholder[target=arg0_1]
6464
%lowered_module_0 : [#users=1] = get_attr[target=lowered_module_0]
65-
%executorch_call_delegate : [#users=1] = call_function[target=torch.ops.executorch_call_delegate](args = (%lowered_module_0, forward, %arg0_1), kwargs = {})
65+
%executorch_call_delegate : [#users=1] = call_function[target=torch.ops.higher_order.executorch_call_delegate](args = (%lowered_module_0, forward, %arg0_1), kwargs = {})
6666
return [executorch_call_delegate]
6767
6868
Args:

exir/backend/test/test_backends.py

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -171,7 +171,9 @@ def forward(self, x):
171171

172172
# Check that there exists a call_delegate, representing the call to the
173173
# delegated function
174-
FileCheck().check("torch.ops.executorch_call_delegate").run(graph_module.code)
174+
FileCheck().check("torch.ops.higher_order.executorch_call_delegate").run(
175+
graph_module.code
176+
)
175177
lowered_submodules = get_lowered_submodules(graph_module)
176178
self.assertEqual(len(lowered_submodules), 1)
177179

@@ -386,7 +388,9 @@ def forward(self, x):
386388

387389
# Check that there exists a call_delegate op, representing the call to the
388390
# delegated function
389-
FileCheck().check("torch.ops.executorch_call_delegate").run(graph_module.code)
391+
FileCheck().check("torch.ops.higher_order.executorch_call_delegate").run(
392+
graph_module.code
393+
)
390394

391395
for node in graph_module.graph.nodes:
392396
if node.op == "call_function" and node.target == executorch_call_delegate:

exir/backend/test/test_backends_lifted.py

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -198,7 +198,9 @@ def forward(self, x):
198198

199199
# Check that there exists a call_delegate, representing the call to the
200200
# delegated function
201-
FileCheck().check("torch.ops.executorch_call_delegate").run(graph_module.code)
201+
FileCheck().check("torch.ops.higher_order.executorch_call_delegate").run(
202+
graph_module.code
203+
)
202204
lowered_submodules = get_lowered_submodules(graph_module)
203205
self.assertEqual(len(lowered_submodules), 1)
204206

@@ -415,7 +417,9 @@ def forward(self, x):
415417

416418
# Check that there exists a call_delegate op, representing the call to the
417419
# delegated function
418-
FileCheck().check("torch.ops.executorch_call_delegate").run(graph_module.code)
420+
FileCheck().check("torch.ops.higher_order.executorch_call_delegate").run(
421+
graph_module.code
422+
)
419423

420424
for node in graph_module.graph.nodes:
421425
if node.op == "call_function" and node.target == executorch_call_delegate:

exir/delegate.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -24,9 +24,7 @@
2424
from torch.utils._pytree import tree_flatten
2525

2626

27-
executorch_call_delegate = HigherOrderOperator(
28-
"executorch_call_delegate", _deprecated_global_ns=True
29-
)
27+
executorch_call_delegate = HigherOrderOperator("executorch_call_delegate")
3028
# pyre-ignore
3129
executorch_call_delegate.fallthrough(torch._C.DispatchKey.PythonDispatcher)
3230
# pyre-ignore

exir/tests/test_delegate.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -44,7 +44,7 @@ def g(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
4444
)
4545

4646
def f(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
47-
return torch.ops.executorch_call_delegate(lowered_module, x, y)
47+
return torch.ops.higher_order.executorch_call_delegate(lowered_module, x, y)
4848

4949
orig_res = f(*inputs)
5050
gm = exir.capture(
@@ -55,7 +55,7 @@ def f(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
5555
),
5656
)
5757
FileCheck().check("lowered_module_0").check(
58-
"torch.ops.executorch_call_delegate"
58+
"torch.ops.higher_order.executorch_call_delegate"
5959
).run(gm.exported_program.graph_module.code)
6060
self.assertTrue(torch.allclose(orig_res, gm(*inputs)))
6161

@@ -77,7 +77,7 @@ def test_to_backend(self) -> None:
7777
# Check that there exists a call_delegate, representing the call to the
7878
# delegated function
7979
FileCheck().check("lowered_module_0").check(
80-
"torch.ops.executorch_call_delegate"
80+
"torch.ops.higher_order.executorch_call_delegate"
8181
).run(graph_module.code)
8282

8383
# Check that there does not exist an add node (from the non-delegated
@@ -90,7 +90,7 @@ def test_to_backend(self) -> None:
9090
for node in graph_module.graph.nodes:
9191
if (
9292
node.op == "call_function"
93-
and node.target == torch.ops.executorch_call_delegate
93+
and node.target == torch.ops.higher_order.executorch_call_delegate
9494
):
9595
# Check that the first argument is the lowered backend module
9696
# (which we got from a getattr)

exir/tests/test_quant_lowering_custom_backend_pass.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -594,7 +594,7 @@ def test(self) -> None:
594594
# Check the toplevel graph
595595
FileCheck().check(
596596
"executorch_exir_dialects_edge__ops_quantized_decomposed_quantize_per_tensor_default"
597-
).check("torch.ops.executorch_call_delegate").check(
597+
).check("torch.ops.higher_order.executorch_call_delegate").check(
598598
"executorch_exir_dialects_edge__ops_quantized_decomposed_dequantize_per_tensor_default"
599599
).check(
600600
"executorch_exir_dialects_edge__ops_quantized_decomposed_dequantize_per_tensor_default"

exir/verification/arg_validator.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@ class RunHigherOrderOperatorError(Exception):
1818
"""
1919
Raised when an we try to run delegate or other HigherOrderOperator in a graph module.
2020
E.g., %executorch_call_delegate : [#users=1] = call_function[
21-
target=torch.ops.executorch_call_delegate](args = (%lowered_module_0, %arg0_1), kwargs = {})
21+
target=torch.ops.higher_order.executorch_call_delegate](args = (%lowered_module_0, %arg0_1), kwargs = {})
2222
"""
2323

2424
def __init__(self, message: str) -> None:

0 commit comments

Comments
 (0)