From 1392f4f4e5b5fdb4031f3c371a9b11df3aaf0476 Mon Sep 17 00:00:00 2001 From: Yidi Wu Date: Tue, 7 Nov 2023 10:03:24 -0800 Subject: [PATCH] Move executorch_call_delegate to torch.ops.higher_order.executorch_call_delegate (#1149) Summary: Move torch.ops.executorch_call_delegate to torch.ops.higher_order.executorch_call_delegate. This is to better organize the namespace for all higher order operators. Differential Revision: D51032881 --- backends/xnnpack/test/models/inception_v3.py | 4 ++-- backends/xnnpack/test/models/inception_v4.py | 4 ++-- backends/xnnpack/test/models/mobilenet_v2.py | 4 ++-- backends/xnnpack/test/models/mobilenet_v3.py | 4 ++-- backends/xnnpack/test/models/torchvision_vit.py | 2 +- backends/xnnpack/test/ops/add.py | 8 ++++---- backends/xnnpack/test/ops/bilinear2d.py | 6 +++--- backends/xnnpack/test/ops/cat.py | 4 ++-- backends/xnnpack/test/ops/conv1d.py | 2 +- backends/xnnpack/test/ops/conv2d.py | 2 +- docs/source/tutorial-xnnpack-delegate-lowering.md | 4 ++-- .../tutorials_source/export-to-executorch-tutorial.py | 4 ++-- exir/backend/test/backend_with_compiler_demo.py | 2 +- exir/backend/test/test_backends.py | 8 ++++++-- exir/backend/test/test_backends_lifted.py | 8 ++++++-- exir/delegate.py | 4 +--- exir/tests/test_delegate.py | 8 ++++---- exir/tests/test_quant_lowering_custom_backend_pass.py | 2 +- exir/verification/arg_validator.py | 2 +- 19 files changed, 44 insertions(+), 38 deletions(-) diff --git a/backends/xnnpack/test/models/inception_v3.py b/backends/xnnpack/test/models/inception_v3.py index 7b028c04fc4..aff9a6cb6f4 100644 --- a/backends/xnnpack/test/models/inception_v3.py +++ b/backends/xnnpack/test/models/inception_v3.py @@ -37,7 +37,7 @@ def test_fp32_ic3(self): .to_edge() .check(list(self.all_operators)) .partition() - .check(["torch.ops.executorch_call_delegate"]) + .check(["torch.ops.higher_order.executorch_call_delegate"]) .check_not(list(self.all_operators)) .to_executorch() .serialize() @@ -58,7 +58,7 @@ def test_qs8_ic3(self): .to_edge() .check(list(ops_after_quantization)) .partition() - .check(["torch.ops.executorch_call_delegate"]) + .check(["torch.ops.higher_order.executorch_call_delegate"]) .check_not(list(ops_after_quantization)) .to_executorch() .serialize() diff --git a/backends/xnnpack/test/models/inception_v4.py b/backends/xnnpack/test/models/inception_v4.py index a098f760561..534fb90ad6c 100644 --- a/backends/xnnpack/test/models/inception_v4.py +++ b/backends/xnnpack/test/models/inception_v4.py @@ -35,7 +35,7 @@ def test_fp32_ic4(self): .to_edge() .check(list(self.all_operators)) .partition() - .check(["torch.ops.executorch_call_delegate"]) + .check(["torch.ops.higher_order.executorch_call_delegate"]) .check_not(list(self.all_operators)) .to_executorch() .serialize() @@ -56,7 +56,7 @@ def test_qs8_ic4(self): .to_edge() .check(list(ops_after_quantization)) .partition() - .check(["torch.ops.executorch_call_delegate"]) + .check(["torch.ops.higher_order.executorch_call_delegate"]) .check_not(list(ops_after_quantization)) .to_executorch() .serialize() diff --git a/backends/xnnpack/test/models/mobilenet_v2.py b/backends/xnnpack/test/models/mobilenet_v2.py index dd51e679cbb..470afbcb5f6 100644 --- a/backends/xnnpack/test/models/mobilenet_v2.py +++ b/backends/xnnpack/test/models/mobilenet_v2.py @@ -35,7 +35,7 @@ def test_fp32_mv2(self): .to_edge() .check(list(self.all_operators)) .partition() - .check(["torch.ops.executorch_call_delegate"]) + .check(["torch.ops.higher_order.executorch_call_delegate"]) .check_not(list(self.all_operators)) 
.to_executorch() .serialize() @@ -56,7 +56,7 @@ def test_qs8_mv2(self): .to_edge() .check(list(ops_after_quantization)) .partition() - .check(["torch.ops.executorch_call_delegate"]) + .check(["torch.ops.higher_order.executorch_call_delegate"]) .check_not(list(ops_after_quantization)) .to_executorch() .serialize() diff --git a/backends/xnnpack/test/models/mobilenet_v3.py b/backends/xnnpack/test/models/mobilenet_v3.py index 76e167a89a5..ffa3cfbae2e 100644 --- a/backends/xnnpack/test/models/mobilenet_v3.py +++ b/backends/xnnpack/test/models/mobilenet_v3.py @@ -37,7 +37,7 @@ def test_fp32_mv3(self): .to_edge() .check(list(self.all_operators)) .partition() - .check(["torch.ops.executorch_call_delegate"]) + .check(["torch.ops.higher_order.executorch_call_delegate"]) .check_not(list(self.all_operators)) .to_executorch() .serialize() @@ -58,7 +58,7 @@ def test_qs8_mv3(self): .to_edge() .check(list(ops_after_quantization)) .partition() - .check(["torch.ops.executorch_call_delegate"]) + .check(["torch.ops.higher_order.executorch_call_delegate"]) .check_not(list(ops_after_lowering)) .to_executorch() .serialize() diff --git a/backends/xnnpack/test/models/torchvision_vit.py b/backends/xnnpack/test/models/torchvision_vit.py index 9ddc47df84c..226cc73f401 100644 --- a/backends/xnnpack/test/models/torchvision_vit.py +++ b/backends/xnnpack/test/models/torchvision_vit.py @@ -53,7 +53,7 @@ def test_fp32_vit(self): .to_edge() .check(list(self.all_operators)) .partition() - .check(["torch.ops.executorch_call_delegate"]) + .check(["torch.ops.higher_order.executorch_call_delegate"]) .check_not(list(lowerable_xnn_operators)) .to_executorch() .serialize() diff --git a/backends/xnnpack/test/ops/add.py b/backends/xnnpack/test/ops/add.py index 7cdc4515c04..7ff02344656 100644 --- a/backends/xnnpack/test/ops/add.py +++ b/backends/xnnpack/test/ops/add.py @@ -34,7 +34,7 @@ def test_fp32_add(self): .to_edge() .check_count({"executorch_exir_dialects_edge__ops_aten_add_Tensor": 4}) .partition() - .check_count({"torch.ops.executorch_call_delegate": 1}) + .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) .check_not(["executorch_exir_dialects_edge__ops_aten_add_Tensor"]) .to_executorch() .serialize() @@ -53,7 +53,7 @@ def test_qs8_add(self): .to_edge() .check_count({"executorch_exir_dialects_edge__ops_aten_add_Tensor": 4}) .partition(Partition(partitioner=XnnpackQuantizedPartitioner)) - .check_count({"torch.ops.executorch_call_delegate": 1}) + .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) .check_not(["executorch_exir_dialects_edge__ops_aten_add_Tensor"]) .check_not(["torch.ops.quantized_decomposed"]) .to_executorch() @@ -80,7 +80,7 @@ def test_fp32_add_relu(self): .partition() .check_not(["executorch_exir_dialects_edge__ops_aten_add_Tensor"]) .check_not(["executorch_exir_dialects_edge__ops_aten_relu_default"]) - .check_count({"torch.ops.executorch_call_delegate": 1}) + .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) .to_executorch() .serialize() .run_method() @@ -103,7 +103,7 @@ def test_qs8_add_relu(self): .check_not(["executorch_exir_dialects_edge__ops_aten_add_Tensor"]) .check_not(["executorch_exir_dialects_edge__ops_aten_relu_default"]) .check_not(["torch.ops.quantized_decomposed"]) - .check_count({"torch.ops.executorch_call_delegate": 1}) + .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) .to_executorch() .serialize() .run_method() diff --git a/backends/xnnpack/test/ops/bilinear2d.py b/backends/xnnpack/test/ops/bilinear2d.py index 
127f620c262..7110bd5ad7f 100644 --- a/backends/xnnpack/test/ops/bilinear2d.py +++ b/backends/xnnpack/test/ops/bilinear2d.py @@ -86,7 +86,7 @@ def test_fp32_static_resize_bilinear2d(self): .check(self.ops) .partition() .check_not(self.ops) - .check_count({"torch.ops.executorch_call_delegate": 1}) + .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) .to_executorch() .serialize() .run_method() @@ -102,7 +102,7 @@ def test_fp32_static_resize_bilinear2d_with_align_cornesr(self): .check(self.ops) .partition() .check_not(self.ops) - .check_count({"torch.ops.executorch_call_delegate": 1}) + .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) .to_executorch() .serialize() .run_method() @@ -127,5 +127,5 @@ def test_fp32_static_resize_bilinear2d_antialiased(self): "executorch_exir_dialects_edge__ops_aten__upsample_bilinear2d_aa_default": 2 } ) - .check_not(["torch.ops.executorch_call_delegate"]) + .check_not(["torch.ops.higher_order.executorch_call_delegate"]) ) diff --git a/backends/xnnpack/test/ops/cat.py b/backends/xnnpack/test/ops/cat.py index cb6496e4abb..e08aff8e938 100644 --- a/backends/xnnpack/test/ops/cat.py +++ b/backends/xnnpack/test/ops/cat.py @@ -27,7 +27,7 @@ def test_fp32_cat(self): .to_edge() .check_count({"executorch_exir_dialects_edge__ops_aten_cat": 1}) .partition() - .check_count({"torch.ops.executorch_call_delegate": 1}) + .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) .check_not(["executorch_exir_dialects_edge__ops_aten_cat"]) .to_executorch() .serialize() @@ -51,7 +51,7 @@ def test_fp32_cat_negative_dim(self): .to_edge() .check_count({"executorch_exir_dialects_edge__ops_aten_cat": 1}) .partition() - .check_count({"torch.ops.executorch_call_delegate": 1}) + .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) .check_not(["executorch_exir_dialects_edge__ops_aten_cat"]) .to_executorch() .serialize() diff --git a/backends/xnnpack/test/ops/conv1d.py b/backends/xnnpack/test/ops/conv1d.py index 29dcbd90c23..20c61c720f0 100644 --- a/backends/xnnpack/test/ops/conv1d.py +++ b/backends/xnnpack/test/ops/conv1d.py @@ -50,7 +50,7 @@ def test_conv1d(self): ) .partition() .check_not(["executorch_exir_dialects_edge__ops_aten_convolution_default"]) - .check_count({"torch.ops.executorch_call_delegate": 1}) + .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) .to_executorch() .serialize() .run_method() diff --git a/backends/xnnpack/test/ops/conv2d.py b/backends/xnnpack/test/ops/conv2d.py index 7b50e053708..cc959f471be 100644 --- a/backends/xnnpack/test/ops/conv2d.py +++ b/backends/xnnpack/test/ops/conv2d.py @@ -76,7 +76,7 @@ def _test( ) .partition() .check_not(["executorch_exir_dialects_edge__ops_aten_convolution_default"]) - .check_count({"torch.ops.executorch_call_delegate": 1}) + .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) .to_executorch() .serialize() .run_method() diff --git a/docs/source/tutorial-xnnpack-delegate-lowering.md b/docs/source/tutorial-xnnpack-delegate-lowering.md index d9a086dc7a6..ee477a96288 100644 --- a/docs/source/tutorial-xnnpack-delegate-lowering.md +++ b/docs/source/tutorial-xnnpack-delegate-lowering.md @@ -48,12 +48,12 @@ GraphModule( def forward(self, arg314_1): lowered_module_0 = self.lowered_module_0 - executorch_call_delegate = torch.ops.executorch_call_delegate(lowered_module_0, arg314_1); lowered_module_0 = arg314_1 = None + executorch_call_delegate = torch.ops.higher_order.executorch_call_delegate(lowered_module_0, arg314_1); lowered_module_0 = 
arg314_1 = None getitem = executorch_call_delegate[0]; executorch_call_delegate = None aten_view_copy_default = executorch_exir_dialects_edge__ops_aten_view_copy_default(getitem, [1, 1280]); getitem = None aten_clone_default = executorch_exir_dialects_edge__ops_aten_clone_default(aten_view_copy_default); aten_view_copy_default = None lowered_module_1 = self.lowered_module_1 - executorch_call_delegate_1 = torch.ops.executorch_call_delegate(lowered_module_1, aten_clone_default); lowered_module_1 = aten_clone_default = None + executorch_call_delegate_1 = torch.ops.higher_order.executorch_call_delegate(lowered_module_1, aten_clone_default); lowered_module_1 = aten_clone_default = None getitem_1 = executorch_call_delegate_1[0]; executorch_call_delegate_1 = None return (getitem_1,) ``` diff --git a/docs/source/tutorials_source/export-to-executorch-tutorial.py b/docs/source/tutorials_source/export-to-executorch-tutorial.py index a420923852c..8628ca8d626 100644 --- a/docs/source/tutorials_source/export-to-executorch-tutorial.py +++ b/docs/source/tutorials_source/export-to-executorch-tutorial.py @@ -471,7 +471,7 @@ def forward(self, x): print(exported_program.graph_module.lowered_module_0.original_module) ###################################################################### -# Notice that there is now a ``torch.ops.executorch_call_delegate`` node in the +# Notice that there is now a ``torch.ops.higher_order.executorch_call_delegate`` node in the # graph, which is calling ``lowered_module_0``. Additionally, the contents of # ``lowered_module_0`` are the same as the ``lowered_module`` we created # previously. @@ -513,7 +513,7 @@ def f(a, x, b): print(delegated_program.graph_module.lowered_module_1.original_module) ###################################################################### -# Notice that there are now 2 ``torch.ops.executorch_call_delegate`` nodes in the +# Notice that there are now 2 ``torch.ops.higher_order.executorch_call_delegate`` nodes in the # graph, one containing the operations `add, mul` and the other containing the # operations `mul, add`. 
# diff --git a/exir/backend/test/backend_with_compiler_demo.py b/exir/backend/test/backend_with_compiler_demo.py index 26a3329578d..b714706147c 100644 --- a/exir/backend/test/backend_with_compiler_demo.py +++ b/exir/backend/test/backend_with_compiler_demo.py @@ -62,7 +62,7 @@ def forward(self, x): graph(): %arg0_1 : [#users=2] = placeholder[target=arg0_1] %lowered_module_0 : [#users=1] = get_attr[target=lowered_module_0] - %executorch_call_delegate : [#users=1] = call_function[target=torch.ops.executorch_call_delegate](args = (%lowered_module_0, forward, %arg0_1), kwargs = {}) + %executorch_call_delegate : [#users=1] = call_function[target=torch.ops.higher_order.executorch_call_delegate](args = (%lowered_module_0, forward, %arg0_1), kwargs = {}) return [executorch_call_delegate] Args: diff --git a/exir/backend/test/test_backends.py b/exir/backend/test/test_backends.py index 6f03758fffa..a0d59ecf441 100644 --- a/exir/backend/test/test_backends.py +++ b/exir/backend/test/test_backends.py @@ -171,7 +171,9 @@ def forward(self, x): # Check that there exists a call_delegate, representing the call to the # delegated function - FileCheck().check("torch.ops.executorch_call_delegate").run(graph_module.code) + FileCheck().check("torch.ops.higher_order.executorch_call_delegate").run( + graph_module.code + ) lowered_submodules = get_lowered_submodules(graph_module) self.assertEqual(len(lowered_submodules), 1) @@ -386,7 +388,9 @@ def forward(self, x): # Check that there exists a call_delegate op, representing the call to the # delegated function - FileCheck().check("torch.ops.executorch_call_delegate").run(graph_module.code) + FileCheck().check("torch.ops.higher_order.executorch_call_delegate").run( + graph_module.code + ) for node in graph_module.graph.nodes: if node.op == "call_function" and node.target == executorch_call_delegate: diff --git a/exir/backend/test/test_backends_lifted.py b/exir/backend/test/test_backends_lifted.py index dd4d68c9fd4..5a764e8e953 100644 --- a/exir/backend/test/test_backends_lifted.py +++ b/exir/backend/test/test_backends_lifted.py @@ -198,7 +198,9 @@ def forward(self, x): # Check that there exists a call_delegate, representing the call to the # delegated function - FileCheck().check("torch.ops.executorch_call_delegate").run(graph_module.code) + FileCheck().check("torch.ops.higher_order.executorch_call_delegate").run( + graph_module.code + ) lowered_submodules = get_lowered_submodules(graph_module) self.assertEqual(len(lowered_submodules), 1) @@ -415,7 +417,9 @@ def forward(self, x): # Check that there exists a call_delegate op, representing the call to the # delegated function - FileCheck().check("torch.ops.executorch_call_delegate").run(graph_module.code) + FileCheck().check("torch.ops.higher_order.executorch_call_delegate").run( + graph_module.code + ) for node in graph_module.graph.nodes: if node.op == "call_function" and node.target == executorch_call_delegate: diff --git a/exir/delegate.py b/exir/delegate.py index 6829ddcd9e2..37cd3892e57 100644 --- a/exir/delegate.py +++ b/exir/delegate.py @@ -24,9 +24,7 @@ from torch.utils._pytree import tree_flatten -executorch_call_delegate = HigherOrderOperator( - "executorch_call_delegate", _deprecated_global_ns=True -) +executorch_call_delegate = HigherOrderOperator("executorch_call_delegate") # pyre-ignore executorch_call_delegate.fallthrough(torch._C.DispatchKey.PythonDispatcher) # pyre-ignore diff --git a/exir/tests/test_delegate.py b/exir/tests/test_delegate.py index 2ba074e5e91..62835da4981 100644 --- 
a/exir/tests/test_delegate.py +++ b/exir/tests/test_delegate.py @@ -44,7 +44,7 @@ def g(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: ) def f(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: - return torch.ops.executorch_call_delegate(lowered_module, x, y) + return torch.ops.higher_order.executorch_call_delegate(lowered_module, x, y) orig_res = f(*inputs) gm = exir.capture( @@ -55,7 +55,7 @@ def f(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: ), ) FileCheck().check("lowered_module_0").check( - "torch.ops.executorch_call_delegate" + "torch.ops.higher_order.executorch_call_delegate" ).run(gm.exported_program.graph_module.code) self.assertTrue(torch.allclose(orig_res, gm(*inputs))) @@ -77,7 +77,7 @@ def test_to_backend(self) -> None: # Check that there exists a call_delegate, representing the call to the # delegated function FileCheck().check("lowered_module_0").check( - "torch.ops.executorch_call_delegate" + "torch.ops.higher_order.executorch_call_delegate" ).run(graph_module.code) # Check that there does not exist an add node (from the non-delegated @@ -90,7 +90,7 @@ def test_to_backend(self) -> None: for node in graph_module.graph.nodes: if ( node.op == "call_function" - and node.target == torch.ops.executorch_call_delegate + and node.target == torch.ops.higher_order.executorch_call_delegate ): # Check that the first argument is the lowered backend module # (which we got from a getattr) diff --git a/exir/tests/test_quant_lowering_custom_backend_pass.py b/exir/tests/test_quant_lowering_custom_backend_pass.py index 99e6dd1253a..697e43d6d34 100644 --- a/exir/tests/test_quant_lowering_custom_backend_pass.py +++ b/exir/tests/test_quant_lowering_custom_backend_pass.py @@ -594,7 +594,7 @@ def test(self) -> None: # Check the toplevel graph FileCheck().check( "executorch_exir_dialects_edge__ops_quantized_decomposed_quantize_per_tensor_default" - ).check("torch.ops.executorch_call_delegate").check( + ).check("torch.ops.higher_order.executorch_call_delegate").check( "executorch_exir_dialects_edge__ops_quantized_decomposed_dequantize_per_tensor_default" ).check( "executorch_exir_dialects_edge__ops_quantized_decomposed_dequantize_per_tensor_default" diff --git a/exir/verification/arg_validator.py b/exir/verification/arg_validator.py index 9de28658169..e26cade6a8c 100644 --- a/exir/verification/arg_validator.py +++ b/exir/verification/arg_validator.py @@ -18,7 +18,7 @@ class RunHigherOrderOperatorError(Exception): """ Raised when an we try to run delegate or other HigherOrderOperator in a graph module. E.g., %executorch_call_delegate : [#users=1] = call_function[ - target=torch.ops.executorch_call_delegate](args = (%lowered_module_0, %arg0_1), kwargs = {}) + target=torch.ops.higher_order.executorch_call_delegate](args = (%lowered_module_0, %arg0_1), kwargs = {}) """ def __init__(self, message: str) -> None:
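
For readers updating their own graph-walking code after this rename, the sketch below (not part of the patch; `graph_module` is assumed to be an exported-and-lowered `torch.fx.GraphModule` like the ones produced in the tests above) shows the caller-side effect: the delegate call node is now matched under `torch.ops.higher_order`, mirroring the change made in `exir/tests/test_delegate.py`.

```python
import torch

def find_delegate_calls(graph_module: torch.fx.GraphModule):
    """Return all call_function nodes that invoke a lowered delegate module."""
    return [
        node
        for node in graph_module.graph.nodes
        if node.op == "call_function"
        # Old spelling, removed by this patch:
        #   node.target == torch.ops.executorch_call_delegate
        and node.target == torch.ops.higher_order.executorch_call_delegate
    ]
```

String-based checks change the same way: `FileCheck().check("torch.ops.higher_order.executorch_call_delegate")` replaces the old `"torch.ops.executorch_call_delegate"` pattern.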