diff --git a/backends/arm/test/passes/test_cast_int64_pass.py b/backends/arm/test/passes/test_cast_int64_pass.py
index 988b95924fe..b9ddfcdec86 100644
--- a/backends/arm/test/passes/test_cast_int64_pass.py
+++ b/backends/arm/test/passes/test_cast_int64_pass.py
@@ -6,23 +6,25 @@
 from typing import Tuple
 
 import torch
-from executorch.backends.arm._passes.cast_int64_pass import CastInt64BuffersToInt32Pass
+from executorch.backends.arm._passes import CastInt64BuffersToInt32Pass
+from executorch.backends.arm.test import common
 
 from executorch.backends.arm.test.tester.test_pipeline import PassPipeline
 
 input_t = Tuple[torch.Tensor]  # Input x
 
 
 class Int64Model(torch.nn.Module):
+    test_data = {
+        "rand": (torch.rand(4),),
+    }
 
     def forward(self, x: torch.Tensor):
         return x + 3
 
-    def get_inputs(self) -> input_t:
-        return (torch.rand(4),)
-
 
-def test_int64_model_tosa_BI():
+@common.parametrize("test_data", Int64Model.test_data)
+def test_int64_model(test_data: input_t):
     module = Int64Model()
     op_checks = {
         "executorch_exir_dialects_edge__ops_dim_order_ops__to_dim_order_copy_default": 1,
@@ -30,13 +32,12 @@ def test_int64_model_tosa_BI():
     }
     pipeline = PassPipeline[input_t](
         module,
-        module.get_inputs(),
-        tosa_version="TOSA-0.80+BI",
+        test_data,
+        quantize=False,
         ops_before_pass=op_checks,
         ops_after_pass=op_checks,
         passes_with_exported_program=[CastInt64BuffersToInt32Pass],
     )
-    pipeline.pop_stage("quantize")
     pipeline.run()
 
     exported_program = pipeline.tester.get_artifact("RunPasses").exported_program()
diff --git a/backends/arm/test/passes/test_convert_expand_copy_to_repeat.py b/backends/arm/test/passes/test_convert_expand_copy_to_repeat.py
index 5d83bc82f22..38c1cf3296e 100644
--- a/backends/arm/test/passes/test_convert_expand_copy_to_repeat.py
+++ b/backends/arm/test/passes/test_convert_expand_copy_to_repeat.py
@@ -35,7 +35,7 @@ def test_expand_to_repeat_tosa_BI():
     pipeline = PassPipeline[input_t](
         module,
         module.get_inputs(),
-        tosa_version="TOSA-0.80+BI",
+        quantize=True,
         ops_before_pass={
             "executorch_exir_dialects_edge__ops_aten_expand_copy_default": 1,
         },
diff --git a/backends/arm/test/passes/test_convert_split_to_slice.py b/backends/arm/test/passes/test_convert_split_to_slice.py
index d4fdffe3b01..7ca6b71236f 100644
--- a/backends/arm/test/passes/test_convert_split_to_slice.py
+++ b/backends/arm/test/passes/test_convert_split_to_slice.py
@@ -49,7 +49,7 @@ def test_split_to_slice_tosa_BI(module):
     pipeline = PassPipeline[input_t](
         module,
         module.get_inputs(),
-        tosa_version="TOSA-0.80+BI",
+        quantize=True,
         ops_before_pass={
             "executorch_exir_dialects_edge__ops_aten_split_with_sizes_copy_default": 1,
         },
diff --git a/backends/arm/test/passes/test_convert_to_clamp.py b/backends/arm/test/passes/test_convert_to_clamp.py
index 0b106b7bc82..c35dd1c72a5 100644
--- a/backends/arm/test/passes/test_convert_to_clamp.py
+++ b/backends/arm/test/passes/test_convert_to_clamp.py
@@ -3,18 +3,21 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
 
-import unittest
+
+from typing import Tuple
 
 import torch
 
 from executorch.backends.arm._passes.convert_to_clamp import ConvertToClampPass
 from executorch.backends.arm.test import common
-from executorch.backends.arm.test.tester.arm_tester import ArmTester
+from executorch.backends.arm.test.tester.test_pipeline import PassPipeline
 
-from executorch.backends.xnnpack.test.tester.tester import RunPasses
+input_t = Tuple[torch.Tensor]  # Input x
 
 
 class HardTanh(torch.nn.Module):
+    test_data = {"rand": (torch.rand(1, 64, 64, 3),)}
+
     def __init__(self):
         super().__init__()
 
@@ -23,11 +26,10 @@ def __init__(self):
     def forward(self, x):
         return self.hardtanh(x)
 
-    def get_inputs(self):
-        return (torch.rand(1, 64, 64, 3),)
-
 
 class ReLU(torch.nn.Module):
+    test_data = {"rand": (torch.rand(1, 64, 64, 3),)}
+
     def __init__(self):
         super().__init__()
 
@@ -36,45 +38,55 @@ def __init__(self):
     def forward(self, x):
         return self.relu(x)
 
-    def get_inputs(self):
-        return (torch.rand(1, 64, 64, 3),)
-
-
-class TestConvertToClampPass(unittest.TestCase):
-    """
-    Tests the ConvertToClampPass which converts hardtanh.default and relu.default to clamp.default
-    """
-
-    def test_tosa_MI_hardtahn(self):
-        module = HardTanh()
-        test_pass_stage = RunPasses([ConvertToClampPass])
-        (
-            ArmTester(
-                module,
-                example_inputs=module.get_inputs(),
-                compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"),
-            )
-            .export()
-            .to_edge()
-            .check(["executorch_exir_dialects_edge__ops_aten_hardtanh_default"])
-            .run_passes(test_pass_stage)
-            .check(["executorch_exir_dialects_edge__ops_aten_clamp_default"])
-            .check_not(["executorch_exir_dialects_edge__ops_aten_hardtanh_default"])
-        )
-
-    def test_tosa_MI_relu(self):
-        module = ReLU()
-        test_pass_stage = RunPasses([ConvertToClampPass])
-        (
-            ArmTester(
-                module,
-                example_inputs=module.get_inputs(),
-                compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"),
-            )
-            .export()
-            .to_edge()
-            .check(["executorch_exir_dialects_edge__ops_aten_relu_default"])
-            .run_passes(test_pass_stage)
-            .check(["executorch_exir_dialects_edge__ops_aten_clamp_default"])
-            .check_not(["executorch_exir_dialects_edge__ops_aten_relu_default"])
-        )
+
+"""
+Tests the ConvertToClampPass which converts hardtanh.default and relu.default to clamp.default
+"""
+
+
+@common.parametrize("test_data", HardTanh.test_data)
+def test_tosa_MI_hardtahn(test_data: input_t):
+    module = HardTanh()
+    op_checks_before_pass = {
+        "executorch_exir_dialects_edge__ops_aten_hardtanh_default": 1,
+    }
+    op_checks_after_pass = {
+        "executorch_exir_dialects_edge__ops_aten_clamp_default": 1,
+    }
+    op_checks_not_after_pass = [
+        "executorch_exir_dialects_edge__ops_aten_hardtanh_default",
+    ]
+    pipeline = PassPipeline[input_t](
+        module,
+        test_data,
+        quantize=False,
+        ops_before_pass=op_checks_before_pass,
+        ops_after_pass=op_checks_after_pass,
+        ops_not_after_pass=op_checks_not_after_pass,
+        pass_list=[ConvertToClampPass],
+    )
+    pipeline.run()
+
+
+@common.parametrize("test_data", ReLU.test_data)
+def test_tosa_MI_relu(test_data: input_t):
+    module = ReLU()
+    op_checks_before_pass = {
+        "executorch_exir_dialects_edge__ops_aten_relu_default": 1,
+    }
+    op_checks_after_pass = {
+        "executorch_exir_dialects_edge__ops_aten_clamp_default": 1,
+    }
+    op_checks_not_after_pass = [
+        "executorch_exir_dialects_edge__ops_aten_relu_default",
+    ]
+    pipeline = PassPipeline[input_t](
+        module,
+        test_data,
+        quantize=False,
+        ops_before_pass=op_checks_before_pass,
+        ops_after_pass=op_checks_after_pass,
+        ops_not_after_pass=op_checks_not_after_pass,
+        pass_list=[ConvertToClampPass],
+    )
+    pipeline.run()
diff --git a/backends/arm/test/passes/test_decompose_cosine_similarity_pass.py b/backends/arm/test/passes/test_decompose_cosine_similarity_pass.py
index f3fa95ec10c..31b2627b978 100644
--- a/backends/arm/test/passes/test_decompose_cosine_similarity_pass.py
+++ b/backends/arm/test/passes/test_decompose_cosine_similarity_pass.py
@@ -42,11 +42,11 @@ def test_decompose_cosine_similarity_tosa_BI(module):
     pipeline = PassPipeline[input_t](
         module,
         module.get_inputs(),
-        tosa_version="TOSA-0.80+BI",
         ops_before_pass=None,
         ops_not_before_pass=None,
         ops_after_pass=ops_after_pass,
         ops_not_after_pass=None,
         pass_list=[DecomposeCosineSimilarityPass],
+        quantize=True,
     )
     pipeline.run()
diff --git a/backends/arm/test/passes/test_decompose_div_pass.py b/backends/arm/test/passes/test_decompose_div_pass.py
index 71d586c0029..24e18b4f523 100644
--- a/backends/arm/test/passes/test_decompose_div_pass.py
+++ b/backends/arm/test/passes/test_decompose_div_pass.py
@@ -47,7 +47,7 @@ def test_decompose_div_tosa_MI(module):
     pipeline = PassPipeline[input_t](
         module,
         module.get_inputs(),
-        tosa_version="TOSA-0.80+MI",
+        quantize=False,
         ops_before_pass={
             "executorch_exir_dialects_edge__ops_aten_div_Tensor": 1,
         },
diff --git a/backends/arm/test/passes/test_decompose_layernorm_pass.py b/backends/arm/test/passes/test_decompose_layernorm_pass.py
index 40e49e15bc5..9c375ceaf8f 100644
--- a/backends/arm/test/passes/test_decompose_layernorm_pass.py
+++ b/backends/arm/test/passes/test_decompose_layernorm_pass.py
@@ -37,7 +37,7 @@ def test_decompose_layernorm_tosa_MI():
     pipeline = PassPipeline[input_t](
         module,
         module.get_inputs(),
-        tosa_version="TOSA-0.80+MI",
+        quantize=False,
         ops_before_pass={
             "executorch_exir_dialects_edge__ops_aten_native_layer_norm_default": 1,
         },
diff --git a/backends/arm/test/passes/test_decompose_meandim_pass.py b/backends/arm/test/passes/test_decompose_meandim_pass.py
index 6ba9ceff3a7..511959e36cf 100644
--- a/backends/arm/test/passes/test_decompose_meandim_pass.py
+++ b/backends/arm/test/passes/test_decompose_meandim_pass.py
@@ -53,7 +53,7 @@ def test_decompose_meandim_tosa_MI(module):
     pipeline = PassPipeline[input_t](
         module,
         module.get_inputs(),
-        tosa_version="TOSA-0.80+MI",
+        quantize=False,
         ops_before_pass={
             "executorch_exir_dialects_edge__ops_aten_mean_dim": 1,
         },
diff --git a/backends/arm/test/passes/test_decompose_softmax_pass.py b/backends/arm/test/passes/test_decompose_softmax_pass.py
index efb911f03aa..6c7ed7cfb60 100644
--- a/backends/arm/test/passes/test_decompose_softmax_pass.py
+++ b/backends/arm/test/passes/test_decompose_softmax_pass.py
@@ -52,7 +52,7 @@ def test_softmax_basic_tosa_MI():
     pipeline = PassPipeline[input_t](
         module,
         module.get_inputs(),
-        tosa_version="TOSA-0.80+MI",
+        quantize=False,
         ops_before_pass={
             "executorch_exir_dialects_edge__ops_aten__softmax_default": 1,
         },
@@ -79,7 +79,7 @@ def test_softmax_log_tosa_MI():
     pipeline = PassPipeline[input_t](
         module,
         module.get_inputs(),
-        tosa_version="TOSA-0.80+MI",
+        quantize=False,
         ops_before_pass={
             "executorch_exir_dialects_edge__ops_aten__log_softmax_default": 1,
         },
diff --git a/backends/arm/test/passes/test_decompose_var_pass.py b/backends/arm/test/passes/test_decompose_var_pass.py
index fe793dba14b..65357fc2212 100644
--- a/backends/arm/test/passes/test_decompose_var_pass.py
+++ b/backends/arm/test/passes/test_decompose_var_pass.py
@@ -60,7 +60,7 @@ def test_decompose_var_tosa_MI(module):
     pipeline = PassPipeline[input_t](
         module,
         module.get_inputs(),
tosa_version="TOSA-0.80+MI", + quantize=False, ops_before_pass={ "executorch_exir_dialects_edge__ops_aten_var_correction": 1, }, diff --git a/backends/arm/test/passes/test_fold_qdq_pass.py b/backends/arm/test/passes/test_fold_qdq_pass.py index ae9e8feef72..86324d523c6 100644 --- a/backends/arm/test/passes/test_fold_qdq_pass.py +++ b/backends/arm/test/passes/test_fold_qdq_pass.py @@ -7,6 +7,7 @@ import torch from executorch.backends.arm._passes import FoldAndAnnotateQParamsPass +from executorch.backends.arm.test import common from executorch.backends.arm.test.tester.test_pipeline import PassPipeline @@ -14,14 +15,16 @@ class SimpleQuantizeModel(torch.nn.Module): + test_data = { + "rand": (torch.rand(1, 1280, 7, 7), torch.rand(1, 1280, 7, 7)), + } + def forward(self, x, y): return x + torch.max((x + x), (y + y)) - def get_inputs(self) -> input_t: - return (torch.rand(1, 1280, 7, 7), torch.rand(1, 1280, 7, 7)) - -def test_fold_qdq_pass_tosa_BI(): +@common.parametrize("test_data", SimpleQuantizeModel.test_data) +def test_fold_qdq_pass_tosa_BI(test_data: input_t): """ Tests the FoldAndAnnotateQParamsPass which folds dq/q nodes into the node and stores the quantization parameters in meta. @@ -32,8 +35,8 @@ def test_fold_qdq_pass_tosa_BI(): module = SimpleQuantizeModel() pipeline = PassPipeline[input_t]( module, - module.get_inputs(), - tosa_version="TOSA-0.80+BI", + test_data, + quantize=True, ops_before_pass={ "executorch_exir_dialects_edge__ops_quantized_decomposed_dequantize_per_tensor_default": 7, "executorch_exir_dialects_edge__ops_quantized_decomposed_quantize_per_tensor_default": 6, diff --git a/backends/arm/test/passes/test_fuse_batchnorm_pass.py b/backends/arm/test/passes/test_fuse_batchnorm_pass.py index 415aa9f6132..de9181f9fa4 100644 --- a/backends/arm/test/passes/test_fuse_batchnorm_pass.py +++ b/backends/arm/test/passes/test_fuse_batchnorm_pass.py @@ -136,12 +136,12 @@ def forward(self, x): @common.parametrize("module", modules) -def test_fuse_batchnorm_tosa_MI(module): +def test_fuse_batchnorm_tosa_MI(module: torch.nn.Module): """Test various cases where the batchnorm should and shouldn't be fused.""" pipeline = PassPipeline[input_t]( module, module.get_inputs(), - tosa_version="TOSA-0.80+MI", + quantize=False, ops_before_pass=module.ops_before_pass, ops_after_pass=module.ops_after_pass, passes_with_exported_program=[FuseBatchnorm2DPass], diff --git a/backends/arm/test/passes/test_fuse_constant_ops_pass.py b/backends/arm/test/passes/test_fuse_constant_ops_pass.py index 12d85054f79..5e759d7a824 100644 --- a/backends/arm/test/passes/test_fuse_constant_ops_pass.py +++ b/backends/arm/test/passes/test_fuse_constant_ops_pass.py @@ -98,11 +98,11 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: @common.parametrize("module", modules) -def test_fuse_const_ops_tosa_MI(module): +def test_fuse_const_ops_tosa_MI(module: torch.nn.Module): pipeline = PassPipeline[input_t]( module=module, test_data=(torch.rand(1),), - tosa_version="TOSA-0.80+MI", + quantize=False, ops_before_pass=module.ops_before_pass, ops_after_pass=module.ops_after_pass, ops_not_after_pass=module.ops_not_after_pass, @@ -113,8 +113,13 @@ def test_fuse_const_ops_tosa_MI(module): @unittest.skip("Test failing on internal CI") @common.parametrize("module", modules) -def test_fuse_const_ops_tosa_BI(module): +def test_fuse_const_ops_tosa_BI(module: torch.nn.Module): pipeline = TosaPipelineBI[input_t]( - module, (torch.rand(10, 10),), [], [], use_to_edge_transform_and_lower=True + module, + (torch.rand(10, 10),), + [], + [], + 
+        quantize=True,
+        use_to_edge_transform_and_lower=True,
     )
     pipeline.run()
diff --git a/backends/arm/test/passes/test_fuse_equal_placeholders_ops_pass.py b/backends/arm/test/passes/test_fuse_equal_placeholders_ops_pass.py
index 2674f45cf6a..49626eefb71 100644
--- a/backends/arm/test/passes/test_fuse_equal_placeholders_ops_pass.py
+++ b/backends/arm/test/passes/test_fuse_equal_placeholders_ops_pass.py
@@ -60,7 +60,7 @@ def test_fuse_equal_placeholders_constants_tosa_MI():
     pipeline = PassPipeline[input_t](
         module,
         data,
-        tosa_version="TOSA-0.80+MI",
+        quantize=False,
         ops_before_pass=module.ops_before_pass,
         ops_after_pass=module.ops_after_pass,
         passes_with_exported_program=[FuseEqualPlaceholdersPass],
@@ -81,7 +81,7 @@ def test_fuse_equal_placeholders_state_dict_tosa_MI():
     pipeline = PassPipeline[input_t](
         module,
         data,
-        tosa_version="TOSA-0.80+MI",
+        quantize=False,
         ops_before_pass=module.ops_before_pass,
         ops_after_pass=module.ops_after_pass,
         passes_with_exported_program=[FuseEqualPlaceholdersPass],
diff --git a/backends/arm/test/passes/test_insert_table_ops_pass.py b/backends/arm/test/passes/test_insert_table_ops_pass.py
index bdbcef3713d..88ef96d71ab 100644
--- a/backends/arm/test/passes/test_insert_table_ops_pass.py
+++ b/backends/arm/test/passes/test_insert_table_ops_pass.py
@@ -11,26 +11,28 @@
     FoldAndAnnotateQParamsPass,
 )
 from executorch.backends.arm._passes.insert_table_ops import InsertTableOpsPass
+from executorch.backends.arm.test import common
 from executorch.backends.arm.test.tester.test_pipeline import PassPipeline
 
 input_t = Tuple[torch.Tensor]  # Input x
 
 
 class Sigmoid(torch.nn.Module):
+    test_data = {
+        "rand": (torch.rand(4),),
+    }
 
     def forward(self, x: torch.Tensor):
         return x.sigmoid()
 
-    def get_inputs(self) -> input_t:
-        return (torch.rand(4),)
-
 
-def test_insert_table_tosa_BI():
+@common.parametrize("test_data", Sigmoid.test_data)
+def test_insert_table_tosa_BI(test_data: input_t):
     module = Sigmoid()
     pipeline = PassPipeline[input_t](
         module,
-        module.get_inputs(),
-        tosa_version="TOSA-0.80+BI",
+        test_data,
+        quantize=True,
         ops_before_pass={},
         ops_after_pass={
             "executorch_exir_dialects_edge__ops_quantized_decomposed_quantize_per_tensor_default": 1,
diff --git a/backends/arm/test/passes/test_ioquantization_pass.py b/backends/arm/test/passes/test_ioquantization_pass.py
index ecaff5e3673..2b3e900ab21 100644
--- a/backends/arm/test/passes/test_ioquantization_pass.py
+++ b/backends/arm/test/passes/test_ioquantization_pass.py
@@ -4,61 +4,44 @@
 # LICENSE file in the root directory of this source tree.
 
 
+from typing import Tuple
+
 import torch
 
 from executorch.backends.arm.test import common
-from executorch.backends.arm.test.tester.arm_tester import ArmTester
+from executorch.backends.arm.test.tester.test_pipeline import EthosU55PipelineBI
 from executorch.exir.passes.quantize_io_pass import QuantizeInputs, QuantizeOutputs
 
+input_t = Tuple[torch.Tensor]
+
+
 class SimpleModel(torch.nn.Module):
+    test_data = {
+        "rand_rand": (torch.rand(1, 2, 2, 1), torch.rand(1, 2, 2, 1)),
+    }
+
     def forward(self, x, y):
         return x + y
 
-    def get_inputs(self):
-        a = torch.rand(1, 2, 2, 1)
-        b = torch.rand(1, 2, 2, 1)
-        return (a, b)
-
 
-def test_ioquantisation_pass_u55_BI():
+@common.parametrize("test_data", SimpleModel.test_data)
+def test_ioquantisation_pass_u55_BI(test_data: input_t):
     """
     Test the executorch/exir/passes/quanize_io_pass pass works(meaning we don't get Q/DQ nodes)
     on a simple model
     """
     model = SimpleModel()
-    tester = (
-        ArmTester(
-            model,
-            example_inputs=model.get_inputs(),
-            compile_spec=common.get_u55_compile_spec(),
-        )
-        .quantize()
-        .export()
-        .to_edge()
-        .check_count(
-            {
-                "executorch_exir_dialects_edge__ops_quantized_decomposed_quantize_per_tensor_default": 3
-            }
-        )
-        .check_count(
-            {
-                "executorch_exir_dialects_edge__ops_quantized_decomposed_dequantize_per_tensor_default": 3
-            }
-        )
-        .partition()
-        .check_count(
-            {
-                "executorch_exir_dialects_edge__ops_quantized_decomposed_quantize_per_tensor_default": 2
-            }
-        )
-        .check_count(
-            {
-                "executorch_exir_dialects_edge__ops_quantized_decomposed_dequantize_per_tensor_default": 1
-            }
-        )
+    pipeline = EthosU55PipelineBI(
+        model,
+        test_data,
+        aten_ops=[],
+        exir_ops=[],
+        use_to_edge_transform_and_lower=False,
     )
-    edge = tester.get_artifact()
+    pipeline.pop_stage(-1)
+    pipeline.run()
+    edge = pipeline.tester.get_artifact()
     edge.transform(passes=[QuantizeInputs(edge, [0, 1]), QuantizeOutputs(edge, [0])])
-    tester.check_not(["edge__ops_quantized_decomposed_quantize_per_tensor"])
-    tester.check_not(["edge__ops_quantized_decomposed_dequantize_per_tensor"])
+    pipeline.tester.check_not(["edge__ops_quantized_decomposed_quantize_per_tensor"])
+    pipeline.tester.check_not(["edge__ops_quantized_decomposed_dequantize_per_tensor"])
diff --git a/backends/arm/test/passes/test_meandim_to_averagepool2d.py b/backends/arm/test/passes/test_meandim_to_averagepool2d.py
index 291272af709..fbcb26d2542 100644
--- a/backends/arm/test/passes/test_meandim_to_averagepool2d.py
+++ b/backends/arm/test/passes/test_meandim_to_averagepool2d.py
@@ -58,7 +58,7 @@ def get_inputs(self) -> input_t:
 
 
 @common.parametrize("module", modules)
-def test_meandim_to_avgpool_tosa_BI(module):
+def test_meandim_to_avgpool_tosa_BI(module: torch.nn.Module):
     """
     Tests the MeanDimToAveragePool2dPass which converts mean.dim to average_pool2d
     for the special case where dim is [-1, -2] and keepdim is True.
@@ -66,7 +66,7 @@ def test_meandim_to_avgpool_tosa_BI(module):
     pipeline = PassPipeline[input_t](
         module,
         module.get_inputs(),
-        tosa_version="TOSA-0.80+BI",
+        quantize=True,
         ops_before_pass=module.ops_before_pass,
         ops_after_pass=module.ops_after_pass,
         ops_not_after_pass=module.ops_not_after_pass,
diff --git a/backends/arm/test/passes/test_remove_clone_pass.py b/backends/arm/test/passes/test_remove_clone_pass.py
index e586edd323d..9f317b44043 100755
--- a/backends/arm/test/passes/test_remove_clone_pass.py
+++ b/backends/arm/test/passes/test_remove_clone_pass.py
@@ -33,7 +33,7 @@ def test_remove_clone_tosa_BI():
     pipeline = PassPipeline[input_t](
         module,
         module.get_inputs(),
-        tosa_version="TOSA-0.80+BI",
+        quantize=True,
         ops_before_pass={
             "executorch_exir_dialects_edge__ops_aten_clone_default": 1,
         },
diff --git a/backends/arm/test/passes/test_rescale_pass.py b/backends/arm/test/passes/test_rescale_pass.py
index 21317c23a8a..420fdab5f45 100644
--- a/backends/arm/test/passes/test_rescale_pass.py
+++ b/backends/arm/test/passes/test_rescale_pass.py
@@ -4,15 +4,20 @@
 # LICENSE file in the root directory of this source tree.
 
 
-import unittest
+from typing import Tuple
 
 import pytest
 import torch
 import torch.library
 
 from executorch.backends.arm.test import common, conftest
-from executorch.backends.arm.test.tester.arm_tester import ArmTester
-from parameterized import parameterized
+from executorch.backends.arm.test.tester.test_pipeline import (
+    EthosU55PipelineBI,
+    EthosU85PipelineBI,
+    TosaPipelineBI,
+)
+
+input_t = Tuple[torch.Tensor, torch.Tensor]  # Input x
 
 
 def test_rescale_op():
@@ -91,13 +96,13 @@ def test_zp_outside_range():
 
 
 class RescaleNetwork(torch.nn.Module):
-    test_parameters = [
-        (torch.rand(5), torch.rand(5)),
-        (torch.randn(5, 2), torch.randn(5, 1)),
-        (torch.ones(1, 10, 4, 6), torch.ones(1, 10, 4, 6)),
-        (torch.randn(1, 1, 4, 4), torch.ones(1, 1, 4, 1)),
-        (10000 * torch.randn(1, 1, 4, 4), torch.randn(1, 1, 4, 1)),
-    ]
+    test_data = {
+        "rand": (torch.rand(5), torch.rand(5)),
+        "randn": (torch.randn(5, 2), torch.randn(5, 1)),
+        "ones": (torch.ones(1, 10, 4, 6), torch.ones(1, 10, 4, 6)),
+        "randn_ones": (torch.randn(1, 1, 4, 4), torch.ones(1, 1, 4, 1)),
+        "randn_large": (10000 * torch.randn(1, 1, 4, 4), torch.randn(1, 1, 4, 1)),
+    }
 
     def forward(self, x: torch.Tensor, y: torch.Tensor):
         a = y.exp()
@@ -110,62 +115,49 @@ def forward(self, x: torch.Tensor, y: torch.Tensor):
     return f
 
 
-def _test_rescale_pipeline(
-    module: torch.nn.Module, test_data: tuple[torch.Tensor, torch.Tensor]
-):
+@common.parametrize("test_data", RescaleNetwork.test_data)
+def test_quantized_rescale_tosa_bi(test_data: tuple[torch.Tensor, torch.Tensor]):
     """Tests a model with many ops that requires rescales. As more ops are quantized to int32 and
     need the InsertRescalesPass, make sure that they play nicely together."""
-    tester = (
-        ArmTester(
-            module,
-            example_inputs=test_data,
-            compile_spec=common.get_tosa_compile_spec("TOSA-0.80+BI"),
-        )
-        .quantize()
-        .export()
-        .to_edge_transform_and_lower()
-        .to_executorch()
+    module = RescaleNetwork()
+    pipeline = TosaPipelineBI(
+        module=module,
+        test_data=test_data,
+        aten_op=[],
+        exir_op=[],
     )
-    if conftest.is_option_enabled("tosa_ref_model"):
-        tester.run_method_and_compare_outputs(test_data)
-
-
-def _test_rescale_pipeline_ethosu(
-    module: torch.nn.Module, compile_spec, test_data: tuple[torch.Tensor, torch.Tensor]
-):
-    tester = (
-        ArmTester(
-            module,
-            example_inputs=test_data,
-            compile_spec=compile_spec,
-        )
-        .quantize()
-        .export()
-        .to_edge_transform_and_lower()
-        .to_executorch()
-        .serialize()
+    if not conftest.is_option_enabled("tosa_ref_model"):
+        pipeline.pop_stage("run_method_and_compare_outputs")
+    pipeline.run()
+
+
+@common.parametrize("test_data", RescaleNetwork.test_data)
+@common.XfailIfNoCorstone300
+def test_quantized_rescale_u55(test_data: tuple[torch.Tensor, torch.Tensor]):
+    """Tests a model with many ops that requires rescales. As more ops are quantized to int32 and
+    need the InsertRescalesPass, make sure that they play nicely together."""
+    module = RescaleNetwork()
+    pipeline = EthosU55PipelineBI(
+        module=module,
+        test_data=test_data,
+        aten_ops=[],
+        exir_ops=[],
+        run_on_fvp=True,
+    )
+    pipeline.run()
+
+
+@common.parametrize("test_data", RescaleNetwork.test_data)
+@common.XfailIfNoCorstone320
+def test_quantized_rescale_u85(test_data: tuple[torch.Tensor, torch.Tensor]):
+    """Tests a model with many ops that requires rescales. As more ops are quantized to int32 and
+    need the InsertRescalesPass, make sure that they play nicely together."""
+    module = RescaleNetwork()
+    pipeline = EthosU85PipelineBI(
+        module=module,
+        test_data=test_data,
+        aten_ops=[],
+        exir_ops=[],
+        run_on_fvp=True,
     )
-    if conftest.is_option_enabled("corstone_fvp"):
-        tester.run_method_and_compare_outputs(inputs=test_data)
-
-
-class TestRescales(unittest.TestCase):
-
-    @parameterized.expand(RescaleNetwork.test_parameters)
-    @pytest.mark.tosa_ref_model
-    def test_quantized_rescale(self, x, y):
-        _test_rescale_pipeline(RescaleNetwork(), (x, y))
-
-    @parameterized.expand(RescaleNetwork.test_parameters)
-    @pytest.mark.corstone_fvp
-    def test_quantized_rescale_U55(self, x, y):
-        _test_rescale_pipeline_ethosu(
-            RescaleNetwork(), common.get_u55_compile_spec(), (x, y)
-        )
-
-    @parameterized.expand(RescaleNetwork.test_parameters)
-    @pytest.mark.corstone_fvp
-    def test_quantized_rescale_U85(self, x, y):
-        _test_rescale_pipeline_ethosu(
-            RescaleNetwork(), common.get_u85_compile_spec(), (x, y)
-        )
+    pipeline.run()
diff --git a/backends/arm/test/passes/test_unsqueeze_before_repeat_pass.py b/backends/arm/test/passes/test_unsqueeze_before_repeat_pass.py
index bcecdf6d6a1..a12ac38b866 100644
--- a/backends/arm/test/passes/test_unsqueeze_before_repeat_pass.py
+++ b/backends/arm/test/passes/test_unsqueeze_before_repeat_pass.py
@@ -38,7 +38,7 @@ def forward(self, x: torch.Tensor):
 
 
 @common.parametrize("test_data", Repeat.test_data)
-def test_unsqueeze_before_repeat_tosa_MI(test_data):
+def test_unsqueeze_before_repeat_tosa_MI(test_data: input_t):
     """
     When rank(input) != number of repeated dimensions (=4 in Repeat module),
     insert view.
@@ -48,7 +48,7 @@ def test_unsqueeze_before_repeat_tosa_MI(test_data):
     pipeline = PassPipeline(
         module,
         data,
-        tosa_version="TOSA-0.80+MI",
+        quantize=False,
         ops_before_pass={"aten_repeat_default": 3},
         ops_not_before_pass=["aten_view_copy_default"],
         ops_after_pass=ops_after_pass,
diff --git a/backends/arm/test/tester/test_pipeline.py b/backends/arm/test/tester/test_pipeline.py
index c4c90064bce..480497b4aee 100644
--- a/backends/arm/test/tester/test_pipeline.py
+++ b/backends/arm/test/tester/test_pipeline.py
@@ -618,7 +618,7 @@ def __init__(
         self,
         module: torch.nn.Module,
         test_data: T,
-        tosa_version: str,
+        quantize: Optional[bool] = False,
         ops_before_pass: Optional[Dict[str, int]] = None,
         ops_not_before_pass: Optional[list[str]] = None,
         ops_after_pass: Optional[Dict[str, int]] = None,
@@ -628,8 +628,18 @@ def __init__(
         passes_with_exported_program: Optional[List[Type[ExportPass]]] = None,
         custom_path: str = None,
     ):
+        tosa_profiles = {
+            "0.80": TosaSpecification.create_from_string(
+                "TOSA-0.80+" + ("BI" if quantize else "MI")
+            ),
+            "1.0": TosaSpecification.create_from_string(
+                "TOSA-1.0+" + ("INT" if quantize else "FP")
+            ),
+        }
+        tosa_version = conftest.get_option("tosa_version")
+
         compile_spec = common.get_tosa_compile_spec(
-            tosa_version, custom_path=custom_path
+            tosa_profiles[tosa_version], custom_path=custom_path
         )
         super().__init__(
             module,
@@ -648,7 +658,7 @@ def __init__(
 
         self.pop_stage("to_executorch")
         self.pop_stage("check.aten")
-        if "BI" in tosa_version:
+        if quantize:
             self.add_stage(self.tester.quantize, pos=0)
 
         # Add checks/check_not's if given