Commit 0820251

[mlir][SVE] Add an e2e test for vectorization of linalg.matmul
Adds an end-to-end test for scalable vectorization of linalg.matmul. This is the most basic case, where the dimension along which we vectorize fits perfectly within SVE registers. Forthcoming patches will extend this to more general cases. Depends on llvm#68794.
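For background: a scalable size such as `[4]` in the schedule below means "4 × vscale" elements, where vscale is the runtime multiple of the minimal 128-bit SVE register size. A minimal, hand-written sketch of the loop shape such a tile size produces (illustrative only; the function name is made up):

```mlir
// Illustrative sketch: stepping through a dimension by a scalable tile of [4].
func.func @scalable_step(%lb : index, %ub : index) {
  %c4 = arith.constant 4 : index
  // vscale is 1 on a 128-bit SVE implementation, 2 on 256-bit, etc.
  %vscale = vector.vscale
  %step = arith.muli %c4, %vscale : index
  scf.for %iv = %lb to %ub step %step {
    // Each iteration covers one vector<[4]xf32>, i.e. 4 x vscale f32 elements.
  }
  return
}
```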
1 parent 9db8f99

File tree: 1 file changed (+77, −0)

  • mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE

@@ -0,0 +1,77 @@
// RUN: mlir-opt %s -test-transform-dialect-interpreter -test-transform-dialect-erase-schedule \
// RUN:   -one-shot-bufferize -func-bufferize -cse -canonicalize -convert-vector-to-scf -arm-sve-legalize-vector-storage \
// RUN:   -convert-vector-to-llvm="enable-arm-sve" -test-lower-to-llvm | \
// RUN: %mcr_aarch64_cmd -e=entry -entry-point-result=void --march=aarch64 --mattr="+sve" -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils | \
// RUN: FileCheck %s

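// The pipeline above: the transform interpreter runs the schedule at the
// bottom of this file and then erases it; the module is bufferized and
// lowered to LLVM with SVE enabled; %mcr_aarch64_cmd (a lit substitution for
// the AArch64-capable runner) JIT-executes @entry, and FileCheck verifies
// the printed output.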
func.func @printTestEnd() {
  %0 = llvm.mlir.addressof @str_sve_end : !llvm.ptr<array<24 x i8>>
  %1 = llvm.mlir.constant(0 : index) : i64
  %2 = llvm.getelementptr %0[%1, %1]
    : (!llvm.ptr<array<24 x i8>>, i64, i64) -> !llvm.ptr<i8>
  llvm.call @printCString(%2) : (!llvm.ptr<i8>) -> ()
  return
}

func.func @entry() {
  %c1 = arith.constant 1 : index
  %c2 = arith.constant 2 : index
  %c4 = arith.constant 4 : index
  %c0 = arith.constant 0 : index
  %step = arith.constant 1 : index
  %c0_f32 = arith.constant 0.0 : f32

  %vscale = vector.vscale
  %vl_fp = arith.muli %c4, %vscale : index
  %A_alloc = bufferization.alloc_tensor(%c2, %c1) : tensor<?x?xf32>
  %B_alloc = bufferization.alloc_tensor(%c1, %vl_fp) : tensor<?x?xf32>
  %C_alloc = bufferization.alloc_tensor(%c2, %vl_fp) : tensor<?x?xf32>
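  // Shapes: A is 2x1, B is 1x(4 x vscale) and C is 2x(4 x vscale), so the
  // dimension being vectorized is exactly one scalable vector wide, i.e. it
  // fits perfectly within SVE registers.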

  %pi = arith.constant 3.14 : f32
  %A = linalg.fill ins(%pi : f32) outs(%A_alloc : tensor<?x?xf32>) -> tensor<?x?xf32>
  %B = linalg.fill ins(%pi : f32) outs(%B_alloc : tensor<?x?xf32>) -> tensor<?x?xf32>
  %C_in = linalg.fill ins(%c0_f32 : f32) outs(%C_alloc : tensor<?x?xf32>) -> tensor<?x?xf32>

  %C_out = linalg.matmul ins(%A, %B: tensor<?x?xf32>, tensor<?x?xf32>) outs(%C_in: tensor<?x?xf32>) -> tensor<?x?xf32>

  // There are at least 4 f32 elements in every SVE vector, i.e.
  //   * %vscale is >= 1.
  // For implementations with wider vectors, you should see more elements
  // being printed.
  // Each element of C is a single product, 3.14 * 3.14 = 9.8596, because the
  // reduction dimension K is 1.
  // CHECK: {{\[}}[9.8596, 9.8596, 9.8596, 9.8596
  // CHECK-NEXT: [9.8596, 9.8596, 9.8596, 9.8596

  %xf = tensor.cast %C_out : tensor<?x?xf32> to tensor<*xf32>
  call @printMemrefF32(%xf) : (tensor<*xf32>) -> ()

  // CHECK: SVE: END OF TEST OUTPUT
  func.call @printTestEnd() : () -> ()

  return
}

transform.sequence failures(propagate) {
^bb1(%module_op: !transform.any_op):
  %0 = transform.structured.match ops{["linalg.matmul"]} in %module_op : (!transform.any_op) -> !transform.any_op
  %func_op = get_parent_op %0 : (!transform.any_op) -> !transform.op<"func.func">
  // The tile sizes match the output matrix sizes: 2 (M), [4] = 4 x vscale
  // (N, scalable) and 1 (K).
  %1, %loops:3 = transform.structured.tile_using_for %0 [2, [4], 1] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
  %2 = transform.structured.match ops{["linalg.matmul"]} in %module_op : (!transform.any_op) -> !transform.any_op
  // The vector sizes match the output matrix sizes.
  // TODO: Use variables to re-use "shared" sizes.
  transform.structured.vectorize %2 vector_sizes [2, [4], 1] : !transform.any_op
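
  // What follows lowers the vectorized code: the first pattern set rewrites
  // the reduction into a vector.contract and simplifies masked transfers;
  // the second lowers the contraction to outer products, which map well
  // onto SVE.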
  transform.apply_patterns to %func_op {
    transform.apply_patterns.vector.reduction_to_contract
    transform.apply_patterns.vector.transfer_permutation_patterns
    transform.apply_patterns.vector.lower_masked_transfers
  } : !transform.op<"func.func">
  transform.apply_patterns to %func_op {
    transform.apply_patterns.vector.lower_contraction lowering_strategy = "outerproduct"
    transform.apply_patterns.vector.lower_outerproduct
  } : !transform.op<"func.func">
}

llvm.func @printCString(!llvm.ptr<i8>)
func.func private @printMemrefF32(%ptr : tensor<*xf32>)
llvm.mlir.global internal constant @str_sve_end("SVE: END OF TEST OUTPUT\0A")
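
For intuition, here is a hand-written sketch (not part of this commit; the trait alias and function name are made up) of the kind of contraction `transform.structured.vectorize` produces for a single `[2, [4], 1]` tile, before the `outerproduct` lowering runs:

```mlir
// Hypothetical sketch: a 2x1 * 1x[4] tile contraction on scalable vectors.
#matmul_trait = {
  indexing_maps = [
    affine_map<(m, n, k) -> (m, k)>,
    affine_map<(m, n, k) -> (k, n)>,
    affine_map<(m, n, k) -> (m, n)>
  ],
  iterator_types = ["parallel", "parallel", "reduction"]
}

func.func @tile_contract(%a : vector<2x1xf32>, %b : vector<1x[4]xf32>,
                         %c : vector<2x[4]xf32>) -> vector<2x[4]xf32> {
  // C += A * B on one tile; with K = 1 this lowers to a single
  // vector.outerproduct per tile.
  %0 = vector.contract #matmul_trait %a, %b, %c
    : vector<2x1xf32>, vector<1x[4]xf32> into vector<2x[4]xf32>
  return %0 : vector<2x[4]xf32>
}
```

With 3.14 in every input element and K = 1, each accumulator lane ends up holding 3.14 × 3.14 = 9.8596, which is exactly what the CHECK lines above expect.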
