[mlir][linalg] Add tests for PadOp #110271

Merged: 1 commit, Sep 28, 2024
94 changes: 86 additions & 8 deletions in mlir/test/Dialect/Linalg/transform-op-pad.mlir
@@ -209,21 +209,33 @@ module attributes {transform.with_named_sequence} {

// -----

-// CHECK-LABEL: @pad(
-func.func @pad(%arg0: tensor<24x12xf32>,
-               %arg1: tensor<12x25xf32>,
-               %arg2: tensor<24x25xf32>) -> tensor<24x25xf32> {
-  // This is attached to an error that is silenceable and is not reported by this transform
-  // {{when applied to this op}}
// With all padded dims being static, there's nothing to pad. However, with the
// `nofold` attribute set (see `pack_paddings`), the corresponding pad Ops are
// preserved.

// CHECK-LABEL: @zero_pad_static(
func.func @zero_pad_static(%arg0: tensor<24x12xf32>,
                           %arg1: tensor<12x25xf32>,
                           %arg2: tensor<24x25xf32>) -> tensor<24x25xf32> {

  // CHECK-SAME: %[[ARG_0:.*]]: tensor<24x12xf32>,
  // CHECK-SAME: %[[ARG_1:.*]]: tensor<12x25xf32>,
  // CHECK-SAME: %[[ARG_2:.*]]: tensor<24x25xf32>) -> tensor<24x25xf32> {

  // CHECK: %[[PAD_ARG_0:.*]] = tensor.pad %[[ARG_0]] nofold low[0, 0] high[0, 0]
  // CHECK: %[[PAD_ARG_1:.*]] = tensor.pad %[[ARG_1]] nofold low[0, 0] high[0, 0]
  // CHECK-NOT: tensor.pad

  // CHECK: %[[MATMUL:.*]] = linalg.matmul
  // CHECK-SAME: ins(%[[PAD_ARG_0]], %[[PAD_ARG_1]] : tensor<24x12xf32>, tensor<12x25xf32>)
  // CHECK-SAME: outs(%[[ARG_2]]
  %0 = linalg.matmul ins(%arg0, %arg1 : tensor<24x12xf32>, tensor<12x25xf32>) outs(%arg2 : tensor<24x25xf32>) -> tensor<24x25xf32>
  func.return %0 : tensor<24x25xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    // This error is silenceable and is not reported by this transform
-    // {{transform.structured.pad failed to apply}}
    %padded, %pad, %copy_back = transform.structured.pad %0 {
      padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32],
      padding_dimensions=[0, 1, 2],
@@ -235,6 +247,72 @@

// -----

// With all padded dims being static, there's nothing to pad. However, with the
// `nofold` attribute set (see `pack_paddings`), the corresponding pad Ops are
// preserved. Same as above, but some dims are now dynamic.

// CHECK-LABEL: @zero_pad_dynamic(
func.func @zero_pad_dynamic(%arg0: tensor<?x12xf32>,
                            %arg1: tensor<12x?xf32>,
                            %arg2: tensor<?x?xf32>) -> tensor<?x?xf32> {

  // CHECK-SAME: %[[ARG_0:.*]]: tensor<?x12xf32>,
  // CHECK-SAME: %[[ARG_1:.*]]: tensor<12x?xf32>,
  // CHECK-SAME: %[[ARG_2:.*]]: tensor<?x?xf32>) -> tensor<?x?xf32> {

  // CHECK: %[[PAD_ARG_0:.*]] = tensor.pad %[[ARG_0]] nofold low[0, 0] high[0, 0]
  // CHECK: %[[PAD_ARG_1:.*]] = tensor.pad %[[ARG_1]] nofold low[0, 0] high[0, 0]
  // CHECK: %[[PAD_ARG_2:.*]] = tensor.pad %[[ARG_2]] nofold low[0, 0] high[0, 0]

  // CHECK: %[[MATMUL:.*]] = linalg.matmul
  // CHECK-SAME: ins(%[[PAD_ARG_0]], %[[PAD_ARG_1]] : tensor<?x12xf32>, tensor<12x?xf32>)
  // CHECK-SAME: outs(%[[PAD_ARG_2]]
  %0 = linalg.matmul ins(%arg0, %arg1 : tensor<?x12xf32>, tensor<12x?xf32>) outs(%arg2 : tensor<?x?xf32>) -> tensor<?x?xf32>
  func.return %0 : tensor<?x?xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %padded, %pad, %copy_back = transform.structured.pad %0 {
      padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32],
      // Note - only the static dim is padded
      padding_dimensions=[2],
      pack_paddings=[1, 1, 1]
    } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
    transform.yield
  }
}

// -----

// Impossible to get a bound for padding - fails

func.func @negative_no_ub_estimate(%arg0: tensor<?x12xf32>,
                                   %arg1: tensor<12x?xf32>,
                                   %arg2: tensor<?x?xf32>) -> tensor<?x?xf32> {

  // expected-note @below {{target op}}
  %0 = linalg.matmul ins(%arg0, %arg1 : tensor<?x12xf32>, tensor<12x?xf32>) outs(%arg2 : tensor<?x?xf32>) -> tensor<?x?xf32>
  func.return %0 : tensor<?x?xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    // expected-error @below {{failed to pad op}}
    %padded, %pad, %copy_back = transform.structured.pad %0 {
      padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32],
      // Note - attempting to pad non-static dim
      padding_dimensions=[1],
      pack_paddings=[1, 1, 1]
    } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
    transform.yield
  }
}

// -----

// Check that the padding can be applied even when the output argument of the
// linalg op is not produced by an empty op or an extract_slice op.

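For context: lit test files such as transform-op-pad.mlir are driven by a RUN line at the top of the file, which sits outside the hunks shown above. A plausible invocation (the exact pass list and flags below are an assumption, not taken from this diff) runs the transform interpreter under -verify-diagnostics, which consumes the expected-error/expected-note directives of the negative test, and pipes the output through FileCheck for the CHECK lines:

// Hypothetical RUN line; the real one in transform-op-pad.mlir may use different passes or flags.
// RUN: mlir-opt --transform-interpreter --split-input-file --verify-diagnostics %s | FileCheck %s

With --split-input-file, each block delimited by // ----- is processed independently, so the failing @negative_no_ub_estimate case does not affect the passing tests.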