[RISCV][GlobalISel] Legalize scalable vectorized G_ADD, G_SUB, G_AND, G_OR, and G_XOR #71400
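For orientation, here is a hedged sketch of the general shape such a change takes in GlobalISel: the target's LegalizerInfo marks these five generic opcodes legal for RVV scalable vector types, so the legalizer forwards them unchanged to instruction selection. Everything in the sketch (the type list, the constructor contents, the absence of subtarget feature gating) is an illustrative assumption, not the PR's actual diff.

// Illustrative sketch only (assumed shape, not the committed code): mark the
// five generic binary ops legal for a few RVV scalable vector types so the
// legalizer leaves them untouched.
#include "RISCVLegalizerInfo.h"
#include "RISCVSubtarget.h"
#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"

using namespace llvm;

RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST) {
  // LLT::scalable_vector(MinElts, ScalarBits) builds <vscale x MinElts x sBits>.
  const LLT nxv2s8 = LLT::scalable_vector(2, 8);
  const LLT nxv2s16 = LLT::scalable_vector(2, 16);
  const LLT nxv2s32 = LLT::scalable_vector(2, 32);

  // Assumption: the committed patch enumerates the full set of RVV-legal
  // element counts and widths and gates them on the vector extensions; only
  // three types are shown here.
  getActionDefinitionsBuilder({TargetOpcode::G_ADD, TargetOpcode::G_SUB,
                               TargetOpcode::G_AND, TargetOpcode::G_OR,
                               TargetOpcode::G_XOR})
      .legalFor({nxv2s8, nxv2s16, nxv2s32});

  getLegacyLegalizerInfo().computeTables();
}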
Changes from all commits: 00bcd75, 3ba8881, 9fa6d85, 463f253, 36d13c2, 8a442b0, fbd3aaf, 9aa2139, ffec060, 9a14008, c1df380, ce77aea, 1b6f40f, 060edbc
@@ -0,0 +1,53 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -global-isel -stop-before=legalizer -simplify-mir < %s | FileCheck %s --check-prefixes=CHECK,RV32I
Review comment: RV32I and RV64I are unused in this test. This causes FileCheck to report an error in lit. I fixed it, but you should always run […]

Reply: Thanks for fixing this. The failure didn't show up in the pre-commit CI checks.
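Since both RUN lines produce identical output here, the autogenerated assertions use only the shared CHECK prefix, leaving RV32I and RV64I with no matching directives; FileCheck, as invoked by lit, reports an error for a prefix passed via --check-prefixes that is never used. One minimal fix is to drop the unused prefixes from both RUN lines (plain FileCheck %s would suffice).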
; RUN: llc -mtriple=riscv64 -mattr=+v -global-isel -stop-before=legalizer -simplify-mir < %s | FileCheck %s --check-prefixes=CHECK,RV64I
define void @add_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
; CHECK-LABEL: name: add_nxv2i32
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: liveins: $v8, $v9
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: PseudoRET
  %c = add <vscale x 2 x i32> %a, %b
  ret void
}

define void @sub_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
; CHECK-LABEL: name: sub_nxv2i32
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: liveins: $v8, $v9
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: PseudoRET
  %c = sub <vscale x 2 x i32> %a, %b
  ret void
}

define void @and_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
; CHECK-LABEL: name: and_nxv2i32
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: liveins: $v8, $v9
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: PseudoRET
  %c = and <vscale x 2 x i32> %a, %b
  ret void
}

define void @or_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
; CHECK-LABEL: name: or_nxv2i32
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: liveins: $v8, $v9
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: PseudoRET
  %c = or <vscale x 2 x i32> %a, %b
  ret void
}

define void @xor_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
; CHECK-LABEL: name: xor_nxv2i32
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: liveins: $v8, $v9
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: PseudoRET
  %c = xor <vscale x 2 x i32> %a, %b
  ret void
}
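Note that every function above returns void and leaves %c unused, so no generic binary op survives into the dumped MIR; each test checks only the live-in vector argument registers and the PseudoRET, which is presumably why the target-specific prefixes ended up unused in the first place.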
@@ -0,0 +1,274 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=riscv32 -mattr=+zve32x -run-pass=legalizer %s -o - | FileCheck %s
# RUN: llc -mtriple=riscv64 -mattr=+zve32x -run-pass=legalizer %s -o - | FileCheck %s
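These MIR tests drive the legalizer directly with -run-pass=legalizer; because the scalable vector types involved are now marked legal, the pass is expected to leave each G_ADD untouched, and the autogenerated CHECK lines simply mirror the input instructions.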
---
name: test_nxv2i8
body: |
  bb.0.entry:
    ; CHECK-LABEL: name: test_nxv2i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 2 x s8>) = COPY $v8
    %1:_(<vscale x 2 x s8>) = COPY $v9
    %2:_(<vscale x 2 x s8>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 2 x s8>)
    PseudoRET implicit $v8

...
---
name: test_nxv4i8
body: |
  bb.0.entry:
    ; CHECK-LABEL: name: test_nxv4i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 4 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 4 x s8>) = COPY $v8
    %1:_(<vscale x 4 x s8>) = COPY $v9
    %2:_(<vscale x 4 x s8>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 4 x s8>)
    PseudoRET implicit $v8

...
---
name: test_nxv8i8
body: |
  bb.0.entry:
    ; CHECK-LABEL: name: test_nxv8i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 8 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 8 x s8>) = COPY $v8
    %1:_(<vscale x 8 x s8>) = COPY $v9
    %2:_(<vscale x 8 x s8>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 8 x s8>)
    PseudoRET implicit $v8

...
---
name: test_nxv16i8
body: |
  bb.0.entry:
    ; CHECK-LABEL: name: test_nxv16i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v10m2
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m2 = COPY [[ADD]](<vscale x 16 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8m2
    %0:_(<vscale x 16 x s8>) = COPY $v8m2
    %1:_(<vscale x 16 x s8>) = COPY $v10m2
    %2:_(<vscale x 16 x s8>) = G_ADD %0, %1
    $v8m2 = COPY %2(<vscale x 16 x s8>)
    PseudoRET implicit $v8m2

...
---
name: test_nxv32i8
body: |
  bb.0.entry:
    ; CHECK-LABEL: name: test_nxv32i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v12m4
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m4 = COPY [[ADD]](<vscale x 32 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8m4
    %0:_(<vscale x 32 x s8>) = COPY $v8m4
    %1:_(<vscale x 32 x s8>) = COPY $v12m4
    %2:_(<vscale x 32 x s8>) = G_ADD %0, %1
    $v8m4 = COPY %2(<vscale x 32 x s8>)
    PseudoRET implicit $v8m4

...
---
name: test_nxv64i8
body: |
  bb.0.entry:
    ; CHECK-LABEL: name: test_nxv64i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v16m8
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m8 = COPY [[ADD]](<vscale x 64 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8m8
    %0:_(<vscale x 64 x s8>) = COPY $v8m8
    %1:_(<vscale x 64 x s8>) = COPY $v16m8
    %2:_(<vscale x 64 x s8>) = G_ADD %0, %1
    $v8m8 = COPY %2(<vscale x 64 x s8>)
    PseudoRET implicit $v8m8

...
---
name: test_nxv2i16
body: |
  bb.0.entry:
    ; CHECK-LABEL: name: test_nxv2i16
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s16>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 2 x s16>) = COPY $v8
    %1:_(<vscale x 2 x s16>) = COPY $v9
    %2:_(<vscale x 2 x s16>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 2 x s16>)
    PseudoRET implicit $v8

...
---
name: test_nxv4i16
body: |
  bb.0.entry:
    ; CHECK-LABEL: name: test_nxv4i16
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 4 x s16>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 4 x s16>) = COPY $v8
    %1:_(<vscale x 4 x s16>) = COPY $v9
    %2:_(<vscale x 4 x s16>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 4 x s16>)
    PseudoRET implicit $v8

...
---
name: test_nxv8i16
body: |
  bb.0.entry:
    ; CHECK-LABEL: name: test_nxv8i16
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v10m2
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m2 = COPY [[ADD]](<vscale x 8 x s16>)
    ; CHECK-NEXT: PseudoRET implicit $v8m2
    %0:_(<vscale x 8 x s16>) = COPY $v8m2
    %1:_(<vscale x 8 x s16>) = COPY $v10m2
    %2:_(<vscale x 8 x s16>) = G_ADD %0, %1
    $v8m2 = COPY %2(<vscale x 8 x s16>)
    PseudoRET implicit $v8m2

...
---
name: test_nxv16i16
body: |
  bb.0.entry:
    ; CHECK-LABEL: name: test_nxv16i16
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v12m4
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m4 = COPY [[ADD]](<vscale x 16 x s16>)
    ; CHECK-NEXT: PseudoRET implicit $v8m4
    %0:_(<vscale x 16 x s16>) = COPY $v8m4
    %1:_(<vscale x 16 x s16>) = COPY $v12m4
    %2:_(<vscale x 16 x s16>) = G_ADD %0, %1
    $v8m4 = COPY %2(<vscale x 16 x s16>)
    PseudoRET implicit $v8m4

...
---
name: test_nxv32i16
body: |
  bb.0.entry:
    ; CHECK-LABEL: name: test_nxv32i16
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v16m8
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m8 = COPY [[ADD]](<vscale x 32 x s16>)
    ; CHECK-NEXT: PseudoRET implicit $v8m8
    %0:_(<vscale x 32 x s16>) = COPY $v8m8
    %1:_(<vscale x 32 x s16>) = COPY $v16m8
    %2:_(<vscale x 32 x s16>) = G_ADD %0, %1
    $v8m8 = COPY %2(<vscale x 32 x s16>)
    PseudoRET implicit $v8m8

...
---
name: test_nxv2i32
body: |
  bb.0.entry:
    ; CHECK-LABEL: name: test_nxv2i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s32>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 2 x s32>) = COPY $v8
    %1:_(<vscale x 2 x s32>) = COPY $v9
    %2:_(<vscale x 2 x s32>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 2 x s32>)
    PseudoRET implicit $v8

...
---
name: test_nxv4i32
body: |
  bb.0.entry:
    ; CHECK-LABEL: name: test_nxv4i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v10m2
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m2 = COPY [[ADD]](<vscale x 4 x s32>)
    ; CHECK-NEXT: PseudoRET implicit $v8m2
    %0:_(<vscale x 4 x s32>) = COPY $v8m2
    %1:_(<vscale x 4 x s32>) = COPY $v10m2
    %2:_(<vscale x 4 x s32>) = G_ADD %0, %1
    $v8m2 = COPY %2(<vscale x 4 x s32>)
    PseudoRET implicit $v8m2

...
---
name: test_nxv8i32
body: |
  bb.0.entry:
    ; CHECK-LABEL: name: test_nxv8i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v12m4
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m4 = COPY [[ADD]](<vscale x 8 x s32>)
    ; CHECK-NEXT: PseudoRET implicit $v8m4
    %0:_(<vscale x 8 x s32>) = COPY $v8m4
    %1:_(<vscale x 8 x s32>) = COPY $v12m4
    %2:_(<vscale x 8 x s32>) = G_ADD %0, %1
    $v8m4 = COPY %2(<vscale x 8 x s32>)
    PseudoRET implicit $v8m4

...
---
name: test_nxv16i32
body: |
  bb.0.entry:
    ; CHECK-LABEL: name: test_nxv16i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v16m8
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m8 = COPY [[ADD]](<vscale x 16 x s32>)
    ; CHECK-NEXT: PseudoRET implicit $v8m8
    %0:_(<vscale x 16 x s32>) = COPY $v8m8
    %1:_(<vscale x 16 x s32>) = COPY $v16m8
    %2:_(<vscale x 16 x s32>) = G_ADD %0, %1
    $v8m8 = COPY %2(<vscale x 16 x s32>)
    PseudoRET implicit $v8m8

...
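The register operands in these tests follow RVV register grouping. In LLVM, a scalable vector type occupying 64 bits per unit of vscale (for example <vscale x 8 x s8> or <vscale x 2 x s32>) fits in a single vector register, so the two operands arrive in $v8 and $v9. Each doubling of the type doubles the register group size (LMUL): operands move to $v8m2/$v10m2 at LMUL 2, $v8m4/$v12m4 at LMUL 4, and $v8m8/$v16m8 at LMUL 8, since a register group must start at a register number divisible by its LMUL.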