-
Notifications
You must be signed in to change notification settings - Fork 13.6k
AMDGPU: Replace ptr addrspace(1) undefs with poison #130900
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
AMDGPU: Replace ptr addrspace(1) undefs with poison #130900
Conversation
@llvm/pr-subscribers-backend-amdgpu @llvm/pr-subscribers-llvm-globalisel Author: Matt Arsenault (arsenm) Changes: Many tests use store to undef as a placeholder use, so just replace all of these with poison. Patch is 1.06 MiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/130900.diff 217 Files Affected:
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/amdgpu-irtranslator.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/amdgpu-irtranslator.ll
index 00c3bf30671e0..523d51ddcd2bc 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/amdgpu-irtranslator.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/amdgpu-irtranslator.ll
@@ -8,6 +8,6 @@
; CHECK: {{%[0-9]+}}:_(s32) = G_ADD
define amdgpu_kernel void @addi32(i32 %arg1, i32 %arg2) {
%res = add i32 %arg1, %arg2
- store i32 %res, ptr addrspace(1) undef
+ store i32 %res, ptr addrspace(1) poison
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/bool-legalization.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/bool-legalization.ll
index 876f1622a24a7..aba84cd4298c1 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/bool-legalization.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/bool-legalization.ll
@@ -102,11 +102,11 @@ entry:
br i1 %trunc, label %bb0, label %bb1
bb0:
- store volatile i32 0, ptr addrspace(1) undef
+ store volatile i32 0, ptr addrspace(1) poison
unreachable
bb1:
- store volatile i32 1, ptr addrspace(1) undef
+ store volatile i32 1, ptr addrspace(1) poison
unreachable
}
@@ -153,10 +153,10 @@ entry:
br i1 %and, label %bb0, label %bb1
bb0:
- store volatile i32 0, ptr addrspace(1) undef
+ store volatile i32 0, ptr addrspace(1) poison
unreachable
bb1:
- store volatile i32 1, ptr addrspace(1) undef
+ store volatile i32 1, ptr addrspace(1) poison
unreachable
}
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/cvt_f32_ubyte.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/cvt_f32_ubyte.ll
index 357e9d6530ce8..e77641399f910 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/cvt_f32_ubyte.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/cvt_f32_ubyte.ll
@@ -96,7 +96,7 @@ define float @v_uitofp_to_f32_multi_use_lshr8_mask255(i32 %arg0) nounwind {
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
%lshr.8 = lshr i32 %arg0, 8
- store i32 %lshr.8, ptr addrspace(1) undef
+ store i32 %lshr.8, ptr addrspace(1) poison
%masked = and i32 %lshr.8, 255
%cvt = uitofp i32 %masked to float
ret float %cvt
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergent-control-flow.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergent-control-flow.ll
index 5fa991cd27785..3ad5845467cd0 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergent-control-flow.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergent-control-flow.ll
@@ -22,7 +22,7 @@ entry:
br i1 %c, label %if.true, label %endif
if.true:
- %val = load volatile i32, ptr addrspace(1) undef
+ %val = load volatile i32, ptr addrspace(1) poison
br label %endif
endif:
@@ -53,7 +53,7 @@ endif:
ret i32 %v
if.true:
- %val = load volatile i32, ptr addrspace(1) undef
+ %val = load volatile i32, ptr addrspace(1) poison
br label %endif
}
@@ -78,7 +78,7 @@ entry:
br i1 %c, label %if.true, label %endif
if.true:
- %val = load volatile i32, ptr addrspace(1) undef
+ %val = load volatile i32, ptr addrspace(1) poison
br label %endif
endif:
@@ -110,7 +110,7 @@ entry:
br i1 %c, label %if.true, label %endif
if.true:
- %val = load volatile i32, ptr addrspace(1) undef
+ %val = load volatile i32, ptr addrspace(1) poison
br label %endif
endif:
@@ -237,7 +237,7 @@ bb1:
br i1 %cmp0, label %bb4, label %bb9
bb4:
- %load = load volatile i32, ptr addrspace(1) undef, align 4
+ %load = load volatile i32, ptr addrspace(1) poison, align 4
%cmp1 = icmp slt i32 %tmp, %load
br i1 %cmp1, label %bb1, label %bb9
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.ll
index c136028f2de43..137d057ef2df3 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.ll
@@ -483,7 +483,7 @@ define amdgpu_ps void @dyn_extract_v8i64_const_s_s(i32 inreg %sel) {
; GFX11-NEXT: s_endpgm
entry:
%ext = extractelement <8 x i64> <i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8>, i32 %sel
- store i64 %ext, ptr addrspace(1) undef
+ store i64 %ext, ptr addrspace(1) poison
ret void
}
@@ -628,7 +628,7 @@ define amdgpu_ps void @dyn_extract_v8i64_s_v(<8 x i64> inreg %vec, i32 %sel) {
; GFX11-NEXT: s_endpgm
entry:
%ext = extractelement <8 x i64> %vec, i32 %sel
- store i64 %ext, ptr addrspace(1) undef
+ store i64 %ext, ptr addrspace(1) poison
ret void
}
@@ -744,7 +744,7 @@ define amdgpu_ps void @dyn_extract_v8i64_v_s(<8 x i64> %vec, i32 inreg %sel) {
; GFX11-NEXT: s_endpgm
entry:
%ext = extractelement <8 x i64> %vec, i32 %sel
- store i64 %ext, ptr addrspace(1) undef
+ store i64 %ext, ptr addrspace(1) poison
ret void
}
@@ -849,7 +849,7 @@ define amdgpu_ps void @dyn_extract_v8i64_s_s(<8 x i64> inreg %vec, i32 inreg %se
; GFX11-NEXT: s_endpgm
entry:
%ext = extractelement <8 x i64> %vec, i32 %sel
- store i64 %ext, ptr addrspace(1) undef
+ store i64 %ext, ptr addrspace(1) poison
ret void
}
@@ -1800,7 +1800,7 @@ define amdgpu_ps void @dyn_extract_v8p1_s_s(<8 x ptr addrspace(1)> inreg %vec, i
; GFX11-NEXT: s_endpgm
entry:
%ext = extractelement <8 x ptr addrspace(1)> %vec, i32 %idx
- store ptr addrspace(1) %ext, ptr addrspace(1) undef
+ store ptr addrspace(1) %ext, ptr addrspace(1) poison
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/function-returns.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/function-returns.ll
index 80a9fc509d6ea..8d7c48d2c94cc 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/function-returns.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/function-returns.ll
@@ -7,11 +7,11 @@ define i1 @i1_func_void() #0 {
; CHECK-LABEL: name: i1_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s1) = G_LOAD [[DEF]](p1) :: (load (s1) from `ptr addrspace(1) undef`, addrspace 1)
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s1) = G_LOAD [[DEF]](p1) :: (load (s1) from `ptr addrspace(1) poison`, addrspace 1)
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s1)
; CHECK-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
; CHECK-NEXT: SI_RETURN implicit $vgpr0
- %val = load i1, ptr addrspace(1) undef
+ %val = load i1, ptr addrspace(1) poison
ret i1 %val
}
@@ -19,11 +19,11 @@ define zeroext i1 @i1_zeroext_func_void() #0 {
; CHECK-LABEL: name: i1_zeroext_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s1) = G_LOAD [[DEF]](p1) :: (load (s1) from `ptr addrspace(1) undef`, addrspace 1)
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s1) = G_LOAD [[DEF]](p1) :: (load (s1) from `ptr addrspace(1) poison`, addrspace 1)
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s1)
; CHECK-NEXT: $vgpr0 = COPY [[ZEXT]](s32)
; CHECK-NEXT: SI_RETURN implicit $vgpr0
- %val = load i1, ptr addrspace(1) undef
+ %val = load i1, ptr addrspace(1) poison
ret i1 %val
}
@@ -31,11 +31,11 @@ define signext i1 @i1_signext_func_void() #0 {
; CHECK-LABEL: name: i1_signext_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s1) = G_LOAD [[DEF]](p1) :: (load (s1) from `ptr addrspace(1) undef`, addrspace 1)
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s1) = G_LOAD [[DEF]](p1) :: (load (s1) from `ptr addrspace(1) poison`, addrspace 1)
; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD]](s1)
; CHECK-NEXT: $vgpr0 = COPY [[SEXT]](s32)
; CHECK-NEXT: SI_RETURN implicit $vgpr0
- %val = load i1, ptr addrspace(1) undef
+ %val = load i1, ptr addrspace(1) poison
ret i1 %val
}
@@ -43,11 +43,11 @@ define i7 @i7_func_void() #0 {
; CHECK-LABEL: name: i7_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s7) = G_LOAD [[DEF]](p1) :: (load (s7) from `ptr addrspace(1) undef`, addrspace 1)
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s7) = G_LOAD [[DEF]](p1) :: (load (s7) from `ptr addrspace(1) poison`, addrspace 1)
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s7)
; CHECK-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
; CHECK-NEXT: SI_RETURN implicit $vgpr0
- %val = load i7, ptr addrspace(1) undef
+ %val = load i7, ptr addrspace(1) poison
ret i7 %val
}
@@ -55,11 +55,11 @@ define zeroext i7 @i7_zeroext_func_void() #0 {
; CHECK-LABEL: name: i7_zeroext_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s7) = G_LOAD [[DEF]](p1) :: (load (s7) from `ptr addrspace(1) undef`, addrspace 1)
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s7) = G_LOAD [[DEF]](p1) :: (load (s7) from `ptr addrspace(1) poison`, addrspace 1)
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s7)
; CHECK-NEXT: $vgpr0 = COPY [[ZEXT]](s32)
; CHECK-NEXT: SI_RETURN implicit $vgpr0
- %val = load i7, ptr addrspace(1) undef
+ %val = load i7, ptr addrspace(1) poison
ret i7 %val
}
@@ -67,11 +67,11 @@ define signext i7 @i7_signext_func_void() #0 {
; CHECK-LABEL: name: i7_signext_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s7) = G_LOAD [[DEF]](p1) :: (load (s7) from `ptr addrspace(1) undef`, addrspace 1)
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s7) = G_LOAD [[DEF]](p1) :: (load (s7) from `ptr addrspace(1) poison`, addrspace 1)
; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD]](s7)
; CHECK-NEXT: $vgpr0 = COPY [[SEXT]](s32)
; CHECK-NEXT: SI_RETURN implicit $vgpr0
- %val = load i7, ptr addrspace(1) undef
+ %val = load i7, ptr addrspace(1) poison
ret i7 %val
}
@@ -79,11 +79,11 @@ define i8 @i8_func_void() #0 {
; CHECK-LABEL: name: i8_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (load (s8) from `ptr addrspace(1) undef`, addrspace 1)
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (load (s8) from `ptr addrspace(1) poison`, addrspace 1)
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s8)
; CHECK-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
; CHECK-NEXT: SI_RETURN implicit $vgpr0
- %val = load i8, ptr addrspace(1) undef
+ %val = load i8, ptr addrspace(1) poison
ret i8 %val
}
@@ -91,11 +91,11 @@ define zeroext i8 @i8_zeroext_func_void() #0 {
; CHECK-LABEL: name: i8_zeroext_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (load (s8) from `ptr addrspace(1) undef`, addrspace 1)
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (load (s8) from `ptr addrspace(1) poison`, addrspace 1)
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s8)
; CHECK-NEXT: $vgpr0 = COPY [[ZEXT]](s32)
; CHECK-NEXT: SI_RETURN implicit $vgpr0
- %val = load i8, ptr addrspace(1) undef
+ %val = load i8, ptr addrspace(1) poison
ret i8 %val
}
@@ -103,11 +103,11 @@ define signext i8 @i8_signext_func_void() #0 {
; CHECK-LABEL: name: i8_signext_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (load (s8) from `ptr addrspace(1) undef`, addrspace 1)
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (load (s8) from `ptr addrspace(1) poison`, addrspace 1)
; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD]](s8)
; CHECK-NEXT: $vgpr0 = COPY [[SEXT]](s32)
; CHECK-NEXT: SI_RETURN implicit $vgpr0
- %val = load i8, ptr addrspace(1) undef
+ %val = load i8, ptr addrspace(1) poison
ret i8 %val
}
@@ -115,11 +115,11 @@ define i16 @i16_func_void() #0 {
; CHECK-LABEL: name: i16_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[DEF]](p1) :: (load (s16) from `ptr addrspace(1) undef`, addrspace 1)
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[DEF]](p1) :: (load (s16) from `ptr addrspace(1) poison`, addrspace 1)
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s16)
; CHECK-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
; CHECK-NEXT: SI_RETURN implicit $vgpr0
- %val = load i16, ptr addrspace(1) undef
+ %val = load i16, ptr addrspace(1) poison
ret i16 %val
}
@@ -127,11 +127,11 @@ define zeroext i16 @i16_zeroext_func_void() #0 {
; CHECK-LABEL: name: i16_zeroext_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[DEF]](p1) :: (load (s16) from `ptr addrspace(1) undef`, addrspace 1)
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[DEF]](p1) :: (load (s16) from `ptr addrspace(1) poison`, addrspace 1)
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s16)
; CHECK-NEXT: $vgpr0 = COPY [[ZEXT]](s32)
; CHECK-NEXT: SI_RETURN implicit $vgpr0
- %val = load i16, ptr addrspace(1) undef
+ %val = load i16, ptr addrspace(1) poison
ret i16 %val
}
@@ -139,11 +139,11 @@ define signext i16 @i16_signext_func_void() #0 {
; CHECK-LABEL: name: i16_signext_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[DEF]](p1) :: (load (s16) from `ptr addrspace(1) undef`, addrspace 1)
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[DEF]](p1) :: (load (s16) from `ptr addrspace(1) poison`, addrspace 1)
; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD]](s16)
; CHECK-NEXT: $vgpr0 = COPY [[SEXT]](s32)
; CHECK-NEXT: SI_RETURN implicit $vgpr0
- %val = load i16, ptr addrspace(1) undef
+ %val = load i16, ptr addrspace(1) poison
ret i16 %val
}
@@ -151,11 +151,11 @@ define half @f16_func_void() #0 {
; CHECK-LABEL: name: f16_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[DEF]](p1) :: (load (s16) from `ptr addrspace(1) undef`, addrspace 1)
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[DEF]](p1) :: (load (s16) from `ptr addrspace(1) poison`, addrspace 1)
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s16)
; CHECK-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
; CHECK-NEXT: SI_RETURN implicit $vgpr0
- %val = load half, ptr addrspace(1) undef
+ %val = load half, ptr addrspace(1) poison
ret half %val
}
@@ -163,11 +163,11 @@ define i24 @i24_func_void() #0 {
; CHECK-LABEL: name: i24_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s24) = G_LOAD [[DEF]](p1) :: (load (s24) from `ptr addrspace(1) undef`, align 4, addrspace 1)
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s24) = G_LOAD [[DEF]](p1) :: (load (s24) from `ptr addrspace(1) poison`, align 4, addrspace 1)
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s24)
; CHECK-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
; CHECK-NEXT: SI_RETURN implicit $vgpr0
- %val = load i24, ptr addrspace(1) undef
+ %val = load i24, ptr addrspace(1) poison
ret i24 %val
}
@@ -175,11 +175,11 @@ define zeroext i24 @i24_zeroext_func_void() #0 {
; CHECK-LABEL: name: i24_zeroext_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s24) = G_LOAD [[DEF]](p1) :: (load (s24) from `ptr addrspace(1) undef`, align 4, addrspace 1)
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s24) = G_LOAD [[DEF]](p1) :: (load (s24) from `ptr addrspace(1) poison`, align 4, addrspace 1)
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s24)
; CHECK-NEXT: $vgpr0 = COPY [[ZEXT]](s32)
; CHECK-NEXT: SI_RETURN implicit $vgpr0
- %val = load i24, ptr addrspace(1) undef
+ %val = load i24, ptr addrspace(1) poison
ret i24 %val
}
@@ -187,11 +187,11 @@ define signext i24 @i24_signext_func_void() #0 {
; CHECK-LABEL: name: i24_signext_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s24) = G_LOAD [[DEF]](p1) :: (load (s24) from `ptr addrspace(1) undef`, align 4, addrspace 1)
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s24) = G_LOAD [[DEF]](p1) :: (load (s24) from `ptr addrspace(1) poison`, align 4, addrspace 1)
; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD]](s24)
; CHECK-NEXT: $vgpr0 = COPY [[SEXT]](s32)
; CHECK-NEXT: SI_RETURN implicit $vgpr0
- %val = load i24, ptr addrspace(1) undef
+ %val = load i24, ptr addrspace(1) poison
ret i24 %val
}
@@ -199,14 +199,14 @@ define <2 x i24> @v2i24_func_void() #0 {
; CHECK-LABEL: name: v2i24_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s24>) = G_LOAD [[DEF]](p1) :: (load (<2 x s24>) from `ptr addrspace(1) undef`, align 8, addrspace 1)
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s24>) = G_LOAD [[DEF]](p1) :: (load (<2 x s24>) from `ptr addrspace(1) poison`, align 8, addrspace 1)
; CHECK-NEXT: [[UV:%[0-9]+]]:_(s24), [[UV1:%[0-9]+]]:_(s24) = G_UNMERGE_VALUES [[LOAD]](<2 x s24>)
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s24)
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s24)
; CHECK-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
; CHECK-NEXT: $vgpr1 = COPY [[ANYEXT1]](s32)
; CHECK-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
- %val = load <2 x i24>, ptr addrspace(1) undef
+ %val = load <2 x i24>, ptr addrspace(1) poison
ret <2 x i24> %val
}
@@ -214,7 +214,7 @@ define <3 x i24> @v3i24_func_void() #0 {
; CHECK-LABEL: name: v3i24_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s24>) = G_LOAD [[DEF]](p1) :: (load (<3 x s24>) from `ptr addrspace(1) undef`, align 16, addrspace 1)
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<3 x s24>) = G_LOAD [[DEF]](p1) :: (load (<3 x s24>) from `ptr addrspace(1) poison`, align 16, addrspace 1)
; CHECK-NEXT: [[UV:%[0-9]+]]:_(s24), [[UV1:%[0-9]+]]:_(s24), [[UV2:%[0-9]+]]:_(s24) = G_UNMERGE_VALUES [[LOAD]](<3 x s24>)
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s24)
; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s24)
@@ -223,7 +223,7 @@ define <3 x i24> @v3i24_func_void() #0 {
; CHECK-NEXT: $vgpr1 = COPY [[ANYEXT1]](s32)
; CHECK-NEXT: $vgpr2 = COPY [[ANYEXT2]](s32)
; CHECK-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
- %val = load <3 x i24>, ptr addrspace(1) undef
+ %val = load <3 x i24>, ptr addrspace(1) poison
ret <3 x i24> %val
}
@@ -231,10 +231,10 @@ define i32 @i32_func_void() #0 {
; CHECK-LABEL: name: i32_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p1) :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p1) :: (load (s32) from `ptr addrspace(1) poison`, addrspace 1)
; CHECK-NEXT: $vgpr0 = COPY [[LOAD]](s32)
; CHECK-NEXT: SI_RETURN implicit $vgpr0
- %val = load i32, ptr addrspace(1) undef
+ %val = load i32, ptr addrspace(1) poison
ret i32 %val
}
@@ -242,13 +242,13 @@ define i48 @i48_func_void() #0 {
; CHECK-LABEL: name: i48_func_void
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
- ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s48) = G_LOAD [[DEF]](p1) :: (load (s48) from `ptr addrspace(1) undef`, align 8, addrspace 1)
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s48) = G_LOAD [[DEF]](p1) :: (load (s48) from `ptr addrspace(1) poison`, align 8, addrspace 1)
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD]](s48)
; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ANYEXT]](s64)
; CHECK-NEXT: $vgpr0 = COPY [[UV]](s32)
; CHECK-NEXT: $vgpr1 = COPY [[UV1]](s32)
; CHECK-NEXT: SI_RETURN i...
[truncated]
Please refer to the Undefined Behavior Manual for more information.
You can test this locally with the following command:git diff -U0 --pickaxe-regex -S '([^a-zA-Z0-9#_-]undef[^a-zA-Z0-9_-]|UndefValue::get)' b76e396990ef63fa6deb97ae88a6e1c076fc6717 b89ce1d737f79f94e16ba89d5532e4bab835c89e llvm/test/CodeGen/AMDGPU/GlobalISel/amdgpu-irtranslator.ll llvm/test/CodeGen/AMDGPU/GlobalISel/bool-legalization.ll llvm/test/CodeGen/AMDGPU/GlobalISel/cvt_f32_ubyte.ll llvm/test/CodeGen/AMDGPU/GlobalISel/divergent-control-flow.ll llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.ll llvm/test/CodeGen/AMDGPU/GlobalISel/function-returns.ll llvm/test/CodeGen/AMDGPU/GlobalISel/function-returns.v2i65.ll llvm/test/CodeGen/AMDGPU/GlobalISel/implicit-kernarg-backend-usage-global-isel.ll llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.ll llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-amdgpu_kernel-system-sgprs.ll llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-amdgpu_kernel.ll llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-return-values.ll llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-sret.ll llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call.ll llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-fast-math-flags.ll llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-fixed-function-abi-vgpr-args.ll llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-function-args.ll llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-function-args.v2i65.ll llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-inline-asm.ll llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-readnone-intrinsic-callsite.ll llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-struct-return-intrinsics.ll llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-llvm.amdgcn.image.dim.a16.ll llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-llvm.amdgcn.image.load.2d.d16.ll llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-llvm.amdgcn.image.load.2d.ll llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-llvm.amdgcn.image.load.3d.ll llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.end.cf.i32.ll 
llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.end.cf.i64.ll llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.global.atomic.csub.ll llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.if.break.i32.ll llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.if.break.i64.ll llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.kernarg.segment.ptr.ll llvm/test/CodeGen/AMDGPU/GlobalISel/localizer.ll llvm/test/CodeGen/AMDGPU/GlobalISel/non-entry-alloca.ll llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-amdgcn.image.load.1d.ll llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-amdgcn.image.sample.1d.ll llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-amdgcn.s.buffer.load.ll llvm/test/CodeGen/AMDGPU/GlobalISel/smrd.ll llvm/test/CodeGen/AMDGPU/abi-attribute-hints-undefined-behavior.ll llvm/test/CodeGen/AMDGPU/adjust-writemask-invalid-copy.ll llvm/test/CodeGen/AMDGPU/agpr-csr.ll llvm/test/CodeGen/AMDGPU/amdgcn-ieee.ll llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-fold-binop-select.ll llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-i16-to-i32.ll llvm/test/CodeGen/AMDGPU/amdgpu-late-codegenprepare.ll llvm/test/CodeGen/AMDGPU/amdgpu-shader-calling-convention.ll llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll llvm/test/CodeGen/AMDGPU/anyext.ll llvm/test/CodeGen/AMDGPU/attr-amdgpu-num-sgpr.ll llvm/test/CodeGen/AMDGPU/bfe-patterns.ll llvm/test/CodeGen/AMDGPU/bfi_int.ll llvm/test/CodeGen/AMDGPU/bfi_int.r600.ll llvm/test/CodeGen/AMDGPU/branch-relaxation.ll llvm/test/CodeGen/AMDGPU/bug-v4f64-subvector.ll llvm/test/CodeGen/AMDGPU/call-argument-types.ll llvm/test/CodeGen/AMDGPU/call-constant.ll llvm/test/CodeGen/AMDGPU/call-constexpr.ll llvm/test/CodeGen/AMDGPU/call-preserved-registers.ll llvm/test/CodeGen/AMDGPU/call-return-types.ll llvm/test/CodeGen/AMDGPU/callee-special-input-sgprs-fixed-abi.ll llvm/test/CodeGen/AMDGPU/callee-special-input-vgprs-packed.ll llvm/test/CodeGen/AMDGPU/callee-special-input-vgprs.ll 
llvm/test/CodeGen/AMDGPU/calling-conventions.ll llvm/test/CodeGen/AMDGPU/captured-frame-index.ll llvm/test/CodeGen/AMDGPU/cayman-loop-bug.ll llvm/test/CodeGen/AMDGPU/cgp-addressing-modes-gfx908.ll llvm/test/CodeGen/AMDGPU/cgp-bitfield-extract.ll llvm/test/CodeGen/AMDGPU/clamp-modifier.ll llvm/test/CodeGen/AMDGPU/clamp.ll llvm/test/CodeGen/AMDGPU/cndmask-no-def-vcc.ll llvm/test/CodeGen/AMDGPU/coalesce-vgpr-alignment.ll llvm/test/CodeGen/AMDGPU/commute-compares.ll llvm/test/CodeGen/AMDGPU/constant-fold-mi-operands.ll llvm/test/CodeGen/AMDGPU/control-flow-optnone.ll llvm/test/CodeGen/AMDGPU/cross-block-use-is-not-abi-copy.ll llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll llvm/test/CodeGen/AMDGPU/cvt_f32_ubyte.ll llvm/test/CodeGen/AMDGPU/dag-divergence.ll llvm/test/CodeGen/AMDGPU/dead-machine-elim-after-dead-lane.ll llvm/test/CodeGen/AMDGPU/debug-value.ll llvm/test/CodeGen/AMDGPU/early-inline.ll llvm/test/CodeGen/AMDGPU/extract_vector_elt-f16.ll llvm/test/CodeGen/AMDGPU/extract_vector_elt-i16.ll llvm/test/CodeGen/AMDGPU/extract_vector_elt-i8.ll llvm/test/CodeGen/AMDGPU/fabs.f16.ll llvm/test/CodeGen/AMDGPU/fadd-fma-fmul-combine.ll llvm/test/CodeGen/AMDGPU/fcanonicalize-elimination.ll llvm/test/CodeGen/AMDGPU/fcanonicalize.f16.ll llvm/test/CodeGen/AMDGPU/fdiv.f16.ll llvm/test/CodeGen/AMDGPU/fdiv.f64.ll llvm/test/CodeGen/AMDGPU/fmed3.ll llvm/test/CodeGen/AMDGPU/fminnum.f64.ll llvm/test/CodeGen/AMDGPU/fneg-combines.ll llvm/test/CodeGen/AMDGPU/fneg.f16.ll llvm/test/CodeGen/AMDGPU/fp-min-max-buffer-atomics.ll llvm/test/CodeGen/AMDGPU/fp-min-max-buffer-ptr-atomics.ll llvm/test/CodeGen/AMDGPU/fpext.f16.ll llvm/test/CodeGen/AMDGPU/function-args-inreg.ll llvm/test/CodeGen/AMDGPU/function-args.ll llvm/test/CodeGen/AMDGPU/function-returns.ll llvm/test/CodeGen/AMDGPU/gfx-callable-argument-types.ll llvm/test/CodeGen/AMDGPU/gfx11-user-sgpr-init16-bug.ll llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd-wrong-subtarget.ll llvm/test/CodeGen/AMDGPU/global-saddr-atomics.ll 
llvm/test/CodeGen/AMDGPU/global-saddr-load.ll llvm/test/CodeGen/AMDGPU/global-saddr-store.ll llvm/test/CodeGen/AMDGPU/global-smrd-unknown.ll llvm/test/CodeGen/AMDGPU/hsa-metadata-heap-v5.ll llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-v4.ll llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-v5.ll llvm/test/CodeGen/AMDGPU/hsa-metadata-multigrid-sync-arg-v5.ll llvm/test/CodeGen/AMDGPU/hsa-metadata-queue-ptr-v5.ll llvm/test/CodeGen/AMDGPU/hsa-metadata-queueptr-v5.ll llvm/test/CodeGen/AMDGPU/hsa.ll llvm/test/CodeGen/AMDGPU/huge-private-buffer.ll llvm/test/CodeGen/AMDGPU/i1-copy-phi.ll llvm/test/CodeGen/AMDGPU/image-load-d16-tfe.ll llvm/test/CodeGen/AMDGPU/implicit-kernarg-backend-usage.ll llvm/test/CodeGen/AMDGPU/implicit-kernel-argument-alignment.ll llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll llvm/test/CodeGen/AMDGPU/inline-asm.ll llvm/test/CodeGen/AMDGPU/inline-maxbb.ll llvm/test/CodeGen/AMDGPU/insert_vector_elt.v2i16.ll llvm/test/CodeGen/AMDGPU/ipra-return-address-save-restore.ll llvm/test/CodeGen/AMDGPU/ipra.ll llvm/test/CodeGen/AMDGPU/kernel-args.ll llvm/test/CodeGen/AMDGPU/kernel-argument-dag-lowering.ll llvm/test/CodeGen/AMDGPU/large-alloca-compute.ll llvm/test/CodeGen/AMDGPU/large-alloca-graphics.ll llvm/test/CodeGen/AMDGPU/llvm.amdgcn.class.ll llvm/test/CodeGen/AMDGPU/llvm.amdgcn.interp.ll llvm/test/CodeGen/AMDGPU/llvm.amdgcn.is.private.ll llvm/test/CodeGen/AMDGPU/llvm.amdgcn.is.shared.ll llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kernarg.segment.ptr.ll llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.ll llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.inv.vol.ll llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.ll llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.dcache.wb.vol.ll llvm/test/CodeGen/AMDGPU/llvm.mulo.ll llvm/test/CodeGen/AMDGPU/load-constant-i16.ll llvm/test/CodeGen/AMDGPU/load-hi16.ll llvm/test/CodeGen/AMDGPU/load-lo16.ll llvm/test/CodeGen/AMDGPU/long-branch-reserve-register.ll llvm/test/CodeGen/AMDGPU/loop-on-function-argument.ll 
llvm/test/CodeGen/AMDGPU/loop_break.ll llvm/test/CodeGen/AMDGPU/lower-kernargs.ll llvm/test/CodeGen/AMDGPU/mad-mix-hi.ll llvm/test/CodeGen/AMDGPU/madak.ll llvm/test/CodeGen/AMDGPU/mmo-target-flags-folding.ll llvm/test/CodeGen/AMDGPU/mubuf-offset-private.ll llvm/test/CodeGen/AMDGPU/mul24-pass-ordering.ll llvm/test/CodeGen/AMDGPU/multi-divergent-exit-region.ll llvm/test/CodeGen/AMDGPU/multilevel-break.ll llvm/test/CodeGen/AMDGPU/nested-loop-conditions.ll llvm/test/CodeGen/AMDGPU/non-entry-alloca.ll llvm/test/CodeGen/AMDGPU/offset-split-global.ll llvm/test/CodeGen/AMDGPU/omod.ll llvm/test/CodeGen/AMDGPU/operand-folding.ll llvm/test/CodeGen/AMDGPU/or.ll llvm/test/CodeGen/AMDGPU/pack.v2f16.ll llvm/test/CodeGen/AMDGPU/pack.v2i16.ll llvm/test/CodeGen/AMDGPU/packed-op-sel.ll llvm/test/CodeGen/AMDGPU/partial-regcopy-and-spill-missed-at-regalloc.ll llvm/test/CodeGen/AMDGPU/partial-shift-shrink.ll llvm/test/CodeGen/AMDGPU/permute_i8.ll llvm/test/CodeGen/AMDGPU/promote-alloca-calling-conv.ll llvm/test/CodeGen/AMDGPU/promote-alloca-strip-abi-opt-attributes.ll llvm/test/CodeGen/AMDGPU/promote-alloca-to-lds-constantexpr-use.ll llvm/test/CodeGen/AMDGPU/promote-vect3-load.ll llvm/test/CodeGen/AMDGPU/rcp-pattern.ll llvm/test/CodeGen/AMDGPU/recursion.ll llvm/test/CodeGen/AMDGPU/ret_jump.ll llvm/test/CodeGen/AMDGPU/returnaddress.ll llvm/test/CodeGen/AMDGPU/s-getpc-b64-remat.ll llvm/test/CodeGen/AMDGPU/sad.ll llvm/test/CodeGen/AMDGPU/salu-to-valu.ll llvm/test/CodeGen/AMDGPU/scalar-branch-missing-and-exec.ll llvm/test/CodeGen/AMDGPU/scalar_to_vector.ll llvm/test/CodeGen/AMDGPU/sdwa-peephole.ll llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract-legacy.ll llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.ll llvm/test/CodeGen/AMDGPU/select-undef.ll llvm/test/CodeGen/AMDGPU/setcc-fneg-constant.ll llvm/test/CodeGen/AMDGPU/setcc-opt.ll llvm/test/CodeGen/AMDGPU/shl.ll llvm/test/CodeGen/AMDGPU/si-annotate-cf-noloop.ll llvm/test/CodeGen/AMDGPU/si-annotate-cf.ll 
llvm/test/CodeGen/AMDGPU/si-triv-disjoint-mem-access.ll llvm/test/CodeGen/AMDGPU/skip-if-dead.ll llvm/test/CodeGen/AMDGPU/sminmax.ll llvm/test/CodeGen/AMDGPU/sminmax.v2i16.ll llvm/test/CodeGen/AMDGPU/smrd.ll llvm/test/CodeGen/AMDGPU/spill-agpr.ll llvm/test/CodeGen/AMDGPU/spill-m0.ll llvm/test/CodeGen/AMDGPU/spill-vector-superclass.ll llvm/test/CodeGen/AMDGPU/spill-vgpr-to-agpr.ll llvm/test/CodeGen/AMDGPU/spill-vgpr.ll llvm/test/CodeGen/AMDGPU/sram-ecc-default.ll llvm/test/CodeGen/AMDGPU/subreg-eliminate-dead.ll llvm/test/CodeGen/AMDGPU/transform-block-with-return-to-epilog.ll llvm/test/CodeGen/AMDGPU/trunc-combine.ll llvm/test/CodeGen/AMDGPU/trunc-store-vec-i16-to-i8.ll llvm/test/CodeGen/AMDGPU/udiv.ll llvm/test/CodeGen/AMDGPU/undefined-subreg-liverange.ll llvm/test/CodeGen/AMDGPU/uniform-cfg.ll llvm/test/CodeGen/AMDGPU/unpack-half.ll llvm/test/CodeGen/AMDGPU/v_add_u64_pseudo_sdwa.ll llvm/test/CodeGen/AMDGPU/v_sub_u64_pseudo_sdwa.ll llvm/test/CodeGen/AMDGPU/vgpr-liverange-ir.ll llvm/test/CodeGen/AMDGPU/vgpr-mark-last-scratch-load.ll llvm/test/CodeGen/AMDGPU/vgpr-tuple-allocation.ll llvm/test/CodeGen/AMDGPU/visit-physreg-vgpr-imm-folding-bug.ll llvm/test/CodeGen/AMDGPU/wave32.ll llvm/test/CodeGen/AMDGPU/whole-wave-register-spill.ll llvm/test/CodeGen/AMDGPU/wqm.ll llvm/test/CodeGen/AMDGPU/xor-r600.ll llvm/test/CodeGen/AMDGPU/xor.ll The following files introduce new uses of undef:
Undef is now deprecated and should only be used in the rare cases where no replacement is possible. For example, a load of uninitialized memory yields undef. In tests, avoid using undef and having tests that trigger undefined behavior. If you need an operand with some unimportant value, you can add a new argument to the function and use that instead. For example, this is considered a bad practice: define void @fn() {
...
br i1 undef, ...
} Please use the following instead: define void @fn(i1 %cond) {
...
br i1 %cond, ...
} Please refer to the Undefined Behavior Manual for more information. |
ee3af3d
to
d5e21c3
Compare
413a29a
to
23b3d9e
Compare
212be89
to
ee004b3
Compare
23b3d9e
to
5fe605b
Compare
64e3008
to
928fe27
Compare
ee004b3
to
b89ce1d
Compare
Many tests use store to undef as a placeholder use, so just replace all of these with poison.
b89ce1d
to
37db9b5
Compare
Many tests use store to undef as a placeholder use, so just replace all of these with poison.
Many tests use store to undef as a placeholder use, so just replace
all of these with poison.