Skip to content

Commit 6e83c0a

Browse files
committed
[X86] Convert tests to opaque pointers (NFC)
1 parent 00a4e24 commit 6e83c0a

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

50 files changed

+1937
-1939
lines changed

llvm/test/CodeGen/X86/AMX/amx-combine.ll

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -133,21 +133,21 @@ entry:
133133
ret void
134134
}
135135

136-
define void @combine_v256i8amcast_with_store(i8* %src_ptr, <256 x i8>* %dst_ptr) {
136+
define void @combine_v256i8amcast_with_store(ptr %src_ptr, ptr %dst_ptr) {
137137
; CHECK-LABEL: @combine_v256i8amcast_with_store(
138138
; CHECK-NEXT: entry:
139139
; CHECK-NEXT: [[TILE:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 32, ptr [[SRC_PTR:%.*]], i64 64)
140140
; CHECK-NEXT: call void @llvm.x86.tilestored64.internal(i16 8, i16 32, ptr [[DST_PTR:%.*]], i64 32, x86_amx [[TILE]])
141141
; CHECK-NEXT: ret void
142142
;
143143
entry:
144-
%tile = call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 32, i8* %src_ptr, i64 64)
144+
%tile = call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 32, ptr %src_ptr, i64 64)
145145
%vec = call <256 x i8> @llvm.x86.cast.tile.to.vector.v256i8(x86_amx %tile)
146-
store <256 x i8> %vec, <256 x i8>* %dst_ptr, align 256
146+
store <256 x i8> %vec, ptr %dst_ptr, align 256
147147
ret void
148148
}
149149

150-
define void @combine_v256i8amcast_with_load(i8* %src_ptr, <256 x i8>* %dst_ptr) {
150+
define void @combine_v256i8amcast_with_load(ptr %src_ptr, ptr %dst_ptr) {
151151
; CHECK-LABEL: @combine_v256i8amcast_with_load(
152152
; CHECK-NEXT: entry:
153153
; CHECK-NEXT: [[TMP0:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 32, ptr [[SRC_PTR:%.*]], i64 32)
@@ -157,7 +157,7 @@ define void @combine_v256i8amcast_with_load(i8* %src_ptr, <256 x i8>* %dst_ptr)
157157
entry:
158158
%vec = load <256 x i8>, ptr %src_ptr, align 256
159159
%tile = call x86_amx @llvm.x86.cast.vector.to.tile.v256i8(<256 x i8> %vec)
160-
call void @llvm.x86.tilestored64.internal(i16 8, i16 32, <256 x i8>* %dst_ptr, i64 32, x86_amx %tile)
160+
call void @llvm.x86.tilestored64.internal(i16 8, i16 32, ptr %dst_ptr, i64 32, x86_amx %tile)
161161
ret void
162162
}
163163

llvm/test/CodeGen/X86/AMX/amx-tile-complex-internals.ll

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33
; RUN: -mattr=+amx-complex \
44
; RUN: -verify-machineinstrs | FileCheck %s
55

6-
define void @test_amx(i8* %pointer, i8* %base, i64 %stride) {
6+
define void @test_amx(ptr %pointer, ptr %base, i64 %stride) {
77
; CHECK-LABEL: test_amx:
88
; CHECK: # %bb.0:
99
; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0
@@ -27,21 +27,21 @@ define void @test_amx(i8* %pointer, i8* %base, i64 %stride) {
2727
; CHECK-NEXT: vzeroupper
2828
; CHECK-NEXT: retq
2929

30-
%a = call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 8, i8* %base, i64 %stride)
30+
%a = call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 8, ptr %base, i64 %stride)
3131
%b = call x86_amx @llvm.x86.tilezero.internal(i16 8, i16 8)
3232
%c = call x86_amx @llvm.x86.tilezero.internal(i16 8, i16 8)
3333

3434
%c1 = call x86_amx @llvm.x86.tcmmimfp16ps.internal(i16 8, i16 8, i16 8, x86_amx %c, x86_amx %a, x86_amx %b)
3535
%c2 = call x86_amx @llvm.x86.tcmmrlfp16ps.internal(i16 8, i16 8, i16 8, x86_amx %c1, x86_amx %a, x86_amx %b)
3636

37-
call void @llvm.x86.tilestored64.internal(i16 8, i16 8, i8* %pointer, i64 %stride, x86_amx %c2)
37+
call void @llvm.x86.tilestored64.internal(i16 8, i16 8, ptr %pointer, i64 %stride, x86_amx %c2)
3838
ret void
3939
}
4040

4141
declare x86_amx @llvm.x86.tilezero.internal(i16, i16)
42-
declare x86_amx @llvm.x86.tileloadd64.internal(i16, i16, i8*, i64)
43-
declare x86_amx @llvm.x86.tileloaddt164.internal(i16, i16, i8*, i64)
44-
declare void @llvm.x86.tilestored64.internal(i16, i16, i8*, i64, x86_amx)
42+
declare x86_amx @llvm.x86.tileloadd64.internal(i16, i16, ptr, i64)
43+
declare x86_amx @llvm.x86.tileloaddt164.internal(i16, i16, ptr, i64)
44+
declare void @llvm.x86.tilestored64.internal(i16, i16, ptr, i64, x86_amx)
4545

4646
declare x86_amx @llvm.x86.tcmmimfp16ps.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)
4747
declare x86_amx @llvm.x86.tcmmrlfp16ps.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)

llvm/test/CodeGen/X86/GlobalISel/x86_64-irtranslator-struct-return.ll

Lines changed: 63 additions & 63 deletions
Original file line numberDiff line numberDiff line change
@@ -26,17 +26,17 @@ define float @test_return_f1(float %f.coerce) {
2626
entry:
2727
%retval = alloca %struct.f1, align 4
2828
%f = alloca %struct.f1, align 4
29-
%coerce.dive = getelementptr inbounds %struct.f1, %struct.f1* %f, i32 0, i32 0
30-
store float %f.coerce, float* %coerce.dive, align 4
31-
%0 = bitcast %struct.f1* %retval to i8*
32-
%1 = bitcast %struct.f1* %f to i8*
33-
call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 4, i1 false)
34-
%coerce.dive1 = getelementptr inbounds %struct.f1, %struct.f1* %retval, i32 0, i32 0
35-
%2 = load float, float* %coerce.dive1, align 4
29+
%coerce.dive = getelementptr inbounds %struct.f1, ptr %f, i32 0, i32 0
30+
store float %f.coerce, ptr %coerce.dive, align 4
31+
%0 = bitcast ptr %retval to ptr
32+
%1 = bitcast ptr %f to ptr
33+
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %0, ptr align 4 %1, i64 4, i1 false)
34+
%coerce.dive1 = getelementptr inbounds %struct.f1, ptr %retval, i32 0, i32 0
35+
%2 = load float, ptr %coerce.dive1, align 4
3636
ret float %2
3737
}
3838

39-
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #1
39+
declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1) #1
4040

4141
define double @test_return_d1(double %d.coerce) {
4242
; ALL-LABEL: name: test_return_d1
@@ -55,13 +55,13 @@ define double @test_return_d1(double %d.coerce) {
5555
entry:
5656
%retval = alloca %struct.d1, align 8
5757
%d = alloca %struct.d1, align 8
58-
%coerce.dive = getelementptr inbounds %struct.d1, %struct.d1* %d, i32 0, i32 0
59-
store double %d.coerce, double* %coerce.dive, align 8
60-
%0 = bitcast %struct.d1* %retval to i8*
61-
%1 = bitcast %struct.d1* %d to i8*
62-
call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %0, i8* align 8 %1, i64 8, i1 false)
63-
%coerce.dive1 = getelementptr inbounds %struct.d1, %struct.d1* %retval, i32 0, i32 0
64-
%2 = load double, double* %coerce.dive1, align 8
58+
%coerce.dive = getelementptr inbounds %struct.d1, ptr %d, i32 0, i32 0
59+
store double %d.coerce, ptr %coerce.dive, align 8
60+
%0 = bitcast ptr %retval to ptr
61+
%1 = bitcast ptr %d to ptr
62+
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %0, ptr align 8 %1, i64 8, i1 false)
63+
%coerce.dive1 = getelementptr inbounds %struct.d1, ptr %retval, i32 0, i32 0
64+
%2 = load double, ptr %coerce.dive1, align 8
6565
ret double %2
6666
}
6767

@@ -89,16 +89,16 @@ define { double, double } @test_return_d2(double %d.coerce0, double %d.coerce1)
8989
entry:
9090
%retval = alloca %struct.d2, align 8
9191
%d = alloca %struct.d2, align 8
92-
%0 = bitcast %struct.d2* %d to { double, double }*
93-
%1 = getelementptr inbounds { double, double }, { double, double }* %0, i32 0, i32 0
94-
store double %d.coerce0, double* %1, align 8
95-
%2 = getelementptr inbounds { double, double }, { double, double }* %0, i32 0, i32 1
96-
store double %d.coerce1, double* %2, align 8
97-
%3 = bitcast %struct.d2* %retval to i8*
98-
%4 = bitcast %struct.d2* %d to i8*
99-
call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %3, i8* align 8 %4, i64 16, i1 false)
100-
%5 = bitcast %struct.d2* %retval to { double, double }*
101-
%6 = load { double, double }, { double, double }* %5, align 8
92+
%0 = bitcast ptr %d to ptr
93+
%1 = getelementptr inbounds { double, double }, ptr %0, i32 0, i32 0
94+
store double %d.coerce0, ptr %1, align 8
95+
%2 = getelementptr inbounds { double, double }, ptr %0, i32 0, i32 1
96+
store double %d.coerce1, ptr %2, align 8
97+
%3 = bitcast ptr %retval to ptr
98+
%4 = bitcast ptr %d to ptr
99+
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %3, ptr align 8 %4, i64 16, i1 false)
100+
%5 = bitcast ptr %retval to ptr
101+
%6 = load { double, double }, ptr %5, align 8
102102
ret { double, double } %6
103103
}
104104

@@ -119,13 +119,13 @@ define i32 @test_return_i1(i32 %i.coerce) {
119119
entry:
120120
%retval = alloca %struct.i1, align 4
121121
%i = alloca %struct.i1, align 4
122-
%coerce.dive = getelementptr inbounds %struct.i1, %struct.i1* %i, i32 0, i32 0
123-
store i32 %i.coerce, i32* %coerce.dive, align 4
124-
%0 = bitcast %struct.i1* %retval to i8*
125-
%1 = bitcast %struct.i1* %i to i8*
126-
call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 4, i1 false)
127-
%coerce.dive1 = getelementptr inbounds %struct.i1, %struct.i1* %retval, i32 0, i32 0
128-
%2 = load i32, i32* %coerce.dive1, align 4
122+
%coerce.dive = getelementptr inbounds %struct.i1, ptr %i, i32 0, i32 0
123+
store i32 %i.coerce, ptr %coerce.dive, align 4
124+
%0 = bitcast ptr %retval to ptr
125+
%1 = bitcast ptr %i to ptr
126+
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %0, ptr align 4 %1, i64 4, i1 false)
127+
%coerce.dive1 = getelementptr inbounds %struct.i1, ptr %retval, i32 0, i32 0
128+
%2 = load i32, ptr %coerce.dive1, align 4
129129
ret i32 %2
130130
}
131131

@@ -146,13 +146,13 @@ define i64 @test_return_i2(i64 %i.coerce) {
146146
entry:
147147
%retval = alloca %struct.i2, align 4
148148
%i = alloca %struct.i2, align 4
149-
%0 = bitcast %struct.i2* %i to i64*
150-
store i64 %i.coerce, i64* %0, align 4
151-
%1 = bitcast %struct.i2* %retval to i8*
152-
%2 = bitcast %struct.i2* %i to i8*
153-
call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %1, i8* align 4 %2, i64 8, i1 false)
154-
%3 = bitcast %struct.i2* %retval to i64*
155-
%4 = load i64, i64* %3, align 4
149+
%0 = bitcast ptr %i to ptr
150+
store i64 %i.coerce, ptr %0, align 4
151+
%1 = bitcast ptr %retval to ptr
152+
%2 = bitcast ptr %i to ptr
153+
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %1, ptr align 4 %2, i64 8, i1 false)
154+
%3 = bitcast ptr %retval to ptr
155+
%4 = load i64, ptr %3, align 4
156156
ret i64 %4
157157
}
158158

@@ -186,20 +186,20 @@ entry:
186186
%i = alloca %struct.i3, align 4
187187
%coerce = alloca { i64, i32 }, align 4
188188
%tmp = alloca { i64, i32 }, align 8
189-
%0 = getelementptr inbounds { i64, i32 }, { i64, i32 }* %coerce, i32 0, i32 0
190-
store i64 %i.coerce0, i64* %0, align 4
191-
%1 = getelementptr inbounds { i64, i32 }, { i64, i32 }* %coerce, i32 0, i32 1
192-
store i32 %i.coerce1, i32* %1, align 4
193-
%2 = bitcast %struct.i3* %i to i8*
194-
%3 = bitcast { i64, i32 }* %coerce to i8*
195-
call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %2, i8* align 4 %3, i64 12, i1 false)
196-
%4 = bitcast %struct.i3* %retval to i8*
197-
%5 = bitcast %struct.i3* %i to i8*
198-
call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %4, i8* align 4 %5, i64 12, i1 false)
199-
%6 = bitcast { i64, i32 }* %tmp to i8*
200-
%7 = bitcast %struct.i3* %retval to i8*
201-
call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %6, i8* align 4 %7, i64 12, i1 false)
202-
%8 = load { i64, i32 }, { i64, i32 }* %tmp, align 8
189+
%0 = getelementptr inbounds { i64, i32 }, ptr %coerce, i32 0, i32 0
190+
store i64 %i.coerce0, ptr %0, align 4
191+
%1 = getelementptr inbounds { i64, i32 }, ptr %coerce, i32 0, i32 1
192+
store i32 %i.coerce1, ptr %1, align 4
193+
%2 = bitcast ptr %i to ptr
194+
%3 = bitcast ptr %coerce to ptr
195+
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %2, ptr align 4 %3, i64 12, i1 false)
196+
%4 = bitcast ptr %retval to ptr
197+
%5 = bitcast ptr %i to ptr
198+
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %4, ptr align 4 %5, i64 12, i1 false)
199+
%6 = bitcast ptr %tmp to ptr
200+
%7 = bitcast ptr %retval to ptr
201+
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %6, ptr align 4 %7, i64 12, i1 false)
202+
%8 = load { i64, i32 }, ptr %tmp, align 8
203203
ret { i64, i32 } %8
204204
}
205205

@@ -227,15 +227,15 @@ define { i64, i64 } @test_return_i4(i64 %i.coerce0, i64 %i.coerce1) {
227227
entry:
228228
%retval = alloca %struct.i4, align 4
229229
%i = alloca %struct.i4, align 4
230-
%0 = bitcast %struct.i4* %i to { i64, i64 }*
231-
%1 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %0, i32 0, i32 0
232-
store i64 %i.coerce0, i64* %1, align 4
233-
%2 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %0, i32 0, i32 1
234-
store i64 %i.coerce1, i64* %2, align 4
235-
%3 = bitcast %struct.i4* %retval to i8*
236-
%4 = bitcast %struct.i4* %i to i8*
237-
call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %3, i8* align 4 %4, i64 16, i1 false)
238-
%5 = bitcast %struct.i4* %retval to { i64, i64 }*
239-
%6 = load { i64, i64 }, { i64, i64 }* %5, align 4
230+
%0 = bitcast ptr %i to ptr
231+
%1 = getelementptr inbounds { i64, i64 }, ptr %0, i32 0, i32 0
232+
store i64 %i.coerce0, ptr %1, align 4
233+
%2 = getelementptr inbounds { i64, i64 }, ptr %0, i32 0, i32 1
234+
store i64 %i.coerce1, ptr %2, align 4
235+
%3 = bitcast ptr %retval to ptr
236+
%4 = bitcast ptr %i to ptr
237+
call void @llvm.memcpy.p0.p0.i64(ptr align 4 %3, ptr align 4 %4, i64 16, i1 false)
238+
%5 = bitcast ptr %retval to ptr
239+
%6 = load { i64, i64 }, ptr %5, align 4
240240
ret { i64, i64 } %6
241241
}

0 commit comments

Comments (0)