@@ -51,7 +51,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2(<vsc
 ;
 ; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2:
 ; CHECK-64: # %bb.0: # %entry
-; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; CHECK-64-NEXT: vsetivli zero, 2, e32, m1, ta, ma
 ; CHECK-64-NEXT: vslide1down.vx v8, v8, a0
 ; CHECK-64-NEXT: vslide1down.vx v8, v8, a1
 ; CHECK-64-NEXT: ret
@@ -84,7 +84,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl3(<vsc
 ;
 ; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl3:
 ; CHECK-64: # %bb.0: # %entry
-; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; CHECK-64-NEXT: vsetivli zero, 2, e32, m1, ta, ma
 ; CHECK-64-NEXT: vslide1down.vx v8, v8, a0
 ; CHECK-64-NEXT: vslide1down.vx v8, v8, a1
 ; CHECK-64-NEXT: ret
@@ -117,7 +117,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl8(<vsc
 ;
 ; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl8:
 ; CHECK-64: # %bb.0: # %entry
-; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; CHECK-64-NEXT: vsetivli zero, 2, e32, m1, ta, ma
 ; CHECK-64-NEXT: vslide1down.vx v8, v8, a0
 ; CHECK-64-NEXT: vslide1down.vx v8, v8, a1
 ; CHECK-64-NEXT: ret
@@ -152,7 +152,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl9(<vsc
 ;
 ; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl9:
 ; CHECK-64: # %bb.0: # %entry
-; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; CHECK-64-NEXT: vsetivli zero, 2, e32, m1, ta, ma
 ; CHECK-64-NEXT: vslide1down.vx v8, v8, a0
 ; CHECK-64-NEXT: vslide1down.vx v8, v8, a1
 ; CHECK-64-NEXT: ret
@@ -187,7 +187,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl15(<vs
 ;
 ; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl15:
 ; CHECK-64: # %bb.0: # %entry
-; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; CHECK-64-NEXT: vsetivli zero, 2, e32, m1, ta, ma
 ; CHECK-64-NEXT: vslide1down.vx v8, v8, a0
 ; CHECK-64-NEXT: vslide1down.vx v8, v8, a1
 ; CHECK-64-NEXT: ret
@@ -213,14 +213,14 @@ define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl16(<vs
 ;
 ; CHECK-512-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl16:
 ; CHECK-512: # %bb.0: # %entry
-; CHECK-512-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; CHECK-512-NEXT: vsetivli zero, 16, e32, m1, ta, ma
 ; CHECK-512-NEXT: vslide1down.vx v8, v8, a0
 ; CHECK-512-NEXT: vslide1down.vx v8, v8, a1
 ; CHECK-512-NEXT: ret
 ;
 ; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl16:
 ; CHECK-64: # %bb.0: # %entry
-; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; CHECK-64-NEXT: vsetivli zero, 2, e32, m1, ta, ma
 ; CHECK-64-NEXT: vslide1down.vx v8, v8, a0
 ; CHECK-64-NEXT: vslide1down.vx v8, v8, a1
 ; CHECK-64-NEXT: ret
@@ -247,14 +247,14 @@ define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2047(<
 ;
 ; CHECK-512-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2047:
 ; CHECK-512: # %bb.0: # %entry
-; CHECK-512-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; CHECK-512-NEXT: vsetivli zero, 16, e32, m1, ta, ma
 ; CHECK-512-NEXT: vslide1down.vx v8, v8, a0
 ; CHECK-512-NEXT: vslide1down.vx v8, v8, a1
 ; CHECK-512-NEXT: ret
 ;
 ; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2047:
 ; CHECK-64: # %bb.0: # %entry
-; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; CHECK-64-NEXT: vsetivli zero, 2, e32, m1, ta, ma
 ; CHECK-64-NEXT: vslide1down.vx v8, v8, a0
 ; CHECK-64-NEXT: vslide1down.vx v8, v8, a1
 ; CHECK-64-NEXT: ret
@@ -269,12 +269,26 @@ entry:
 }
 
 define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2048(<vscale x 1 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2048:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma
-; CHECK-NEXT: vslide1down.vx v8, v8, a0
-; CHECK-NEXT: vslide1down.vx v8, v8, a1
-; CHECK-NEXT: ret
+; CHECK-128-65536-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2048:
+; CHECK-128-65536: # %bb.0: # %entry
+; CHECK-128-65536-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a0
+; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a1
+; CHECK-128-65536-NEXT: ret
+;
+; CHECK-512-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2048:
+; CHECK-512: # %bb.0: # %entry
+; CHECK-512-NEXT: vsetivli zero, 16, e32, m1, ta, ma
+; CHECK-512-NEXT: vslide1down.vx v8, v8, a0
+; CHECK-512-NEXT: vslide1down.vx v8, v8, a1
+; CHECK-512-NEXT: ret
+;
+; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2048:
+; CHECK-64: # %bb.0: # %entry
+; CHECK-64-NEXT: vsetivli zero, 2, e32, m1, ta, ma
+; CHECK-64-NEXT: vslide1down.vx v8, v8, a0
+; CHECK-64-NEXT: vslide1down.vx v8, v8, a1
+; CHECK-64-NEXT: ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64(
     <vscale x 1 x i64> undef,
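Note on the new immediates: the switch from "vsetvli a2, zero" (AVL = VLMAX) to "vsetivli" with an immediate AVL is possible because these RUN lines pin the exact VLEN, which makes VLMAX a compile-time constant. On RV32 an i64-element vslide1down is legalized into two e32 vslide1down steps with the VL doubled, so with Zvl64b (VLMAX = 1 for nxv1i64) the e32 VL is 2, and with Zvl512b (VLMAX = 8) it is 16, matching the immediates above; the CHECK-128-65536 run keeps vsetvli because its VLEN is only bounded to a range, not known exactly. Below is a minimal sketch of the IR pattern these tests exercise, assuming the upstream intrinsic signature (passthru, source vector, scalar, vl); the function name and the AVL of 2 are illustrative, not taken from this diff:

; Reduced example of one test body (hypothetical function name).
declare <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64(
  <vscale x 1 x i64>, <vscale x 1 x i64>, i64, i64)

define <vscale x 1 x i64> @sketch_vl2(<vscale x 1 x i64> %vec, i64 %scalar) nounwind {
entry:
  ; AVL = 2; on RV32 with an exactly known VLEN of 64, VLMAX for nxv1i64 is 1,
  ; so this lowers to two e32 slides under "vsetivli zero, 2, e32, m1, ta, ma".
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %vec,
    i64 %scalar,
    i64 2)
  ret <vscale x 1 x i64> %a
}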