
Commit 57aee4e

[RISCV] Add check-prefixes ZVFH for zvfh and CHECK for zvfhmin. NFC.
Sharing the CHECK prefix between the Zvfh and Zvfhmin run lines makes it easier to see where the two configurations produce the same output and where they differ.
1 parent d0580b8 · commit 57aee4e
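To make the pattern concrete, here is a minimal sketch of how shared and per-configuration FileCheck prefixes interact (a hypothetical test written for illustration, not part of this commit). Output that is identical under Zvfh and Zvfhmin is checked once under the shared CHECK prefix; only genuinely divergent f16 code needs separate ZVFH/ZVFHMIN check lines.

; Hypothetical illustration of the shared-prefix scheme, not from this commit.
; Both RUN lines pass CHECK, so assembly common to Zvfh and Zvfhmin is
; checked once; f16 code that differs would use ZVFH/ZVFHMIN lines instead.
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN

; f32 arithmetic is the same with Zvfh or Zvfhmin, so one CHECK body
; covers both RUN lines.
define float @fadd_f32(float %a, float %b) {
; CHECK-LABEL: fadd_f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    fadd.s fa0, fa0, fa1
; CHECK-NEXT:    ret
  %r = fadd float %a, %b
  ret float %r
}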

20 files changed: +1636 −6270 lines

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
126 additions, 529 deletions (large diff not rendered by default)

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
126 additions, 529 deletions (large diff not rendered by default)

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround.ll
89 additions, 224 deletions (large diff not rendered by default)

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll
89 additions, 224 deletions (large diff not rendered by default)

Rendered diff below: 36 additions, 174 deletions
@@ -1,24 +1,24 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 
 declare half @llvm.vp.reduce.fadd.v2f16(half, <2 x half>, <2 x i1>, i32)
 
 define half @vpreduce_fadd_v2f16(half %s, <2 x half> %v, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_fadd_v2f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT: vfmv.s.f v9, fa0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t
-; CHECK-NEXT: vfmv.f.s fa0, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vpreduce_fadd_v2f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; ZVFH-NEXT: vfmv.s.f v9, fa0
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vfredusum.vs v9, v8, v9, v0.t
+; ZVFH-NEXT: vfmv.f.s fa0, v9
+; ZVFH-NEXT: ret
 ;
 ; ZVFHMIN-LABEL: vpreduce_fadd_v2f16:
 ; ZVFHMIN: # %bb.0:
@@ -37,14 +37,14 @@ define half @vpreduce_fadd_v2f16(half %s, <2 x half> %v, <2 x i1> %m, i32 zeroex
 }
 
 define half @vpreduce_ord_fadd_v2f16(half %s, <2 x half> %v, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_ord_fadd_v2f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT: vfmv.s.f v9, fa0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t
-; CHECK-NEXT: vfmv.f.s fa0, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vpreduce_ord_fadd_v2f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; ZVFH-NEXT: vfmv.s.f v9, fa0
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vfredosum.vs v9, v8, v9, v0.t
+; ZVFH-NEXT: vfmv.f.s fa0, v9
+; ZVFH-NEXT: ret
 ;
 ; ZVFHMIN-LABEL: vpreduce_ord_fadd_v2f16:
 ; ZVFHMIN: # %bb.0:
@@ -65,14 +65,14 @@ define half @vpreduce_ord_fadd_v2f16(half %s, <2 x half> %v, <2 x i1> %m, i32 ze
 declare half @llvm.vp.reduce.fadd.v4f16(half, <4 x half>, <4 x i1>, i32)
 
 define half @vpreduce_fadd_v4f16(half %s, <4 x half> %v, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_fadd_v4f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; CHECK-NEXT: vfmv.s.f v9, fa0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t
-; CHECK-NEXT: vfmv.f.s fa0, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vpreduce_fadd_v4f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; ZVFH-NEXT: vfmv.s.f v9, fa0
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT: vfredusum.vs v9, v8, v9, v0.t
+; ZVFH-NEXT: vfmv.f.s fa0, v9
+; ZVFH-NEXT: ret
 ;
 ; ZVFHMIN-LABEL: vpreduce_fadd_v4f16:
 ; ZVFHMIN: # %bb.0:
@@ -91,14 +91,14 @@ define half @vpreduce_fadd_v4f16(half %s, <4 x half> %v, <4 x i1> %m, i32 zeroex
 }
 
 define half @vpreduce_ord_fadd_v4f16(half %s, <4 x half> %v, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_ord_fadd_v4f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; CHECK-NEXT: vfmv.s.f v9, fa0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t
-; CHECK-NEXT: vfmv.f.s fa0, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vpreduce_ord_fadd_v4f16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; ZVFH-NEXT: vfmv.s.f v9, fa0
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT: vfredosum.vs v9, v8, v9, v0.t
+; ZVFH-NEXT: vfmv.f.s fa0, v9
+; ZVFH-NEXT: ret
 ;
 ; ZVFHMIN-LABEL: vpreduce_ord_fadd_v4f16:
 ; ZVFHMIN: # %bb.0:
@@ -127,15 +127,6 @@ define float @vpreduce_fadd_v2f32(float %s, <2 x float> %v, <2 x i1> %m, i32 zer
 ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vfmv.f.s fa0, v9
 ; CHECK-NEXT: ret
-;
-; ZVFHMIN-LABEL: vpreduce_fadd_v2f32:
-; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMIN-NEXT: vfmv.s.f v9, fa0
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT: vfredusum.vs v9, v8, v9, v0.t
-; ZVFHMIN-NEXT: vfmv.f.s fa0, v9
-; ZVFHMIN-NEXT: ret
   %r = call reassoc float @llvm.vp.reduce.fadd.v2f32(float %s, <2 x float> %v, <2 x i1> %m, i32 %evl)
   ret float %r
 }
@@ -149,15 +140,6 @@ define float @vpreduce_ord_fadd_v2f32(float %s, <2 x float> %v, <2 x i1> %m, i32
 ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vfmv.f.s fa0, v9
 ; CHECK-NEXT: ret
-;
-; ZVFHMIN-LABEL: vpreduce_ord_fadd_v2f32:
-; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMIN-NEXT: vfmv.s.f v9, fa0
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT: vfredosum.vs v9, v8, v9, v0.t
-; ZVFHMIN-NEXT: vfmv.f.s fa0, v9
-; ZVFHMIN-NEXT: ret
   %r = call float @llvm.vp.reduce.fadd.v2f32(float %s, <2 x float> %v, <2 x i1> %m, i32 %evl)
   ret float %r
 }
@@ -173,15 +155,6 @@ define float @vpreduce_fadd_v4f32(float %s, <4 x float> %v, <4 x i1> %m, i32 zer
 ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vfmv.f.s fa0, v9
 ; CHECK-NEXT: ret
-;
-; ZVFHMIN-LABEL: vpreduce_fadd_v4f32:
-; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; ZVFHMIN-NEXT: vfmv.s.f v9, fa0
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT: vfredusum.vs v9, v8, v9, v0.t
-; ZVFHMIN-NEXT: vfmv.f.s fa0, v9
-; ZVFHMIN-NEXT: ret
   %r = call reassoc float @llvm.vp.reduce.fadd.v4f32(float %s, <4 x float> %v, <4 x i1> %m, i32 %evl)
   ret float %r
 }
@@ -195,15 +168,6 @@ define float @vpreduce_ord_fadd_v4f32(float %s, <4 x float> %v, <4 x i1> %m, i32
 ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vfmv.f.s fa0, v9
 ; CHECK-NEXT: ret
-;
-; ZVFHMIN-LABEL: vpreduce_ord_fadd_v4f32:
-; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; ZVFHMIN-NEXT: vfmv.s.f v9, fa0
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT: vfredosum.vs v9, v8, v9, v0.t
-; ZVFHMIN-NEXT: vfmv.f.s fa0, v9
-; ZVFHMIN-NEXT: ret
   %r = call float @llvm.vp.reduce.fadd.v4f32(float %s, <4 x float> %v, <4 x i1> %m, i32 %evl)
   ret float %r
 }
@@ -234,30 +198,6 @@ define float @vpreduce_fadd_v64f32(float %s, <64 x float> %v, <64 x i1> %m, i32
 ; CHECK-NEXT: vfredusum.vs v25, v16, v25, v0.t
 ; CHECK-NEXT: vfmv.f.s fa0, v25
 ; CHECK-NEXT: ret
-;
-; ZVFHMIN-LABEL: vpreduce_fadd_v64f32:
-; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: li a2, 32
-; ZVFHMIN-NEXT: vslidedown.vi v24, v0, 4
-; ZVFHMIN-NEXT: mv a1, a0
-; ZVFHMIN-NEXT: bltu a0, a2, .LBB8_2
-; ZVFHMIN-NEXT: # %bb.1:
-; ZVFHMIN-NEXT: li a1, 32
-; ZVFHMIN-NEXT: .LBB8_2:
-; ZVFHMIN-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; ZVFHMIN-NEXT: vfmv.s.f v25, fa0
-; ZVFHMIN-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfredusum.vs v25, v8, v25, v0.t
-; ZVFHMIN-NEXT: addi a1, a0, -32
-; ZVFHMIN-NEXT: sltu a0, a0, a1
-; ZVFHMIN-NEXT: addi a0, a0, -1
-; ZVFHMIN-NEXT: and a0, a0, a1
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v24
-; ZVFHMIN-NEXT: vfredusum.vs v25, v16, v25, v0.t
-; ZVFHMIN-NEXT: vfmv.f.s fa0, v25
-; ZVFHMIN-NEXT: ret
   %r = call reassoc float @llvm.vp.reduce.fadd.v64f32(float %s, <64 x float> %v, <64 x i1> %m, i32 %evl)
   ret float %r
 }
@@ -286,30 +226,6 @@ define float @vpreduce_ord_fadd_v64f32(float %s, <64 x float> %v, <64 x i1> %m,
 ; CHECK-NEXT: vfredosum.vs v25, v16, v25, v0.t
 ; CHECK-NEXT: vfmv.f.s fa0, v25
 ; CHECK-NEXT: ret
-;
-; ZVFHMIN-LABEL: vpreduce_ord_fadd_v64f32:
-; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: li a2, 32
-; ZVFHMIN-NEXT: vslidedown.vi v24, v0, 4
-; ZVFHMIN-NEXT: mv a1, a0
-; ZVFHMIN-NEXT: bltu a0, a2, .LBB9_2
-; ZVFHMIN-NEXT: # %bb.1:
-; ZVFHMIN-NEXT: li a1, 32
-; ZVFHMIN-NEXT: .LBB9_2:
-; ZVFHMIN-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; ZVFHMIN-NEXT: vfmv.s.f v25, fa0
-; ZVFHMIN-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfredosum.vs v25, v8, v25, v0.t
-; ZVFHMIN-NEXT: addi a1, a0, -32
-; ZVFHMIN-NEXT: sltu a0, a0, a1
-; ZVFHMIN-NEXT: addi a0, a0, -1
-; ZVFHMIN-NEXT: and a0, a0, a1
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v24
-; ZVFHMIN-NEXT: vfredosum.vs v25, v16, v25, v0.t
-; ZVFHMIN-NEXT: vfmv.f.s fa0, v25
-; ZVFHMIN-NEXT: ret
   %r = call float @llvm.vp.reduce.fadd.v64f32(float %s, <64 x float> %v, <64 x i1> %m, i32 %evl)
   ret float %r
 }
@@ -325,15 +241,6 @@ define double @vpreduce_fadd_v2f64(double %s, <2 x double> %v, <2 x i1> %m, i32
 ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vfmv.f.s fa0, v9
 ; CHECK-NEXT: ret
-;
-; ZVFHMIN-LABEL: vpreduce_fadd_v2f64:
-; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; ZVFHMIN-NEXT: vfmv.s.f v9, fa0
-; ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT: vfredusum.vs v9, v8, v9, v0.t
-; ZVFHMIN-NEXT: vfmv.f.s fa0, v9
-; ZVFHMIN-NEXT: ret
   %r = call reassoc double @llvm.vp.reduce.fadd.v2f64(double %s, <2 x double> %v, <2 x i1> %m, i32 %evl)
   ret double %r
 }
@@ -347,15 +254,6 @@ define double @vpreduce_ord_fadd_v2f64(double %s, <2 x double> %v, <2 x i1> %m,
 ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT: vfmv.f.s fa0, v9
 ; CHECK-NEXT: ret
-;
-; ZVFHMIN-LABEL: vpreduce_ord_fadd_v2f64:
-; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; ZVFHMIN-NEXT: vfmv.s.f v9, fa0
-; ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT: vfredosum.vs v9, v8, v9, v0.t
-; ZVFHMIN-NEXT: vfmv.f.s fa0, v9
-; ZVFHMIN-NEXT: ret
   %r = call double @llvm.vp.reduce.fadd.v2f64(double %s, <2 x double> %v, <2 x i1> %m, i32 %evl)
   ret double %r
 }
@@ -371,15 +269,6 @@ define double @vpreduce_fadd_v3f64(double %s, <3 x double> %v, <3 x i1> %m, i32
 ; CHECK-NEXT: vfredusum.vs v10, v8, v10, v0.t
 ; CHECK-NEXT: vfmv.f.s fa0, v10
 ; CHECK-NEXT: ret
-;
-; ZVFHMIN-LABEL: vpreduce_fadd_v3f64:
-; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; ZVFHMIN-NEXT: vfmv.s.f v10, fa0
-; ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT: vfredusum.vs v10, v8, v10, v0.t
-; ZVFHMIN-NEXT: vfmv.f.s fa0, v10
-; ZVFHMIN-NEXT: ret
   %r = call reassoc double @llvm.vp.reduce.fadd.v3f64(double %s, <3 x double> %v, <3 x i1> %m, i32 %evl)
   ret double %r
 }
@@ -393,15 +282,6 @@ define double @vpreduce_ord_fadd_v3f64(double %s, <3 x double> %v, <3 x i1> %m,
 ; CHECK-NEXT: vfredosum.vs v10, v8, v10, v0.t
 ; CHECK-NEXT: vfmv.f.s fa0, v10
 ; CHECK-NEXT: ret
-;
-; ZVFHMIN-LABEL: vpreduce_ord_fadd_v3f64:
-; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; ZVFHMIN-NEXT: vfmv.s.f v10, fa0
-; ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT: vfredosum.vs v10, v8, v10, v0.t
-; ZVFHMIN-NEXT: vfmv.f.s fa0, v10
-; ZVFHMIN-NEXT: ret
   %r = call double @llvm.vp.reduce.fadd.v3f64(double %s, <3 x double> %v, <3 x i1> %m, i32 %evl)
   ret double %r
 }
@@ -417,15 +297,6 @@ define double @vpreduce_fadd_v4f64(double %s, <4 x double> %v, <4 x i1> %m, i32
 ; CHECK-NEXT: vfredusum.vs v10, v8, v10, v0.t
 ; CHECK-NEXT: vfmv.f.s fa0, v10
 ; CHECK-NEXT: ret
-;
-; ZVFHMIN-LABEL: vpreduce_fadd_v4f64:
-; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; ZVFHMIN-NEXT: vfmv.s.f v10, fa0
-; ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT: vfredusum.vs v10, v8, v10, v0.t
-; ZVFHMIN-NEXT: vfmv.f.s fa0, v10
-; ZVFHMIN-NEXT: ret
   %r = call reassoc double @llvm.vp.reduce.fadd.v4f64(double %s, <4 x double> %v, <4 x i1> %m, i32 %evl)
   ret double %r
 }
@@ -439,15 +310,6 @@ define double @vpreduce_ord_fadd_v4f64(double %s, <4 x double> %v, <4 x i1> %m,
 ; CHECK-NEXT: vfredosum.vs v10, v8, v10, v0.t
 ; CHECK-NEXT: vfmv.f.s fa0, v10
 ; CHECK-NEXT: ret
-;
-; ZVFHMIN-LABEL: vpreduce_ord_fadd_v4f64:
-; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; ZVFHMIN-NEXT: vfmv.s.f v10, fa0
-; ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT: vfredosum.vs v10, v8, v10, v0.t
-; ZVFHMIN-NEXT: vfmv.f.s fa0, v10
-; ZVFHMIN-NEXT: ret
   %r = call double @llvm.vp.reduce.fadd.v4f64(double %s, <4 x double> %v, <4 x i1> %m, i32 %evl)
   ret double %r
 }
