@@ -64,11 +64,11 @@ void test_builtin_reduce_min(float4 vf1, si8 vi1, u4 vu1) {
 
 void test_builtin_reduce_addf(float4 vf4, double4 vd4) {
   // CHECK: [[VF4:%.+]] = load <4 x float>, ptr %vf4.addr, align 16
-  // CHECK-NEXT: call float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[VF4]])
+  // CHECK-NEXT: call float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[VF4]])
   float r2 = __builtin_reduce_add(vf4);
 
   // CHECK: [[VD4:%.+]] = load <4 x double>, ptr %vd4.addr, align 16
-  // CHECK-NEXT: call double @llvm.vector.reduce.fadd.v4f64(double 0.000000e+00, <4 x double> [[VD4]])
+  // CHECK-NEXT: call double @llvm.vector.reduce.fadd.v4f64(double -0.000000e+00, <4 x double> [[VD4]])
   double r3 = __builtin_reduce_add(vd4);
 }
 
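The expected start value changes from +0.0 to -0.0 because -0.0 is the identity element of IEEE 754 addition: x + (-0.0) == x for every x, whereas seeding the reduction with +0.0 would turn a -0.0 result into +0.0. A minimal standalone sketch (not part of the test file) showing the difference:

#include <stdio.h>

int main(void) {
    /* -0.0 is the additive identity: the sign of the result survives. */
    printf("%g\n", -0.0 + -0.0); /* prints -0 */
    /* Seeding with +0.0 is not neutral: it loses the sign. */
    printf("%g\n",  0.0 + -0.0); /* prints 0 */
    return 0;
}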
@@ -96,11 +96,11 @@ void test_builtin_reduce_add(si8 vi1, u4 vu1) {
 
 void test_builtin_reduce_mulf(float4 vf4, double4 vd4) {
   // CHECK: [[VF4:%.+]] = load <4 x float>, ptr %vf4.addr, align 16
-  // CHECK-NEXT: call float @llvm.vector.reduce.fmul.v4f32(float 0.000000e+00, <4 x float> [[VF4]])
+  // CHECK-NEXT: call float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[VF4]])
   float r2 = __builtin_reduce_mul(vf4);
 
   // CHECK: [[VD4:%.+]] = load <4 x double>, ptr %vd4.addr, align 16
-  // CHECK-NEXT: call double @llvm.vector.reduce.fmul.v4f64(double 0.000000e+00, <4 x double> [[VD4]])
+  // CHECK-NEXT: call double @llvm.vector.reduce.fmul.v4f64(double 1.000000e+00, <4 x double> [[VD4]])
   double r3 = __builtin_reduce_mul(vd4);
 }
 
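The fmul reduction gets the analogous fix: 1.0 is the multiplicative identity, while a 0.0 start value would force every product to zero. A minimal scalar sketch (not part of the test file) of the reduction the intrinsic performs:

#include <stdio.h>

int main(void) {
    float v[4] = {2.0f, 3.0f, 4.0f, 5.0f};
    float acc = 1.0f; /* identity start value, matching the fixed CHECK line */
    for (int i = 0; i < 4; i++)
        acc *= v[i];  /* sequential fmul over the vector elements */
    printf("%g\n", acc); /* prints 120 */
    return 0;
}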