@@ -247,13 +247,13 @@ define <8 x float> @pair_sum_v8f32_v4f32(<4 x float> %0, <4 x float> %1, <4 x fl
 ; AVX2-SLOW-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; AVX2-SLOW-NEXT:    vaddps %xmm1, %xmm0, %xmm0
 ; AVX2-SLOW-NEXT:    vhaddps %xmm4, %xmm4, %xmm1
-; AVX2-SLOW-NEXT:    vhaddps %xmm5, %xmm5, %xmm8
+; AVX2-SLOW-NEXT:    vhaddps %xmm5, %xmm5, %xmm4
 ; AVX2-SLOW-NEXT:    vhaddps %xmm3, %xmm2, %xmm2
-; AVX2-SLOW-NEXT:    vshufps {{.*#+}} xmm1 = xmm2[0,2],xmm1[0,1]
-; AVX2-SLOW-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm8[0]
-; AVX2-SLOW-NEXT:    vhaddps %xmm4, %xmm5, %xmm3
-; AVX2-SLOW-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[3,1]
-; AVX2-SLOW-NEXT:    vaddps %xmm2, %xmm1, %xmm1
+; AVX2-SLOW-NEXT:    vshufps {{.*#+}} xmm3 = xmm2[0,2],xmm1[0,1]
+; AVX2-SLOW-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[0]
+; AVX2-SLOW-NEXT:    vshufps {{.*#+}} xmm1 = xmm2[1,3],xmm1[1,3]
+; AVX2-SLOW-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[3]
+; AVX2-SLOW-NEXT:    vaddps %xmm1, %xmm3, %xmm1
 ; AVX2-SLOW-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX2-SLOW-NEXT:    vshufpd {{.*#+}} xmm1 = xmm1[1,0]
 ; AVX2-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -268,13 +268,13 @@ define <8 x float> @pair_sum_v8f32_v4f32(<4 x float> %0, <4 x float> %1, <4 x fl
 ; AVX2-FAST-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
 ; AVX2-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
 ; AVX2-FAST-NEXT:    vhaddps %xmm4, %xmm4, %xmm1
-; AVX2-FAST-NEXT:    vhaddps %xmm5, %xmm5, %xmm8
+; AVX2-FAST-NEXT:    vhaddps %xmm5, %xmm5, %xmm4
 ; AVX2-FAST-NEXT:    vhaddps %xmm3, %xmm2, %xmm2
-; AVX2-FAST-NEXT:    vshufps {{.*#+}} xmm1 = xmm2[0,2],xmm1[0,1]
-; AVX2-FAST-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm8[0]
-; AVX2-FAST-NEXT:    vhaddps %xmm4, %xmm5, %xmm3
-; AVX2-FAST-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[3,1]
-; AVX2-FAST-NEXT:    vaddps %xmm2, %xmm1, %xmm1
+; AVX2-FAST-NEXT:    vshufps {{.*#+}} xmm3 = xmm2[0,2],xmm1[0,1]
+; AVX2-FAST-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[0]
+; AVX2-FAST-NEXT:    vshufps {{.*#+}} xmm1 = xmm2[1,3],xmm1[1,3]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[3]
+; AVX2-FAST-NEXT:    vaddps %xmm1, %xmm3, %xmm1
 ; AVX2-FAST-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX2-FAST-NEXT:    vshufpd {{.*#+}} xmm1 = xmm1[1,0]
 ; AVX2-FAST-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -424,7 +424,7 @@ define <8 x i32> @pair_sum_v8i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm3 = xmm3[0,1],xmm1[2,3]
 ; AVX2-SLOW-NEXT:    vpbroadcastd %xmm4, %xmm5
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm3 = xmm3[0,1,2],xmm5[3]
-; AVX2-SLOW-NEXT:    vshufps {{.*#+}} xmm1 = xmm2[1,3],xmm1[1,1]
+; AVX2-SLOW-NEXT:    vshufps {{.*#+}} xmm1 = xmm2[1,3],xmm1[1,3]
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[3]
 ; AVX2-SLOW-NEXT:    vpaddd %xmm1, %xmm3, %xmm1
 ; AVX2-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@@ -447,7 +447,7 @@ define <8 x i32> @pair_sum_v8i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm3 = xmm3[0,1],xmm1[2,3]
 ; AVX2-FAST-NEXT:    vpbroadcastd %xmm4, %xmm5
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm3 = xmm3[0,1,2],xmm5[3]
-; AVX2-FAST-NEXT:    vshufps {{.*#+}} xmm1 = xmm2[1,3],xmm1[1,1]
+; AVX2-FAST-NEXT:    vshufps {{.*#+}} xmm1 = xmm2[1,3],xmm1[1,3]
 ; AVX2-FAST-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[3]
 ; AVX2-FAST-NEXT:    vpaddd %xmm1, %xmm3, %xmm1
 ; AVX2-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]