
Commit 6cd68c2

[X86] Add base SSE2 coverage to SRL/SRA combines tests
1 parent: 7dc4d5f

File tree

2 files changed: +378 −167 lines


llvm/test/CodeGen/X86/combine-sra.ll

Lines changed: 185 additions & 85 deletions
@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE,SSE41
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2,AVX2-SLOW
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=CHECK,AVX,AVX2,AVX2-FAST-ALL
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=CHECK,AVX,AVX2,AVX2-FAST-PERLANE
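A note on the prefix scheme above: each RUN line verifies every prefix it lists, so assembly that is identical under both SSE runs is still checked once under the shared SSE prefix, and only diverging output splits into SSE2 and SSE41 blocks below. A minimal illustration of the layering (hypothetical label, not from this file):

; CHECK-LABEL: some_fn:   <- checked for every RUN line
; SSE-NEXT: ...           <- only where +sse2 and +sse4.1 codegen agrees
; SSE2-NEXT: ...          <- baseline SSE2 codegen only
; SSE41-NEXT: ...         <- SSE4.1 codegen only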
@@ -86,19 +87,33 @@ define <4 x i32> @combine_vec_ashr_ashr0(<4 x i32> %x) {
 }
 
 define <4 x i32> @combine_vec_ashr_ashr1(<4 x i32> %x) {
-; SSE-LABEL: combine_vec_ashr_ashr1:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrad $10, %xmm1
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: psrad $6, %xmm2
-; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrad $8, %xmm1
-; SSE-NEXT: psrad $4, %xmm0
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_vec_ashr_ashr1:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrad $10, %xmm1
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: psrad $8, %xmm2
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm1[1]
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrad $6, %xmm1
+; SSE2-NEXT: psrad $4, %xmm0
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm2[0,3]
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec_ashr_ashr1:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psrad $10, %xmm1
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: psrad $6, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psrad $8, %xmm1
+; SSE41-NEXT: psrad $4, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: combine_vec_ashr_ashr1:
 ; AVX: # %bb.0:
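This hunk exercises the ashr-of-ashr combine: two vector ashr ops by constants fold into one ashr by the element-wise sum, which both codegen paths then lower as four psrad-plus-merge steps. A hedged sketch of the IR shape, reconstructed from the per-lane psrad amounts in the CHECK lines (4 = 0+4, 6 = 1+5, 8 = 2+6, 10 = 3+7) rather than copied from the file:

define <4 x i32> @combine_vec_ashr_ashr1(<4 x i32> %x) {
  ; (ashr (ashr x, c1), c2) folds to ashr x, c1+c2 per lane
  %1 = ashr <4 x i32> %x, <i32 0, i32 1, i32 2, i32 3>
  %2 = ashr <4 x i32> %1, <i32 4, i32 5, i32 6, i32 7>
  ret <4 x i32> %2
}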
@@ -125,16 +140,30 @@ define <4 x i32> @combine_vec_ashr_ashr2(<4 x i32> %x) {
 }
 
 define <4 x i32> @combine_vec_ashr_ashr3(<4 x i32> %x) {
-; SSE-LABEL: combine_vec_ashr_ashr3:
-; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrad $27, %xmm1
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: psrad $15, %xmm2
-; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT: psrad $31, %xmm0
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_vec_ashr_ashr3:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: psrad $27, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm2[1]
+; SSE2-NEXT: psrad $15, %xmm0
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm3[0,3]
+; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec_ashr_ashr3:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psrad $27, %xmm1
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: psrad $15, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT: psrad $31, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: combine_vec_ashr_ashr3:
 ; AVX: # %bb.0:
@@ -147,26 +176,48 @@ define <4 x i32> @combine_vec_ashr_ashr3(<4 x i32> %x) {
 
 ; fold (sra x, (trunc (and y, c))) -> (sra x, (and (trunc y), (trunc c))).
 define <4 x i32> @combine_vec_ashr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
-; SSE-LABEL: combine_vec_ashr_trunc_and:
-; SSE: # %bb.0:
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
-; SSE-NEXT: andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: psrad %xmm2, %xmm3
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm0, %xmm5
-; SSE-NEXT: psrad %xmm4, %xmm5
-; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: psrad %xmm1, %xmm3
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
-; SSE-NEXT: psrad %xmm1, %xmm0
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_vec_ashr_trunc_and:
+; SSE2: # %bb.0:
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
+; SSE2-NEXT: andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: psrad %xmm2, %xmm3
+; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm1[0,1,1,1,4,5,6,7]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: psrad %xmm4, %xmm2
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[2,3,3,3,4,5,6,7]
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: psrad %xmm3, %xmm4
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
+; SSE2-NEXT: psrad %xmm1, %xmm0
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm4[1]
+; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,3]
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec_ashr_trunc_and:
+; SSE41: # %bb.0:
+; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
+; SSE41-NEXT: andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE41-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: psrad %xmm2, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: psrad %xmm4, %xmm5
+; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: psrad %xmm1, %xmm3
+; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
+; SSE41-NEXT: psrad %xmm1, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
+; SSE41-NEXT: retq
 ;
 ; AVX2-SLOW-LABEL: combine_vec_ashr_trunc_and:
 ; AVX2-SLOW: # %bb.0:
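The fold noted in this hunk narrows the shift amount before the shift: masking y with c bounds the amount, so the and and the trunc can be reordered and the whole computation stays in 32 bits. A sketch of the IR shape such a test exercises, written to match the fold comment; the function name and the mask constant 31 are illustrative assumptions, not taken from the file:

define <4 x i32> @ashr_trunc_and_sketch(<4 x i32> %x, <4 x i64> %y) {
  ; (sra x, (trunc (and y, 31))) -> (sra x, (and (trunc y), 31))
  %amt64 = and <4 x i64> %y, <i64 31, i64 31, i64 31, i64 31>
  %amt = trunc <4 x i64> %amt64 to <4 x i32>
  %r = ashr <4 x i32> %x, %amt
  ret <4 x i32> %r
}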
@@ -211,17 +262,31 @@ define <4 x i32> @combine_vec_ashr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
 ; fold (sra (trunc (srl x, c1)), c2) -> (trunc (sra x, c1 + c2))
 ; if c1 is equal to the number of bits the trunc removes
 define <4 x i32> @combine_vec_ashr_trunc_lshr(<4 x i64> %x) {
-; SSE-LABEL: combine_vec_ashr_trunc_lshr:
-; SSE: # %bb.0:
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
-; SSE-NEXT: movaps %xmm0, %xmm2
-; SSE-NEXT: psrad $2, %xmm2
-; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm2[4,5,6,7]
-; SSE-NEXT: psrad $1, %xmm0
-; SSE-NEXT: psrad $3, %xmm1
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_vec_ashr_trunc_lshr:
+; SSE2: # %bb.0:
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
+; SSE2-NEXT: movaps %xmm0, %xmm1
+; SSE2-NEXT: psrad $3, %xmm1
+; SSE2-NEXT: movaps %xmm0, %xmm2
+; SSE2-NEXT: psrad $2, %xmm2
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm1[1]
+; SSE2-NEXT: movaps %xmm0, %xmm1
+; SSE2-NEXT: psrad $1, %xmm1
+; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm2[0,3]
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec_ashr_trunc_lshr:
+; SSE41: # %bb.0:
+; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
+; SSE41-NEXT: movaps %xmm0, %xmm2
+; SSE41-NEXT: psrad $2, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT: psrad $1, %xmm0
+; SSE41-NEXT: psrad $3, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
+; SSE41-NEXT: retq
 ;
 ; AVX2-SLOW-LABEL: combine_vec_ashr_trunc_lshr:
 ; AVX2-SLOW: # %bb.0:
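This fold merges the two shifts through the truncate: when the lshr amount equals the bit width the trunc discards (here 32, from i64 to i32), the chain is one sra of the wide value followed by a trunc, which the shufps [1,3] lane selection plus psrad $1/$2/$3 in the CHECK lines implements. A hedged IR sketch consistent with those lines; the function name and the per-lane amounts 0..3 are inferred, not copied from the file:

define <4 x i32> @ashr_trunc_lshr_sketch(<4 x i64> %x) {
  ; (sra (trunc (srl x, 32)), c) -> (trunc (sra x, 32 + c))
  %srl = lshr <4 x i64> %x, <i64 32, i64 32, i64 32, i64 32>
  %t = trunc <4 x i64> %srl to <4 x i32>
  %r = ashr <4 x i32> %t, <i32 0, i32 1, i32 2, i32 3>
  ret <4 x i32> %r
}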
@@ -298,17 +363,31 @@ define <16 x i8> @combine_vec_ashr_trunc_lshr_splat(<16 x i32> %x) {
 ; fold (sra (trunc (sra x, c1)), c2) -> (trunc (sra x, c1 + c2))
 ; if c1 is equal to the number of bits the trunc removes
 define <4 x i32> @combine_vec_ashr_trunc_ashr(<4 x i64> %x) {
-; SSE-LABEL: combine_vec_ashr_trunc_ashr:
-; SSE: # %bb.0:
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
-; SSE-NEXT: movaps %xmm0, %xmm2
-; SSE-NEXT: psrad $2, %xmm2
-; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm2[4,5,6,7]
-; SSE-NEXT: psrad $1, %xmm0
-; SSE-NEXT: psrad $3, %xmm1
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_vec_ashr_trunc_ashr:
+; SSE2: # %bb.0:
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
+; SSE2-NEXT: movaps %xmm0, %xmm1
+; SSE2-NEXT: psrad $3, %xmm1
+; SSE2-NEXT: movaps %xmm0, %xmm2
+; SSE2-NEXT: psrad $2, %xmm2
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm1[1]
+; SSE2-NEXT: movaps %xmm0, %xmm1
+; SSE2-NEXT: psrad $1, %xmm1
+; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm2[0,3]
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec_ashr_trunc_ashr:
+; SSE41: # %bb.0:
+; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
+; SSE41-NEXT: movaps %xmm0, %xmm2
+; SSE41-NEXT: psrad $2, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT: psrad $1, %xmm0
+; SSE41-NEXT: psrad $3, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
+; SSE41-NEXT: retq
 ;
 ; AVX2-SLOW-LABEL: combine_vec_ashr_trunc_ashr:
 ; AVX2-SLOW: # %bb.0:
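The ashr variant is the same pattern with an arithmetic inner shift: sra by 32 already fills the kept half with sign bits, so it combines identically and the generated asm matches the lshr case above. A matching sketch under the same assumptions (hypothetical name and shift amounts):

define <4 x i32> @ashr_trunc_ashr_sketch(<4 x i64> %x) {
  ; (sra (trunc (sra x, 32)), c) -> (trunc (sra x, 32 + c))
  %sra = ashr <4 x i64> %x, <i64 32, i64 32, i64 32, i64 32>
  %t = trunc <4 x i64> %sra to <4 x i32>
  %r = ashr <4 x i32> %t, <i32 0, i32 1, i32 2, i32 3>
  ret <4 x i32> %r
}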
@@ -377,25 +456,46 @@ define <8 x i16> @combine_vec_ashr_trunc_ashr_splat(<8 x i32> %x) {
 
 ; If the sign bit is known to be zero, switch this to a SRL.
 define <4 x i32> @combine_vec_ashr_positive(<4 x i32> %x, <4 x i32> %y) {
-; SSE-LABEL: combine_vec_ashr_positive:
-; SSE: # %bb.0:
-; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: psrld %xmm2, %xmm3
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm0, %xmm5
-; SSE-NEXT: psrld %xmm4, %xmm5
-; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: psrld %xmm1, %xmm3
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
-; SSE-NEXT: psrld %xmm1, %xmm0
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
-; SSE-NEXT: retq
+; SSE2-LABEL: combine_vec_ashr_positive:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: psrld %xmm2, %xmm3
+; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm1[0,1,1,1,4,5,6,7]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: psrld %xmm4, %xmm2
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[2,3,3,3,4,5,6,7]
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: psrld %xmm3, %xmm4
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
+; SSE2-NEXT: psrld %xmm1, %xmm0
+; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm4[1]
+; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,3]
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: combine_vec_ashr_positive:
+; SSE41: # %bb.0:
+; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE41-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: psrld %xmm2, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: psrld %xmm4, %xmm5
+; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7]
+; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: psrld %xmm1, %xmm3
+; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
+; SSE41-NEXT: psrld %xmm1, %xmm0
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
+; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: combine_vec_ashr_positive:
 ; AVX: # %bb.0:
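The last hunk covers the known-sign-bit combine: once the pand clears the upper bits of every lane, the value is provably non-negative, so the arithmetic shift is rewritten as a logical one (hence psrld rather than psrad throughout both blocks). A hedged sketch of the IR shape; the function name and mask constants are illustrative, not taken from the file:

define <4 x i32> @ashr_positive_sketch(<4 x i32> %x, <4 x i32> %y) {
  ; masking clears the sign bit, so the ashr is lowered as lshr
  %pos = and <4 x i32> %x, <i32 15, i32 255, i32 4095, i32 65535>
  %r = ashr <4 x i32> %pos, %y
  ret <4 x i32> %r
}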
