Commit 74634f4

[SLP][X86] Add test case for Issue #48223
1 parent 96bbd35 commit 74634f4
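
The new test exercises the SLP vectorizer on an element-wise signed min of two 8-element i16 arrays whose results are repacked into a { i64, i64 } return value. Under the baseline x86-64 CPU (SSE check prefix) the output stays fully scalar, while the x86-64-v2/v3/v4 runs (AVX check prefix) only vectorize two of the lane pairs into <2 x i16> @llvm.smin.v2i16 calls. For orientation, here is a minimal C++ sketch of source that plausibly lowers to IR of this shape; it is an illustrative assumption, not code taken from Issue #48223, and the std::array signature is only one way to obtain the { i64, i64 } aggregate return seen in the test.

#include <algorithm>
#include <array>

// Hypothetical source resembling the test's @compute_min: element-wise
// signed min of two 8-element short arrays, returned by value. Under the
// x86-64 SysV ABI the 16-byte return is lowered to the { i64, i64 }
// aggregate that the IR assembles with zext/shl/or before insertvalue.
std::array<short, 8> compute_min(const std::array<short, 8> &x,
                                 const std::array<short, 8> &y) {
  std::array<short, 8> r;
  for (int i = 0; i < 8; ++i)
    r[i] = std::min(x[i], y[i]); // each lane maps to an @llvm.smin.i16 call
  return r;
}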

1 file changed · Lines changed: 189 additions & 0 deletions
@@ -0,0 +1,189 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -mtriple=x86_64-unknown -slp-vectorizer -opaque-pointers -mcpu=x86-64 -S | FileCheck %s --check-prefixes=SSE
; RUN: opt < %s -mtriple=x86_64-unknown -slp-vectorizer -opaque-pointers -mcpu=x86-64-v2 -S | FileCheck %s --check-prefixes=AVX
; RUN: opt < %s -mtriple=x86_64-unknown -slp-vectorizer -opaque-pointers -mcpu=x86-64-v3 -S | FileCheck %s --check-prefixes=AVX
; RUN: opt < %s -mtriple=x86_64-unknown -slp-vectorizer -opaque-pointers -mcpu=x86-64-v4 -S | FileCheck %s --check-prefixes=AVX

define { i64, i64 } @compute_min(ptr nocapture noundef nonnull readonly align 2 dereferenceable(16) %x, ptr nocapture noundef nonnull readonly align 2 dereferenceable(16) %y) {
; SSE-LABEL: @compute_min(
; SSE-NEXT: entry:
; SSE-NEXT: [[TMP0:%.*]] = load i16, ptr [[Y:%.*]], align 2
; SSE-NEXT: [[TMP1:%.*]] = load i16, ptr [[X:%.*]], align 2
; SSE-NEXT: [[TMP2:%.*]] = tail call i16 @llvm.smin.i16(i16 [[TMP0]], i16 [[TMP1]])
; SSE-NEXT: [[ARRAYIDX_I_I_1:%.*]] = getelementptr inbounds [8 x i16], ptr [[X]], i64 0, i64 1
; SSE-NEXT: [[ARRAYIDX_I_I10_1:%.*]] = getelementptr inbounds [8 x i16], ptr [[Y]], i64 0, i64 1
; SSE-NEXT: [[TMP3:%.*]] = load i16, ptr [[ARRAYIDX_I_I10_1]], align 2
; SSE-NEXT: [[TMP4:%.*]] = load i16, ptr [[ARRAYIDX_I_I_1]], align 2
; SSE-NEXT: [[TMP5:%.*]] = tail call i16 @llvm.smin.i16(i16 [[TMP3]], i16 [[TMP4]])
; SSE-NEXT: [[ARRAYIDX_I_I_2:%.*]] = getelementptr inbounds [8 x i16], ptr [[X]], i64 0, i64 2
; SSE-NEXT: [[ARRAYIDX_I_I10_2:%.*]] = getelementptr inbounds [8 x i16], ptr [[Y]], i64 0, i64 2
; SSE-NEXT: [[TMP6:%.*]] = load i16, ptr [[ARRAYIDX_I_I10_2]], align 2
; SSE-NEXT: [[TMP7:%.*]] = load i16, ptr [[ARRAYIDX_I_I_2]], align 2
; SSE-NEXT: [[TMP8:%.*]] = tail call i16 @llvm.smin.i16(i16 [[TMP6]], i16 [[TMP7]])
; SSE-NEXT: [[ARRAYIDX_I_I_3:%.*]] = getelementptr inbounds [8 x i16], ptr [[X]], i64 0, i64 3
; SSE-NEXT: [[ARRAYIDX_I_I10_3:%.*]] = getelementptr inbounds [8 x i16], ptr [[Y]], i64 0, i64 3
; SSE-NEXT: [[TMP9:%.*]] = load i16, ptr [[ARRAYIDX_I_I10_3]], align 2
; SSE-NEXT: [[TMP10:%.*]] = load i16, ptr [[ARRAYIDX_I_I_3]], align 2
; SSE-NEXT: [[TMP11:%.*]] = tail call i16 @llvm.smin.i16(i16 [[TMP9]], i16 [[TMP10]])
; SSE-NEXT: [[ARRAYIDX_I_I_4:%.*]] = getelementptr inbounds [8 x i16], ptr [[X]], i64 0, i64 4
; SSE-NEXT: [[ARRAYIDX_I_I10_4:%.*]] = getelementptr inbounds [8 x i16], ptr [[Y]], i64 0, i64 4
; SSE-NEXT: [[TMP12:%.*]] = load i16, ptr [[ARRAYIDX_I_I10_4]], align 2
; SSE-NEXT: [[TMP13:%.*]] = load i16, ptr [[ARRAYIDX_I_I_4]], align 2
; SSE-NEXT: [[TMP14:%.*]] = tail call i16 @llvm.smin.i16(i16 [[TMP12]], i16 [[TMP13]])
; SSE-NEXT: [[ARRAYIDX_I_I_5:%.*]] = getelementptr inbounds [8 x i16], ptr [[X]], i64 0, i64 5
; SSE-NEXT: [[ARRAYIDX_I_I10_5:%.*]] = getelementptr inbounds [8 x i16], ptr [[Y]], i64 0, i64 5
; SSE-NEXT: [[TMP15:%.*]] = load i16, ptr [[ARRAYIDX_I_I10_5]], align 2
; SSE-NEXT: [[TMP16:%.*]] = load i16, ptr [[ARRAYIDX_I_I_5]], align 2
; SSE-NEXT: [[TMP17:%.*]] = tail call i16 @llvm.smin.i16(i16 [[TMP15]], i16 [[TMP16]])
; SSE-NEXT: [[ARRAYIDX_I_I_6:%.*]] = getelementptr inbounds [8 x i16], ptr [[X]], i64 0, i64 6
; SSE-NEXT: [[ARRAYIDX_I_I10_6:%.*]] = getelementptr inbounds [8 x i16], ptr [[Y]], i64 0, i64 6
; SSE-NEXT: [[TMP18:%.*]] = load i16, ptr [[ARRAYIDX_I_I10_6]], align 2
; SSE-NEXT: [[TMP19:%.*]] = load i16, ptr [[ARRAYIDX_I_I_6]], align 2
; SSE-NEXT: [[TMP20:%.*]] = tail call i16 @llvm.smin.i16(i16 [[TMP18]], i16 [[TMP19]])
; SSE-NEXT: [[ARRAYIDX_I_I_7:%.*]] = getelementptr inbounds [8 x i16], ptr [[X]], i64 0, i64 7
; SSE-NEXT: [[ARRAYIDX_I_I10_7:%.*]] = getelementptr inbounds [8 x i16], ptr [[Y]], i64 0, i64 7
; SSE-NEXT: [[TMP21:%.*]] = load i16, ptr [[ARRAYIDX_I_I10_7]], align 2
; SSE-NEXT: [[TMP22:%.*]] = load i16, ptr [[ARRAYIDX_I_I_7]], align 2
; SSE-NEXT: [[TMP23:%.*]] = tail call i16 @llvm.smin.i16(i16 [[TMP21]], i16 [[TMP22]])
; SSE-NEXT: [[RETVAL_SROA_4_0_INSERT_EXT:%.*]] = zext i16 [[TMP11]] to i64
; SSE-NEXT: [[RETVAL_SROA_4_0_INSERT_SHIFT:%.*]] = shl nuw i64 [[RETVAL_SROA_4_0_INSERT_EXT]], 48
; SSE-NEXT: [[RETVAL_SROA_3_0_INSERT_EXT:%.*]] = zext i16 [[TMP8]] to i64
; SSE-NEXT: [[RETVAL_SROA_3_0_INSERT_SHIFT:%.*]] = shl nuw nsw i64 [[RETVAL_SROA_3_0_INSERT_EXT]], 32
; SSE-NEXT: [[RETVAL_SROA_3_0_INSERT_INSERT:%.*]] = or i64 [[RETVAL_SROA_4_0_INSERT_SHIFT]], [[RETVAL_SROA_3_0_INSERT_SHIFT]]
; SSE-NEXT: [[RETVAL_SROA_2_0_INSERT_EXT:%.*]] = zext i16 [[TMP5]] to i64
; SSE-NEXT: [[RETVAL_SROA_2_0_INSERT_SHIFT:%.*]] = shl nuw nsw i64 [[RETVAL_SROA_2_0_INSERT_EXT]], 16
; SSE-NEXT: [[RETVAL_SROA_2_0_INSERT_INSERT:%.*]] = or i64 [[RETVAL_SROA_3_0_INSERT_INSERT]], [[RETVAL_SROA_2_0_INSERT_SHIFT]]
; SSE-NEXT: [[RETVAL_SROA_0_0_INSERT_EXT:%.*]] = zext i16 [[TMP2]] to i64
; SSE-NEXT: [[RETVAL_SROA_0_0_INSERT_INSERT:%.*]] = or i64 [[RETVAL_SROA_2_0_INSERT_INSERT]], [[RETVAL_SROA_0_0_INSERT_EXT]]
; SSE-NEXT: [[DOTFCA_0_INSERT:%.*]] = insertvalue { i64, i64 } poison, i64 [[RETVAL_SROA_0_0_INSERT_INSERT]], 0
; SSE-NEXT: [[RETVAL_SROA_9_8_INSERT_EXT:%.*]] = zext i16 [[TMP23]] to i64
; SSE-NEXT: [[RETVAL_SROA_9_8_INSERT_SHIFT:%.*]] = shl nuw i64 [[RETVAL_SROA_9_8_INSERT_EXT]], 48
; SSE-NEXT: [[RETVAL_SROA_8_8_INSERT_EXT:%.*]] = zext i16 [[TMP20]] to i64
; SSE-NEXT: [[RETVAL_SROA_8_8_INSERT_SHIFT:%.*]] = shl nuw nsw i64 [[RETVAL_SROA_8_8_INSERT_EXT]], 32
; SSE-NEXT: [[RETVAL_SROA_8_8_INSERT_INSERT:%.*]] = or i64 [[RETVAL_SROA_9_8_INSERT_SHIFT]], [[RETVAL_SROA_8_8_INSERT_SHIFT]]
; SSE-NEXT: [[RETVAL_SROA_7_8_INSERT_EXT:%.*]] = zext i16 [[TMP17]] to i64
; SSE-NEXT: [[RETVAL_SROA_7_8_INSERT_SHIFT:%.*]] = shl nuw nsw i64 [[RETVAL_SROA_7_8_INSERT_EXT]], 16
; SSE-NEXT: [[RETVAL_SROA_7_8_INSERT_INSERT:%.*]] = or i64 [[RETVAL_SROA_8_8_INSERT_INSERT]], [[RETVAL_SROA_7_8_INSERT_SHIFT]]
; SSE-NEXT: [[RETVAL_SROA_5_8_INSERT_EXT:%.*]] = zext i16 [[TMP14]] to i64
; SSE-NEXT: [[RETVAL_SROA_5_8_INSERT_INSERT:%.*]] = or i64 [[RETVAL_SROA_7_8_INSERT_INSERT]], [[RETVAL_SROA_5_8_INSERT_EXT]]
; SSE-NEXT: [[DOTFCA_1_INSERT:%.*]] = insertvalue { i64, i64 } [[DOTFCA_0_INSERT]], i64 [[RETVAL_SROA_5_8_INSERT_INSERT]], 1
; SSE-NEXT: ret { i64, i64 } [[DOTFCA_1_INSERT]]
;
; AVX-LABEL: @compute_min(
; AVX-NEXT: entry:
; AVX-NEXT: [[TMP0:%.*]] = load i16, ptr [[Y:%.*]], align 2
; AVX-NEXT: [[TMP1:%.*]] = load i16, ptr [[X:%.*]], align 2
; AVX-NEXT: [[TMP2:%.*]] = tail call i16 @llvm.smin.i16(i16 [[TMP0]], i16 [[TMP1]])
; AVX-NEXT: [[ARRAYIDX_I_I_1:%.*]] = getelementptr inbounds [8 x i16], ptr [[X]], i64 0, i64 1
; AVX-NEXT: [[ARRAYIDX_I_I10_1:%.*]] = getelementptr inbounds [8 x i16], ptr [[Y]], i64 0, i64 1
; AVX-NEXT: [[TMP3:%.*]] = load i16, ptr [[ARRAYIDX_I_I10_1]], align 2
; AVX-NEXT: [[TMP4:%.*]] = load i16, ptr [[ARRAYIDX_I_I_1]], align 2
; AVX-NEXT: [[TMP5:%.*]] = tail call i16 @llvm.smin.i16(i16 [[TMP3]], i16 [[TMP4]])
; AVX-NEXT: [[ARRAYIDX_I_I_2:%.*]] = getelementptr inbounds [8 x i16], ptr [[X]], i64 0, i64 2
; AVX-NEXT: [[ARRAYIDX_I_I10_2:%.*]] = getelementptr inbounds [8 x i16], ptr [[Y]], i64 0, i64 2
; AVX-NEXT: [[ARRAYIDX_I_I_4:%.*]] = getelementptr inbounds [8 x i16], ptr [[X]], i64 0, i64 4
; AVX-NEXT: [[ARRAYIDX_I_I10_4:%.*]] = getelementptr inbounds [8 x i16], ptr [[Y]], i64 0, i64 4
; AVX-NEXT: [[TMP6:%.*]] = load i16, ptr [[ARRAYIDX_I_I10_4]], align 2
; AVX-NEXT: [[TMP7:%.*]] = load i16, ptr [[ARRAYIDX_I_I_4]], align 2
; AVX-NEXT: [[TMP8:%.*]] = tail call i16 @llvm.smin.i16(i16 [[TMP6]], i16 [[TMP7]])
; AVX-NEXT: [[ARRAYIDX_I_I_5:%.*]] = getelementptr inbounds [8 x i16], ptr [[X]], i64 0, i64 5
; AVX-NEXT: [[ARRAYIDX_I_I10_5:%.*]] = getelementptr inbounds [8 x i16], ptr [[Y]], i64 0, i64 5
; AVX-NEXT: [[TMP9:%.*]] = load i16, ptr [[ARRAYIDX_I_I10_5]], align 2
; AVX-NEXT: [[TMP10:%.*]] = load i16, ptr [[ARRAYIDX_I_I_5]], align 2
; AVX-NEXT: [[TMP11:%.*]] = tail call i16 @llvm.smin.i16(i16 [[TMP9]], i16 [[TMP10]])
; AVX-NEXT: [[ARRAYIDX_I_I_6:%.*]] = getelementptr inbounds [8 x i16], ptr [[X]], i64 0, i64 6
; AVX-NEXT: [[ARRAYIDX_I_I10_6:%.*]] = getelementptr inbounds [8 x i16], ptr [[Y]], i64 0, i64 6
; AVX-NEXT: [[TMP12:%.*]] = load <2 x i16>, ptr [[ARRAYIDX_I_I10_2]], align 2
; AVX-NEXT: [[TMP13:%.*]] = load <2 x i16>, ptr [[ARRAYIDX_I_I_2]], align 2
; AVX-NEXT: [[TMP14:%.*]] = call <2 x i16> @llvm.smin.v2i16(<2 x i16> [[TMP12]], <2 x i16> [[TMP13]])
; AVX-NEXT: [[TMP15:%.*]] = zext <2 x i16> [[TMP14]] to <2 x i64>
; AVX-NEXT: [[TMP16:%.*]] = shl nuw <2 x i64> [[TMP15]], <i64 32, i64 48>
; AVX-NEXT: [[TMP17:%.*]] = extractelement <2 x i64> [[TMP16]], i32 0
; AVX-NEXT: [[TMP18:%.*]] = extractelement <2 x i64> [[TMP16]], i32 1
; AVX-NEXT: [[RETVAL_SROA_3_0_INSERT_INSERT:%.*]] = or i64 [[TMP18]], [[TMP17]]
; AVX-NEXT: [[RETVAL_SROA_2_0_INSERT_EXT:%.*]] = zext i16 [[TMP5]] to i64
; AVX-NEXT: [[RETVAL_SROA_2_0_INSERT_SHIFT:%.*]] = shl nuw nsw i64 [[RETVAL_SROA_2_0_INSERT_EXT]], 16
; AVX-NEXT: [[RETVAL_SROA_2_0_INSERT_INSERT:%.*]] = or i64 [[RETVAL_SROA_3_0_INSERT_INSERT]], [[RETVAL_SROA_2_0_INSERT_SHIFT]]
; AVX-NEXT: [[RETVAL_SROA_0_0_INSERT_EXT:%.*]] = zext i16 [[TMP2]] to i64
; AVX-NEXT: [[RETVAL_SROA_0_0_INSERT_INSERT:%.*]] = or i64 [[RETVAL_SROA_2_0_INSERT_INSERT]], [[RETVAL_SROA_0_0_INSERT_EXT]]
; AVX-NEXT: [[DOTFCA_0_INSERT:%.*]] = insertvalue { i64, i64 } poison, i64 [[RETVAL_SROA_0_0_INSERT_INSERT]], 0
; AVX-NEXT: [[TMP19:%.*]] = load <2 x i16>, ptr [[ARRAYIDX_I_I10_6]], align 2
; AVX-NEXT: [[TMP20:%.*]] = load <2 x i16>, ptr [[ARRAYIDX_I_I_6]], align 2
; AVX-NEXT: [[TMP21:%.*]] = call <2 x i16> @llvm.smin.v2i16(<2 x i16> [[TMP19]], <2 x i16> [[TMP20]])
; AVX-NEXT: [[TMP22:%.*]] = zext <2 x i16> [[TMP21]] to <2 x i64>
; AVX-NEXT: [[TMP23:%.*]] = shl nuw <2 x i64> [[TMP22]], <i64 32, i64 48>
; AVX-NEXT: [[TMP24:%.*]] = extractelement <2 x i64> [[TMP23]], i32 0
; AVX-NEXT: [[TMP25:%.*]] = extractelement <2 x i64> [[TMP23]], i32 1
; AVX-NEXT: [[RETVAL_SROA_8_8_INSERT_INSERT:%.*]] = or i64 [[TMP25]], [[TMP24]]
; AVX-NEXT: [[RETVAL_SROA_7_8_INSERT_EXT:%.*]] = zext i16 [[TMP11]] to i64
; AVX-NEXT: [[RETVAL_SROA_7_8_INSERT_SHIFT:%.*]] = shl nuw nsw i64 [[RETVAL_SROA_7_8_INSERT_EXT]], 16
; AVX-NEXT: [[RETVAL_SROA_7_8_INSERT_INSERT:%.*]] = or i64 [[RETVAL_SROA_8_8_INSERT_INSERT]], [[RETVAL_SROA_7_8_INSERT_SHIFT]]
; AVX-NEXT: [[RETVAL_SROA_5_8_INSERT_EXT:%.*]] = zext i16 [[TMP8]] to i64
; AVX-NEXT: [[RETVAL_SROA_5_8_INSERT_INSERT:%.*]] = or i64 [[RETVAL_SROA_7_8_INSERT_INSERT]], [[RETVAL_SROA_5_8_INSERT_EXT]]
; AVX-NEXT: [[DOTFCA_1_INSERT:%.*]] = insertvalue { i64, i64 } [[DOTFCA_0_INSERT]], i64 [[RETVAL_SROA_5_8_INSERT_INSERT]], 1
; AVX-NEXT: ret { i64, i64 } [[DOTFCA_1_INSERT]]
;
entry:
%0 = load i16, ptr %y, align 2
%1 = load i16, ptr %x, align 2
%2 = tail call i16 @llvm.smin.i16(i16 %0, i16 %1)
%arrayidx.i.i.1 = getelementptr inbounds [8 x i16], ptr %x, i64 0, i64 1
%arrayidx.i.i10.1 = getelementptr inbounds [8 x i16], ptr %y, i64 0, i64 1
%3 = load i16, ptr %arrayidx.i.i10.1, align 2
%4 = load i16, ptr %arrayidx.i.i.1, align 2
%5 = tail call i16 @llvm.smin.i16(i16 %3, i16 %4)
%arrayidx.i.i.2 = getelementptr inbounds [8 x i16], ptr %x, i64 0, i64 2
%arrayidx.i.i10.2 = getelementptr inbounds [8 x i16], ptr %y, i64 0, i64 2
%6 = load i16, ptr %arrayidx.i.i10.2, align 2
%7 = load i16, ptr %arrayidx.i.i.2, align 2
%8 = tail call i16 @llvm.smin.i16(i16 %6, i16 %7)
%arrayidx.i.i.3 = getelementptr inbounds [8 x i16], ptr %x, i64 0, i64 3
%arrayidx.i.i10.3 = getelementptr inbounds [8 x i16], ptr %y, i64 0, i64 3
%9 = load i16, ptr %arrayidx.i.i10.3, align 2
%10 = load i16, ptr %arrayidx.i.i.3, align 2
%11 = tail call i16 @llvm.smin.i16(i16 %9, i16 %10)
%arrayidx.i.i.4 = getelementptr inbounds [8 x i16], ptr %x, i64 0, i64 4
%arrayidx.i.i10.4 = getelementptr inbounds [8 x i16], ptr %y, i64 0, i64 4
%12 = load i16, ptr %arrayidx.i.i10.4, align 2
%13 = load i16, ptr %arrayidx.i.i.4, align 2
%14 = tail call i16 @llvm.smin.i16(i16 %12, i16 %13)
%arrayidx.i.i.5 = getelementptr inbounds [8 x i16], ptr %x, i64 0, i64 5
%arrayidx.i.i10.5 = getelementptr inbounds [8 x i16], ptr %y, i64 0, i64 5
%15 = load i16, ptr %arrayidx.i.i10.5, align 2
%16 = load i16, ptr %arrayidx.i.i.5, align 2
%17 = tail call i16 @llvm.smin.i16(i16 %15, i16 %16)
%arrayidx.i.i.6 = getelementptr inbounds [8 x i16], ptr %x, i64 0, i64 6
%arrayidx.i.i10.6 = getelementptr inbounds [8 x i16], ptr %y, i64 0, i64 6
%18 = load i16, ptr %arrayidx.i.i10.6, align 2
%19 = load i16, ptr %arrayidx.i.i.6, align 2
%20 = tail call i16 @llvm.smin.i16(i16 %18, i16 %19)
%arrayidx.i.i.7 = getelementptr inbounds [8 x i16], ptr %x, i64 0, i64 7
%arrayidx.i.i10.7 = getelementptr inbounds [8 x i16], ptr %y, i64 0, i64 7
%21 = load i16, ptr %arrayidx.i.i10.7, align 2
%22 = load i16, ptr %arrayidx.i.i.7, align 2
%23 = tail call i16 @llvm.smin.i16(i16 %21, i16 %22)
%retval.sroa.4.0.insert.ext = zext i16 %11 to i64
%retval.sroa.4.0.insert.shift = shl nuw i64 %retval.sroa.4.0.insert.ext, 48
%retval.sroa.3.0.insert.ext = zext i16 %8 to i64
%retval.sroa.3.0.insert.shift = shl nuw nsw i64 %retval.sroa.3.0.insert.ext, 32
%retval.sroa.3.0.insert.insert = or i64 %retval.sroa.4.0.insert.shift, %retval.sroa.3.0.insert.shift
%retval.sroa.2.0.insert.ext = zext i16 %5 to i64
%retval.sroa.2.0.insert.shift = shl nuw nsw i64 %retval.sroa.2.0.insert.ext, 16
%retval.sroa.2.0.insert.insert = or i64 %retval.sroa.3.0.insert.insert, %retval.sroa.2.0.insert.shift
%retval.sroa.0.0.insert.ext = zext i16 %2 to i64
%retval.sroa.0.0.insert.insert = or i64 %retval.sroa.2.0.insert.insert, %retval.sroa.0.0.insert.ext
%.fca.0.insert = insertvalue { i64, i64 } poison, i64 %retval.sroa.0.0.insert.insert, 0
%retval.sroa.9.8.insert.ext = zext i16 %23 to i64
%retval.sroa.9.8.insert.shift = shl nuw i64 %retval.sroa.9.8.insert.ext, 48
%retval.sroa.8.8.insert.ext = zext i16 %20 to i64
%retval.sroa.8.8.insert.shift = shl nuw nsw i64 %retval.sroa.8.8.insert.ext, 32
%retval.sroa.8.8.insert.insert = or i64 %retval.sroa.9.8.insert.shift, %retval.sroa.8.8.insert.shift
%retval.sroa.7.8.insert.ext = zext i16 %17 to i64
%retval.sroa.7.8.insert.shift = shl nuw nsw i64 %retval.sroa.7.8.insert.ext, 16
%retval.sroa.7.8.insert.insert = or i64 %retval.sroa.8.8.insert.insert, %retval.sroa.7.8.insert.shift
%retval.sroa.5.8.insert.ext = zext i16 %14 to i64
%retval.sroa.5.8.insert.insert = or i64 %retval.sroa.7.8.insert.insert, %retval.sroa.5.8.insert.ext
%.fca.1.insert = insertvalue { i64, i64 } %.fca.0.insert, i64 %retval.sroa.5.8.insert.insert, 1
ret { i64, i64 } %.fca.1.insert
}
declare i16 @llvm.smin.i16(i16, i16)
