@@ -5,6 +5,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
 ; Instcombine should be able to eliminate all of these ext casts.

 declare void @use(i32)
+declare void @use.i8(i8)
 declare void @use_vec(<2 x i32>)

 define i64 @test1(i64 %a) {
@@ -217,8 +218,8 @@ define i16 @ashr_mul(i8 %X, i8 %Y) {
 define i32 @trunc_ashr(i32 %X) {
 ; CHECK-LABEL: @trunc_ashr(
 ; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[X:%.*]], 8
-; CHECK-NEXT: [[C:%.*]] = or i32 [[TMP1]], -8388608
-; CHECK-NEXT: ret i32 [[C]]
+; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP1]], -8388608
+; CHECK-NEXT: ret i32 [[TMP2]]
 ;
 %A = zext i32 %X to i36
 %B = or i36 %A, -2147483648 ; 0xF80000000
@@ -230,8 +231,8 @@ define i32 @trunc_ashr(i32 %X) {
 define <2 x i32> @trunc_ashr_vec(<2 x i32> %X) {
 ; CHECK-LABEL: @trunc_ashr_vec(
 ; CHECK-NEXT: [[TMP1:%.*]] = lshr <2 x i32> [[X:%.*]], <i32 8, i32 8>
-; CHECK-NEXT: [[C:%.*]] = or <2 x i32> [[TMP1]], <i32 -8388608, i32 -8388608>
-; CHECK-NEXT: ret <2 x i32> [[C]]
+; CHECK-NEXT: [[TMP2:%.*]] = or <2 x i32> [[TMP1]], <i32 -8388608, i32 -8388608>
+; CHECK-NEXT: ret <2 x i32> [[TMP2]]
 ;
 %A = zext <2 x i32> %X to <2 x i36>
 %B = or <2 x i36> %A, <i36 -2147483648, i36 -2147483648> ; 0xF80000000
@@ -305,8 +306,8 @@ define <2 x i64> @test8_vec_poison(<2 x i32> %A, <2 x i32> %B) {
 ; CHECK-NEXT: [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i64>
 ; CHECK-NEXT: [[D:%.*]] = zext <2 x i32> [[B:%.*]] to <2 x i64>
 ; CHECK-NEXT: [[E:%.*]] = shl nuw <2 x i64> [[D]], <i64 32, i64 poison>
-; CHECK-NEXT: [[G:%.*]] = or disjoint <2 x i64> [[E]], [[C]]
-; CHECK-NEXT: ret <2 x i64> [[G]]
+; CHECK-NEXT: [[F:%.*]] = or disjoint <2 x i64> [[E]], [[C]]
+; CHECK-NEXT: ret <2 x i64> [[F]]
 ;
 %C = zext <2 x i32> %A to <2 x i128>
 %D = zext <2 x i32> %B to <2 x i128>
@@ -392,8 +393,8 @@ define <2 x i64> @test11_vec_poison(<2 x i32> %A, <2 x i32> %B) {
 ; CHECK-NEXT: [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i64>
 ; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i32> [[B:%.*]], <i32 31, i32 poison>
 ; CHECK-NEXT: [[E:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64>
-; CHECK-NEXT: [[G:%.*]] = shl nuw nsw <2 x i64> [[C]], [[E]]
-; CHECK-NEXT: ret <2 x i64> [[G]]
+; CHECK-NEXT: [[F:%.*]] = shl nuw nsw <2 x i64> [[C]], [[E]]
+; CHECK-NEXT: ret <2 x i64> [[F]]
 ;
 %C = zext <2 x i32> %A to <2 x i128>
 %D = zext <2 x i32> %B to <2 x i128>
@@ -456,8 +457,8 @@ define <2 x i64> @test12_vec_poison(<2 x i32> %A, <2 x i32> %B) {
 ; CHECK-NEXT: [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i64>
 ; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i32> [[B:%.*]], <i32 31, i32 poison>
 ; CHECK-NEXT: [[E:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64>
-; CHECK-NEXT: [[G:%.*]] = lshr <2 x i64> [[C]], [[E]]
-; CHECK-NEXT: ret <2 x i64> [[G]]
+; CHECK-NEXT: [[F:%.*]] = lshr <2 x i64> [[C]], [[E]]
+; CHECK-NEXT: ret <2 x i64> [[F]]
 ;
 %C = zext <2 x i32> %A to <2 x i128>
 %D = zext <2 x i32> %B to <2 x i128>
@@ -520,8 +521,8 @@ define <2 x i64> @test13_vec_poison(<2 x i32> %A, <2 x i32> %B) {
 ; CHECK-NEXT: [[C:%.*]] = sext <2 x i32> [[A:%.*]] to <2 x i64>
 ; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i32> [[B:%.*]], <i32 31, i32 poison>
 ; CHECK-NEXT: [[E:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64>
-; CHECK-NEXT: [[G:%.*]] = ashr <2 x i64> [[C]], [[E]]
-; CHECK-NEXT: ret <2 x i64> [[G]]
+; CHECK-NEXT: [[F:%.*]] = ashr <2 x i64> [[C]], [[E]]
+; CHECK-NEXT: ret <2 x i64> [[F]]
 ;
 %C = sext <2 x i32> %A to <2 x i128>
 %D = zext <2 x i32> %B to <2 x i128>
@@ -1034,8 +1035,8 @@ define i8 @drop_nsw_trunc(i16 %x, i16 %y) {
 define i8 @drop_nuw_trunc(i16 %x, i16 %y) {
 ; CHECK-LABEL: @drop_nuw_trunc(
 ; CHECK-NEXT: [[AND2:%.*]] = and i16 [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[B:%.*]] = trunc i16 [[AND2]] to i8
-; CHECK-NEXT: ret i8 [[B]]
+; CHECK-NEXT: [[RES:%.*]] = trunc i16 [[AND2]] to i8
+; CHECK-NEXT: ret i8 [[RES]]
 ;
 %and = and i16 %x, 255
 %and2 = and i16 %and, %y
@@ -1095,3 +1096,31 @@ define <2 x i1> @trunc_nuw_xor_vector(<2 x i8> %x, <2 x i8> %y) {
 %r = trunc nuw <2 x i8> %xor to <2 x i1>
 ret <2 x i1> %r
 }
+
+; FIXME: This is a miscompile.
+define void @pr95547(i32 %x) {
+; CHECK-LABEL: @pr95547(
+; CHECK-NEXT: [[X_TRUNC:%.*]] = trunc i32 [[X:%.*]] to i8
+; CHECK-NEXT: [[DIV:%.*]] = udiv i8 11, [[X_TRUNC]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[X]], 256
+; CHECK-NEXT: br i1 [[CMP]], label [[LOOP:%.*]], label [[EXIT:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: call void @use.i8(i8 [[DIV]])
+; CHECK-NEXT: br label [[LOOP]]
+; CHECK: exit:
+; CHECK-NEXT: ret void
+;
+%x.trunc = trunc i32 %x to i16
+%div = udiv i16 11, %x.trunc
+%cmp = icmp ult i32 %x, 256
+br i1 %cmp, label %loop, label %exit
+
+loop:
+; The loop is just here to prevent sinking.
+%trunc = trunc i16 %div to i8
+call void @use.i8(i8 %trunc)
+br label %loop
+
+exit:
+ret void
+}
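
Note on the @pr95547 FIXME, a hedged reading based only on the IR shown above: the expected output narrows the division to i8, so the unconditional `udiv i16 11, %x.trunc` becomes `udiv i8 11, %x.trunc` even though the `icmp ult i32 %x, 256` guard only holds on the loop path. A worked case: for `%x = 256`, the original code computes `trunc i32 256 to i16 = 256` and `udiv i16 11, 256 = 0`, then skips the loop, so there is no undefined behavior; after narrowing, `trunc i32 256 to i8 = 0` and `udiv i8 11, 0` is an unconditional division by zero. That appears to be the miscompile the FIXME refers to.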