@@ -1680,6 +1680,90 @@ define i8 @lshr_bitwidth_mask(i8 %x, i8 %y) {
   ret i8 %r
 }
 
+define i16 @signbit_splat_mask(i8 %x, i16 %y) {
+; CHECK-LABEL: @signbit_splat_mask(
+; CHECK-NEXT:    [[A:%.*]] = ashr i8 [[X:%.*]], 7
+; CHECK-NEXT:    [[S:%.*]] = sext i8 [[A]] to i16
+; CHECK-NEXT:    [[R:%.*]] = and i16 [[S]], [[Y:%.*]]
+; CHECK-NEXT:    ret i16 [[R]]
+;
+  %a = ashr i8 %x, 7
+  %s = sext i8 %a to i16
+  %r = and i16 %s, %y
+  ret i16 %r
+}
+
+define <2 x i16> @signbit_splat_mask_commute(<2 x i5> %x, <2 x i16> %p) {
+; CHECK-LABEL: @signbit_splat_mask_commute(
+; CHECK-NEXT:    [[Y:%.*]] = mul <2 x i16> [[P:%.*]], [[P]]
+; CHECK-NEXT:    [[A:%.*]] = ashr <2 x i5> [[X:%.*]], <i5 4, i5 poison>
+; CHECK-NEXT:    [[S:%.*]] = sext <2 x i5> [[A]] to <2 x i16>
+; CHECK-NEXT:    [[R:%.*]] = and <2 x i16> [[Y]], [[S]]
+; CHECK-NEXT:    ret <2 x i16> [[R]]
+;
+  %y = mul <2 x i16> %p, %p ; thwart complexity-based canonicalization
+  %a = ashr <2 x i5> %x, <i5 4, i5 poison>
+  %s = sext <2 x i5> %a to <2 x i16>
+  %r = and <2 x i16> %y, %s
+  ret <2 x i16> %r
+}
+
+define i16 @signbit_splat_mask_use1(i8 %x, i16 %y) {
+; CHECK-LABEL: @signbit_splat_mask_use1(
+; CHECK-NEXT:    [[A:%.*]] = ashr i8 [[X:%.*]], 7
+; CHECK-NEXT:    call void @use8(i8 [[A]])
+; CHECK-NEXT:    [[S:%.*]] = sext i8 [[A]] to i16
+; CHECK-NEXT:    [[R:%.*]] = and i16 [[S]], [[Y:%.*]]
+; CHECK-NEXT:    ret i16 [[R]]
+;
+  %a = ashr i8 %x, 7
+  call void @use8(i8 %a)
+  %s = sext i8 %a to i16
+  %r = and i16 %s, %y
+  ret i16 %r
+}
+
+define i16 @signbit_splat_mask_use2(i8 %x, i16 %y) {
+; CHECK-LABEL: @signbit_splat_mask_use2(
+; CHECK-NEXT:    [[A:%.*]] = ashr i8 [[X:%.*]], 7
+; CHECK-NEXT:    [[S:%.*]] = sext i8 [[A]] to i16
+; CHECK-NEXT:    call void @use16(i16 [[S]])
+; CHECK-NEXT:    [[R:%.*]] = and i16 [[S]], [[Y:%.*]]
+; CHECK-NEXT:    ret i16 [[R]]
+;
+  %a = ashr i8 %x, 7
+  %s = sext i8 %a to i16
+  call void @use16(i16 %s)
+  %r = and i16 %s, %y
+  ret i16 %r
+}
+
+define i16 @not_signbit_splat_mask1(i8 %x, i16 %y) {
+; CHECK-LABEL: @not_signbit_splat_mask1(
+; CHECK-NEXT:    [[A:%.*]] = ashr i8 [[X:%.*]], 7
+; CHECK-NEXT:    [[Z:%.*]] = zext i8 [[A]] to i16
+; CHECK-NEXT:    [[R:%.*]] = and i16 [[Z]], [[Y:%.*]]
+; CHECK-NEXT:    ret i16 [[R]]
+;
+  %a = ashr i8 %x, 7
+  %z = zext i8 %a to i16
+  %r = and i16 %z, %y
+  ret i16 %r
+}
+
+define i16 @not_signbit_splat_mask2(i8 %x, i16 %y) {
+; CHECK-LABEL: @not_signbit_splat_mask2(
+; CHECK-NEXT:    [[A:%.*]] = ashr i8 [[X:%.*]], 6
+; CHECK-NEXT:    [[S:%.*]] = sext i8 [[A]] to i16
+; CHECK-NEXT:    [[R:%.*]] = and i16 [[S]], [[Y:%.*]]
+; CHECK-NEXT:    ret i16 [[R]]
+;
+  %a = ashr i8 %x, 6
+  %s = sext i8 %a to i16
+  %r = and i16 %s, %y
+  ret i16 %r
+}
+
 define i8 @not_ashr_bitwidth_mask(i8 %x, i8 %y) {
 ; CHECK-LABEL: @not_ashr_bitwidth_mask(
 ; CHECK-NEXT:    [[ISNEG:%.*]] = icmp slt i8 [[X:%.*]], 0