
Commit 4fef8c7

[X86] splitVectorOp - share the same SDLoc argument instead of recreating it over and over again.
1 parent 117537d commit 4fef8c7
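As a rough illustration of the pattern this change applies (this sketch is not part of the commit and uses mock stand-in types rather than LLVM's SDLoc, SDValue and SelectionDAG): the split helpers now take the debug location as a const-reference parameter, so the caller constructs a single SDLoc and threads it through, rather than each helper reconstructing one from the node.

// Standalone sketch (mock types, not LLVM's real SDLoc/SDValue/SelectionDAG)
// of the refactor: build the debug location once in the caller and pass it
// by const reference, instead of rebuilding it inside every helper.
#include <cstdio>
#include <string>

struct NodeLoc {              // stand-in for SDLoc
  std::string File;
  unsigned Line = 0;
};

struct Node {                 // stand-in for an SDValue/SDNode
  NodeLoc Loc;
};

// Before: each helper derived its own location from the node it was given.
static void splitHelperOld(const Node &Op) {
  NodeLoc dl = Op.Loc;        // recreated on every call
  std::printf("old: %s:%u\n", dl.File.c_str(), dl.Line);
}

// After: the caller constructs the location once and shares it.
static void splitHelperNew(const Node &Op, const NodeLoc &dl) {
  (void)Op;
  std::printf("new: %s:%u\n", dl.File.c_str(), dl.Line);
}

int main() {
  Node Op{{"example.ll", 42}};
  splitHelperOld(Op);

  NodeLoc dl = Op.Loc;        // built once at the call site
  splitHelperNew(Op, dl);     // and reused by every helper it reaches
  return 0;
}

The lowering behaviour is unchanged; the refactor only removes the repeated construction of equivalent location objects along a lowering chain.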

1 file changed: +50 -45 lines

llvm/lib/Target/X86/X86ISelLowering.cpp

Lines changed: 50 additions & 45 deletions
@@ -4070,10 +4070,9 @@ static std::pair<SDValue, SDValue> splitVector(SDValue Op, SelectionDAG &DAG,
 }
 
 /// Break an operation into 2 half sized ops and then concatenate the results.
-static SDValue splitVectorOp(SDValue Op, SelectionDAG &DAG) {
+static SDValue splitVectorOp(SDValue Op, SelectionDAG &DAG, const SDLoc &dl) {
   unsigned NumOps = Op.getNumOperands();
   EVT VT = Op.getValueType();
-  SDLoc dl(Op);
 
   // Extract the LHS Lo/Hi vectors
   SmallVector<SDValue> LoOps(NumOps, SDValue());
@@ -4096,7 +4095,8 @@ static SDValue splitVectorOp(SDValue Op, SelectionDAG &DAG) {
 
 /// Break an unary integer operation into 2 half sized ops and then
 /// concatenate the result back.
-static SDValue splitVectorIntUnary(SDValue Op, SelectionDAG &DAG) {
+static SDValue splitVectorIntUnary(SDValue Op, SelectionDAG &DAG,
+                                   const SDLoc &dl) {
   // Make sure we only try to split 256/512-bit types to avoid creating
   // narrow vectors.
   EVT VT = Op.getValueType();
@@ -4107,19 +4107,20 @@ static SDValue splitVectorIntUnary(SDValue Op, SelectionDAG &DAG) {
   assert(Op.getOperand(0).getValueType().getVectorNumElements() ==
              VT.getVectorNumElements() &&
          "Unexpected VTs!");
-  return splitVectorOp(Op, DAG);
+  return splitVectorOp(Op, DAG, dl);
 }
 
 /// Break a binary integer operation into 2 half sized ops and then
 /// concatenate the result back.
-static SDValue splitVectorIntBinary(SDValue Op, SelectionDAG &DAG) {
+static SDValue splitVectorIntBinary(SDValue Op, SelectionDAG &DAG,
+                                    const SDLoc &dl) {
   // Assert that all the types match.
   EVT VT = Op.getValueType();
   (void)VT;
   assert(Op.getOperand(0).getValueType() == VT &&
          Op.getOperand(1).getValueType() == VT && "Unexpected VTs!");
   assert((VT.is256BitVector() || VT.is512BitVector()) && "Unsupported VT!");
-  return splitVectorOp(Op, DAG);
+  return splitVectorOp(Op, DAG, dl);
 }
 
 // Helper for splitting operands of an operation to legal target size and
@@ -20054,7 +20055,7 @@ static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
 
   if (VT == MVT::v32i16 && !Subtarget.hasBWI()) {
     assert(InVT == MVT::v32i8 && "Unexpected VT!");
-    return splitVectorIntUnary(Op, DAG);
+    return splitVectorIntUnary(Op, DAG, dl);
   }
 
   if (Subtarget.hasInt256())
@@ -20635,7 +20636,7 @@ SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
   if (Subtarget.hasAVX512()) {
     if (InVT == MVT::v32i16 && !Subtarget.hasBWI()) {
       assert(VT == MVT::v32i8 && "Unexpected VT!");
-      return splitVectorIntUnary(Op, DAG);
+      return splitVectorIntUnary(Op, DAG, DL);
     }
 
     // word to byte only under BWI. Otherwise we have to promoted to v16i32
@@ -21615,7 +21616,8 @@ SDValue X86TargetLowering::LowerFP_TO_BF16(SDValue Op,
 
 /// Depending on uarch and/or optimizing for size, we might prefer to use a
 /// vector operation in place of the typical scalar operation.
-static SDValue lowerAddSubToHorizontalOp(SDValue Op, SelectionDAG &DAG,
+static SDValue lowerAddSubToHorizontalOp(SDValue Op, const SDLoc &DL,
+                                         SelectionDAG &DAG,
                                          const X86Subtarget &Subtarget) {
   // If both operands have other uses, this is probably not profitable.
   SDValue LHS = Op.getOperand(0);
@@ -21671,7 +21673,6 @@ static SDValue lowerAddSubToHorizontalOp(SDValue Op, SelectionDAG &DAG,
 
   // Creating a 256-bit horizontal op would be wasteful, and there is no 512-bit
   // equivalent, so extract the 256/512-bit source op to 128-bit if we can.
-  SDLoc DL(Op);
   if (BitWidth == 256 || BitWidth == 512) {
     unsigned LaneIdx = LExtIndex / NumEltsPerLane;
     X = extract128BitVector(X, LaneIdx * NumEltsPerLane, DAG, DL);
@@ -21692,7 +21693,7 @@ static SDValue lowerAddSubToHorizontalOp(SDValue Op, SelectionDAG &DAG,
 SDValue X86TargetLowering::lowerFaddFsub(SDValue Op, SelectionDAG &DAG) const {
   assert((Op.getValueType() == MVT::f32 || Op.getValueType() == MVT::f64) &&
          "Only expecting float/double");
-  return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
+  return lowerAddSubToHorizontalOp(Op, SDLoc(Op), DAG, Subtarget);
 }
 
 /// ISD::FROUND is defined to round to nearest with ties rounding away from 0.
@@ -24449,7 +24450,7 @@ static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
 
   if (VT == MVT::v32i16 && !Subtarget.hasBWI()) {
     assert(InVT == MVT::v32i8 && "Unexpected VT!");
-    return splitVectorIntUnary(Op, DAG);
+    return splitVectorIntUnary(Op, DAG, dl);
   }
 
   if (Subtarget.hasInt256())
@@ -27812,7 +27813,7 @@ static SDValue LowerVectorCTLZ_AVX512CDI(SDValue Op, SelectionDAG &DAG,
   // Split vector, it's Lo and Hi parts will be handled in next iteration.
   if (NumElems > 16 ||
       (NumElems == 16 && !Subtarget.canExtendTo512DQ()))
-    return splitVectorIntUnary(Op, DAG);
+    return splitVectorIntUnary(Op, DAG, dl);
 
   MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
   assert((NewVT.is256BitVector() || NewVT.is512BitVector()) &&
@@ -27922,11 +27923,11 @@ static SDValue LowerVectorCTLZ(SDValue Op, const SDLoc &DL,
 
   // Decompose 256-bit ops into smaller 128-bit ops.
   if (VT.is256BitVector() && !Subtarget.hasInt256())
-    return splitVectorIntUnary(Op, DAG);
+    return splitVectorIntUnary(Op, DAG, DL);
 
   // Decompose 512-bit ops into smaller 256-bit ops.
   if (VT.is512BitVector() && !Subtarget.hasBWI())
-    return splitVectorIntUnary(Op, DAG);
+    return splitVectorIntUnary(Op, DAG, DL);
 
   assert(Subtarget.hasSSSE3() && "Expected SSSE3 support for PSHUFB");
   return LowerVectorCTLZInRegLUT(Op, DL, Subtarget, DAG);
@@ -27999,16 +28000,18 @@ static SDValue LowerCTTZ(SDValue Op, const X86Subtarget &Subtarget,
 static SDValue lowerAddSub(SDValue Op, SelectionDAG &DAG,
                            const X86Subtarget &Subtarget) {
   MVT VT = Op.getSimpleValueType();
+  SDLoc DL(Op);
+
   if (VT == MVT::i16 || VT == MVT::i32)
-    return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
+    return lowerAddSubToHorizontalOp(Op, DL, DAG, Subtarget);
 
   if (VT == MVT::v32i16 || VT == MVT::v64i8)
-    return splitVectorIntBinary(Op, DAG);
+    return splitVectorIntBinary(Op, DAG, DL);
 
   assert(Op.getSimpleValueType().is256BitVector() &&
          Op.getSimpleValueType().isInteger() &&
         "Only handle AVX 256-bit vector integer operation");
-  return splitVectorIntBinary(Op, DAG);
+  return splitVectorIntBinary(Op, DAG, DL);
 }
 
 static SDValue LowerADDSAT_SUBSAT(SDValue Op, SelectionDAG &DAG,
@@ -28022,7 +28025,7 @@ static SDValue LowerADDSAT_SUBSAT(SDValue Op, SelectionDAG &DAG,
       (VT.is256BitVector() && !Subtarget.hasInt256())) {
     assert(Op.getSimpleValueType().isInteger() &&
            "Only handle AVX vector integer operation");
-    return splitVectorIntBinary(Op, DAG);
+    return splitVectorIntBinary(Op, DAG, DL);
   }
 
   // Avoid the generic expansion with min/max if we don't have pminu*/pmaxu*.
@@ -28084,10 +28087,11 @@ static SDValue LowerADDSAT_SUBSAT(SDValue Op, SelectionDAG &DAG,
 static SDValue LowerABS(SDValue Op, const X86Subtarget &Subtarget,
                         SelectionDAG &DAG) {
   MVT VT = Op.getSimpleValueType();
+  SDLoc DL(Op);
+
   if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) {
     // Since X86 does not have CMOV for 8-bit integer, we don't convert
     // 8-bit integer abs to NEG and CMOV.
-    SDLoc DL(Op);
     SDValue N0 = Op.getOperand(0);
     SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
                               DAG.getConstant(0, DL, VT), N0);
@@ -28098,7 +28102,6 @@ static SDValue LowerABS(SDValue Op, const X86Subtarget &Subtarget,
 
   // ABS(vXi64 X) --> VPBLENDVPD(X, 0-X, X).
   if ((VT == MVT::v2i64 || VT == MVT::v4i64) && Subtarget.hasSSE41()) {
-    SDLoc DL(Op);
     SDValue Src = Op.getOperand(0);
     SDValue Sub =
         DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
@@ -28108,11 +28111,11 @@ static SDValue LowerABS(SDValue Op, const X86Subtarget &Subtarget,
   if (VT.is256BitVector() && !Subtarget.hasInt256()) {
     assert(VT.isInteger() &&
            "Only handle AVX 256-bit vector integer operation");
-    return splitVectorIntUnary(Op, DAG);
+    return splitVectorIntUnary(Op, DAG, DL);
   }
 
   if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
-    return splitVectorIntUnary(Op, DAG);
+    return splitVectorIntUnary(Op, DAG, DL);
 
   // Default to expand.
   return SDValue();
@@ -28121,13 +28124,14 @@ static SDValue LowerABS(SDValue Op, const X86Subtarget &Subtarget,
 static SDValue LowerAVG(SDValue Op, const X86Subtarget &Subtarget,
                         SelectionDAG &DAG) {
   MVT VT = Op.getSimpleValueType();
+  SDLoc DL(Op);
 
   // For AVX1 cases, split to use legal ops.
   if (VT.is256BitVector() && !Subtarget.hasInt256())
-    return splitVectorIntBinary(Op, DAG);
+    return splitVectorIntBinary(Op, DAG, DL);
 
   if (VT == MVT::v32i16 || VT == MVT::v64i8)
-    return splitVectorIntBinary(Op, DAG);
+    return splitVectorIntBinary(Op, DAG, DL);
 
   // Default to expand.
   return SDValue();
@@ -28136,13 +28140,14 @@ static SDValue LowerAVG(SDValue Op, const X86Subtarget &Subtarget,
 static SDValue LowerMINMAX(SDValue Op, const X86Subtarget &Subtarget,
                            SelectionDAG &DAG) {
   MVT VT = Op.getSimpleValueType();
+  SDLoc DL(Op);
 
   // For AVX1 cases, split to use legal ops.
   if (VT.is256BitVector() && !Subtarget.hasInt256())
-    return splitVectorIntBinary(Op, DAG);
+    return splitVectorIntBinary(Op, DAG, DL);
 
   if (VT == MVT::v32i16 || VT == MVT::v64i8)
-    return splitVectorIntBinary(Op, DAG);
+    return splitVectorIntBinary(Op, DAG, DL);
 
   // Default to expand.
   return SDValue();
@@ -28299,15 +28304,15 @@ static SDValue LowerFMINIMUM_FMAXIMUM(SDValue Op, const X86Subtarget &Subtarget,
 static SDValue LowerABD(SDValue Op, const X86Subtarget &Subtarget,
                         SelectionDAG &DAG) {
   MVT VT = Op.getSimpleValueType();
+  SDLoc dl(Op);
 
   // For AVX1 cases, split to use legal ops.
   if (VT.is256BitVector() && !Subtarget.hasInt256())
-    return splitVectorIntBinary(Op, DAG);
+    return splitVectorIntBinary(Op, DAG, dl);
 
   if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.useBWIRegs())
-    return splitVectorIntBinary(Op, DAG);
+    return splitVectorIntBinary(Op, DAG, dl);
 
-  SDLoc dl(Op);
   bool IsSigned = Op.getOpcode() == ISD::ABDS;
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
 
@@ -28350,10 +28355,10 @@ static SDValue LowerMUL(SDValue Op, const X86Subtarget &Subtarget,
 
   // Decompose 256-bit ops into 128-bit ops.
   if (VT.is256BitVector() && !Subtarget.hasInt256())
-    return splitVectorIntBinary(Op, DAG);
+    return splitVectorIntBinary(Op, DAG, dl);
 
   if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
-    return splitVectorIntBinary(Op, DAG);
+    return splitVectorIntBinary(Op, DAG, dl);
 
   SDValue A = Op.getOperand(0);
   SDValue B = Op.getOperand(1);
@@ -28576,10 +28581,10 @@ static SDValue LowerMULH(SDValue Op, const X86Subtarget &Subtarget,
 
   // Decompose 256-bit ops into 128-bit ops.
   if (VT.is256BitVector() && !Subtarget.hasInt256())
-    return splitVectorIntBinary(Op, DAG);
+    return splitVectorIntBinary(Op, DAG, dl);
 
   if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
-    return splitVectorIntBinary(Op, DAG);
+    return splitVectorIntBinary(Op, DAG, dl);
 
   if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32) {
     assert((VT == MVT::v4i32 && Subtarget.hasSSE2()) ||
@@ -29757,10 +29762,10 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
 
   // Decompose 256-bit shifts into 128-bit shifts.
   if (VT.is256BitVector())
-    return splitVectorIntBinary(Op, DAG);
+    return splitVectorIntBinary(Op, DAG, dl);
 
   if (VT == MVT::v32i16 || VT == MVT::v64i8)
-    return splitVectorIntBinary(Op, DAG);
+    return splitVectorIntBinary(Op, DAG, dl);
 
   return SDValue();
 }
@@ -29837,7 +29842,7 @@ static SDValue LowerFunnelShift(SDValue Op, const X86Subtarget &Subtarget,
       EltSizeInBits < 32)) {
     // Pre-mask the amount modulo using the wider vector.
     Op = DAG.getNode(Op.getOpcode(), DL, VT, Op0, Op1, AmtMod);
-    return splitVectorOp(Op, DAG);
+    return splitVectorOp(Op, DAG, DL);
   }
 
   // Attempt to fold scalar shift as unpack(y,x) << zext(splat(z))
@@ -29999,7 +30004,7 @@ static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget,
 
   // Split 256-bit integers on XOP/pre-AVX2 targets.
   if (VT.is256BitVector() && (Subtarget.hasXOP() || !Subtarget.hasAVX2()))
-    return splitVectorIntBinary(Op, DAG);
+    return splitVectorIntBinary(Op, DAG, DL);
 
   // XOP has 128-bit vector variable + immediate rotates.
   // +ve/-ve Amt = rotate left/right - just need to handle ISD::ROTL.
@@ -30035,7 +30040,7 @@ static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget,
 
   // Split 512-bit integers on non 512-bit BWI targets.
   if (VT.is512BitVector() && !Subtarget.useBWIRegs())
-    return splitVectorIntBinary(Op, DAG);
+    return splitVectorIntBinary(Op, DAG, DL);
 
   assert(
       (VT == MVT::v4i32 || VT == MVT::v8i16 || VT == MVT::v16i8 ||
@@ -31115,11 +31120,11 @@ static SDValue LowerVectorCTPOP(SDValue Op, const SDLoc &DL,
 
   // Decompose 256-bit ops into smaller 128-bit ops.
   if (VT.is256BitVector() && !Subtarget.hasInt256())
-    return splitVectorIntUnary(Op, DAG);
+    return splitVectorIntUnary(Op, DAG, DL);
 
   // Decompose 512-bit ops into smaller 256-bit ops.
   if (VT.is512BitVector() && !Subtarget.hasBWI())
-    return splitVectorIntUnary(Op, DAG);
+    return splitVectorIntUnary(Op, DAG, DL);
 
   // For element types greater than i8, do vXi8 pop counts and a bytesum.
   if (VT.getScalarType() != MVT::i8) {
@@ -31243,7 +31248,7 @@ static SDValue LowerBITREVERSE_XOP(SDValue Op, SelectionDAG &DAG) {
 
   // Decompose 256-bit ops into smaller 128-bit ops.
   if (VT.is256BitVector())
-    return splitVectorIntUnary(Op, DAG);
+    return splitVectorIntUnary(Op, DAG, DL);
 
   assert(VT.is128BitVector() &&
          "Only 128-bit vector bitreverse lowering supported.");
@@ -31282,11 +31287,11 @@ static SDValue LowerBITREVERSE(SDValue Op, const X86Subtarget &Subtarget,
 
   // Split 512-bit ops without BWI so that we can still use the PSHUFB lowering.
   if (VT.is512BitVector() && !Subtarget.hasBWI())
-    return splitVectorIntUnary(Op, DAG);
+    return splitVectorIntUnary(Op, DAG, DL);
 
   // Decompose 256-bit ops into smaller 128-bit ops on pre-AVX2.
   if (VT.is256BitVector() && !Subtarget.hasInt256())
-    return splitVectorIntUnary(Op, DAG);
+    return splitVectorIntUnary(Op, DAG, DL);
 
   // Lower vXi16/vXi32/vXi64 as BSWAP + vXi8 BITREVERSE.
   if (VT.getScalarType() != MVT::i8) {
@@ -55933,7 +55938,7 @@ static SDValue combineEXTRACT_SUBVECTOR(SDNode *N, SelectionDAG &DAG,
     if (isConcatenatedNot(InVecBC.getOperand(0)) ||
         isConcatenatedNot(InVecBC.getOperand(1))) {
       // extract (and v4i64 X, (not (concat Y1, Y2))), n -> andnp v2i64 X(n), Y1
-      SDValue Concat = splitVectorIntBinary(InVecBC, DAG);
+      SDValue Concat = splitVectorIntBinary(InVecBC, DAG, SDLoc(InVecBC));
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT,
                         DAG.getBitcast(InVecVT, Concat), N->getOperand(1));
    }
