Diffstat (limited to 'lib/Target/X86')
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp        | 172
-rw-r--r--  lib/Target/X86/X86TargetTransformInfo.cpp | 630
-rw-r--r--  lib/Target/X86/X86TargetTransformInfo.h   |   3
3 files changed, 437 insertions, 368 deletions
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index fd2189397279..7f72ab17f619 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -16985,10 +16985,9 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, newSelect, zeroConst); } - if (Cond.getOpcode() == ISD::SETCC) { + if (Cond.getOpcode() == ISD::SETCC) if (SDValue NewCond = LowerSETCC(Cond, DAG)) Cond = NewCond; - } // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y @@ -18289,6 +18288,7 @@ static SDValue getTargetVShiftByConstNode(unsigned Opc, const SDLoc &dl, MVT VT, /// constant. Takes immediate version of shift as input. static SDValue getTargetVShiftNode(unsigned Opc, const SDLoc &dl, MVT VT, SDValue SrcOp, SDValue ShAmt, + const X86Subtarget &Subtarget, SelectionDAG &DAG) { MVT SVT = ShAmt.getSimpleValueType(); assert((SVT == MVT::i32 || SVT == MVT::i64) && "Unexpected value type!"); @@ -18306,27 +18306,32 @@ static SDValue getTargetVShiftNode(unsigned Opc, const SDLoc &dl, MVT VT, case X86ISD::VSRAI: Opc = X86ISD::VSRA; break; } - const X86Subtarget &Subtarget = - static_cast<const X86Subtarget &>(DAG.getSubtarget()); - if (Subtarget.hasSSE41() && ShAmt.getOpcode() == ISD::ZERO_EXTEND && - ShAmt.getOperand(0).getSimpleValueType() == MVT::i16) { - // Let the shuffle legalizer expand this shift amount node. + // Need to build a vector containing the shift amount. + // SSE/AVX packed shifts only use the lower 64 bits of the shift count. + // +=================+============+=======================================+ + // | ShAmt is | HasSSE4.1? | Construct ShAmt vector as | + // +=================+============+=======================================+ + // | i64 | Yes, No | Use ShAmt as lowest elt | + // | i32 | Yes | zero-extend in-reg | + // | (i32 zext(i16)) | Yes | zero-extend in-reg | + // | i16/i32 | No | v4i32 build_vector(ShAmt, 0, ud, ud) | + // +=================+============+=======================================+ + + if (SVT == MVT::i64) + ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), MVT::v2i64, ShAmt); + else if (Subtarget.hasSSE41() && ShAmt.getOpcode() == ISD::ZERO_EXTEND && + ShAmt.getOperand(0).getSimpleValueType() == MVT::i16) { SDValue Op0 = ShAmt.getOperand(0); Op0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(Op0), MVT::v8i16, Op0); - ShAmt = getShuffleVectorZeroOrUndef(Op0, 0, true, Subtarget, DAG); + ShAmt = DAG.getZeroExtendVectorInReg(Op0, SDLoc(Op0), MVT::v2i64); + } else if (Subtarget.hasSSE41() && + ShAmt.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { + ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), MVT::v4i32, ShAmt); + ShAmt = DAG.getZeroExtendVectorInReg(ShAmt, SDLoc(ShAmt), MVT::v2i64); + } else { - // Need to build a vector containing shift amount. - // SSE/AVX packed shifts only use the lower 64-bit of the shift count. - SmallVector<SDValue, 4> ShOps; - ShOps.push_back(ShAmt); - if (SVT == MVT::i32) { - ShOps.push_back(DAG.getConstant(0, dl, SVT)); - ShOps.push_back(DAG.getUNDEF(SVT)); - } - ShOps.push_back(DAG.getUNDEF(SVT)); - - MVT BVT = SVT == MVT::i32 ? 
MVT::v4i32 : MVT::v2i64; - ShAmt = DAG.getBuildVector(BVT, dl, ShOps); + SmallVector<SDValue, 4> ShOps = {ShAmt, DAG.getConstant(0, dl, SVT), + DAG.getUNDEF(SVT), DAG.getUNDEF(SVT)}; + ShAmt = DAG.getBuildVector(MVT::v4i32, dl, ShOps); } // The return type has to be a 128-bit type with the same element @@ -19014,7 +19019,8 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget &Subtarget } case VSHIFT: return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(), - Op.getOperand(1), Op.getOperand(2), DAG); + Op.getOperand(1), Op.getOperand(2), Subtarget, + DAG); case COMPRESS_EXPAND_IN_REG: { SDValue Mask = Op.getOperand(3); SDValue DataToCompress = Op.getOperand(1); @@ -21276,7 +21282,7 @@ static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG, else if (EltVT.bitsLT(MVT::i32)) BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt); - return getTargetVShiftNode(X86OpcI, dl, VT, R, BaseShAmt, DAG); + return getTargetVShiftNode(X86OpcI, dl, VT, R, BaseShAmt, Subtarget, DAG); } } @@ -25951,12 +25957,11 @@ bool X86TargetLowering::isGAPlusOffset(SDNode *N, // instructions. // TODO: Investigate sharing more of this with shuffle lowering. static bool matchUnaryVectorShuffle(MVT MaskVT, ArrayRef<int> Mask, + bool FloatDomain, const X86Subtarget &Subtarget, unsigned &Shuffle, MVT &SrcVT, MVT &DstVT) { unsigned NumMaskElts = Mask.size(); unsigned MaskEltSize = MaskVT.getScalarSizeInBits(); - bool FloatDomain = MaskVT.isFloatingPoint() || - (!Subtarget.hasAVX2() && MaskVT.is256BitVector()); // Match against a VZEXT_MOVL instruction, SSE1 only supports 32-bits (MOVSS). if (((MaskEltSize == 32) || (MaskEltSize == 64 && Subtarget.hasSSE2())) && @@ -26067,11 +26072,11 @@ static bool matchUnaryVectorShuffle(MVT MaskVT, ArrayRef<int> Mask, // permute instructions. // TODO: Investigate sharing more of this with shuffle lowering. static bool matchUnaryPermuteVectorShuffle(MVT MaskVT, ArrayRef<int> Mask, + bool FloatDomain, const X86Subtarget &Subtarget, unsigned &Shuffle, MVT &ShuffleVT, unsigned &PermuteImm) { unsigned NumMaskElts = Mask.size(); - bool FloatDomain = MaskVT.isFloatingPoint(); bool ContainsZeros = false; SmallBitVector Zeroable(NumMaskElts, false); @@ -26211,11 +26216,10 @@ static bool matchUnaryPermuteVectorShuffle(MVT MaskVT, ArrayRef<int> Mask, // shuffle instructions. // TODO: Investigate sharing more of this with shuffle lowering. static bool matchBinaryVectorShuffle(MVT MaskVT, ArrayRef<int> Mask, - SDValue &V1, SDValue &V2, + bool FloatDomain, SDValue &V1, SDValue &V2, const X86Subtarget &Subtarget, unsigned &Shuffle, MVT &ShuffleVT, bool IsUnary) { - bool FloatDomain = MaskVT.isFloatingPoint(); unsigned EltSizeInBits = MaskVT.getScalarSizeInBits(); if (MaskVT.is128BitVector()) { @@ -26310,13 +26314,13 @@ static bool matchBinaryVectorShuffle(MVT MaskVT, ArrayRef<int> Mask, } static bool matchBinaryPermuteVectorShuffle(MVT MaskVT, ArrayRef<int> Mask, + bool FloatDomain, SDValue &V1, SDValue &V2, SDLoc &DL, SelectionDAG &DAG, const X86Subtarget &Subtarget, unsigned &Shuffle, MVT &ShuffleVT, unsigned &PermuteImm) { unsigned NumMaskElts = Mask.size(); - bool FloatDomain = MaskVT.isFloatingPoint(); // Attempt to match against PALIGNR byte rotate. 
if (!FloatDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSSE3()) || @@ -26594,8 +26598,8 @@ static bool combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root, } } - if (matchUnaryVectorShuffle(MaskVT, Mask, Subtarget, Shuffle, ShuffleSrcVT, - ShuffleVT)) { + if (matchUnaryVectorShuffle(MaskVT, Mask, FloatDomain, Subtarget, Shuffle, + ShuffleSrcVT, ShuffleVT)) { if (Depth == 1 && Root.getOpcode() == Shuffle) return false; // Nothing to do! if (IsEVEXShuffle && (NumRootElts != ShuffleVT.getVectorNumElements())) @@ -26609,8 +26613,8 @@ static bool combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root, return true; } - if (matchUnaryPermuteVectorShuffle(MaskVT, Mask, Subtarget, Shuffle, - ShuffleVT, PermuteImm)) { + if (matchUnaryPermuteVectorShuffle(MaskVT, Mask, FloatDomain, Subtarget, + Shuffle, ShuffleVT, PermuteImm)) { if (Depth == 1 && Root.getOpcode() == Shuffle) return false; // Nothing to do! if (IsEVEXShuffle && (NumRootElts != ShuffleVT.getVectorNumElements())) @@ -26626,8 +26630,8 @@ static bool combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root, } } - if (matchBinaryVectorShuffle(MaskVT, Mask, V1, V2, Subtarget, Shuffle, - ShuffleVT, UnaryShuffle)) { + if (matchBinaryVectorShuffle(MaskVT, Mask, FloatDomain, V1, V2, Subtarget, + Shuffle, ShuffleVT, UnaryShuffle)) { if (Depth == 1 && Root.getOpcode() == Shuffle) return false; // Nothing to do! if (IsEVEXShuffle && (NumRootElts != ShuffleVT.getVectorNumElements())) @@ -26643,8 +26647,9 @@ static bool combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root, return true; } - if (matchBinaryPermuteVectorShuffle(MaskVT, Mask, V1, V2, DL, DAG, Subtarget, - Shuffle, ShuffleVT, PermuteImm)) { + if (matchBinaryPermuteVectorShuffle(MaskVT, Mask, FloatDomain, V1, V2, DL, + DAG, Subtarget, Shuffle, ShuffleVT, + PermuteImm)) { if (Depth == 1 && Root.getOpcode() == Shuffle) return false; // Nothing to do! if (IsEVEXShuffle && (NumRootElts != ShuffleVT.getVectorNumElements())) @@ -28742,6 +28747,27 @@ static bool combineBitcastForMaskedOp(SDValue OrigOp, SelectionDAG &DAG, DAG.getConstant(Imm, DL, MVT::i8))); return true; } + case ISD::EXTRACT_SUBVECTOR: { + unsigned EltSize = EltVT.getSizeInBits(); + if (EltSize != 32 && EltSize != 64) + return false; + MVT OpEltVT = Op.getSimpleValueType().getVectorElementType(); + // Only change element size, not type. + if (VT.isInteger() != OpEltVT.isInteger()) + return false; + uint64_t Imm = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); + Imm = (Imm * OpEltVT.getSizeInBits()) / EltSize; + // Op0 needs to be bitcasted to a larger vector with the same element type. + SDValue Op0 = Op.getOperand(0); + MVT Op0VT = MVT::getVectorVT(EltVT, + Op0.getSimpleValueType().getSizeInBits() / EltSize); + Op0 = DAG.getBitcast(Op0VT, Op0); + DCI.AddToWorklist(Op0.getNode()); + DCI.CombineTo(OrigOp.getNode(), + DAG.getNode(Opcode, DL, VT, Op0, + DAG.getConstant(Imm, DL, MVT::i8))); + return true; + } } return false; @@ -30921,6 +30947,59 @@ static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG, return DAG.getNode(X86ISD::PCMPGT, SDLoc(N), VT, Shift.getOperand(0), Ones); } +/// Check if truncation with saturation from type \p SrcVT to \p DstVT +/// is valid for the given \p Subtarget. 
+static bool +isSATValidOnSubtarget(EVT SrcVT, EVT DstVT, const X86Subtarget &Subtarget) { + if (!Subtarget.hasAVX512()) + return false; + EVT SrcElVT = SrcVT.getScalarType(); + EVT DstElVT = DstVT.getScalarType(); + if (SrcElVT.getSizeInBits() < 16 || SrcElVT.getSizeInBits() > 64) + return false; + if (DstElVT.getSizeInBits() < 8 || DstElVT.getSizeInBits() > 32) + return false; + if (SrcVT.is512BitVector() || Subtarget.hasVLX()) + return SrcElVT.getSizeInBits() >= 32 || Subtarget.hasBWI(); + return false; +} + +/// Detect a pattern of truncation with saturation: +/// (truncate (umin (x, unsigned_max_of_dest_type)) to dest_type). +/// Return the source value to be truncated or SDValue() if the pattern was not +/// matched or is unsupported on the current target. +static SDValue +detectUSatPattern(SDValue In, EVT VT, const X86Subtarget &Subtarget) { + if (In.getOpcode() != ISD::UMIN) + return SDValue(); + + EVT InVT = In.getValueType(); + // FIXME: Scalar type may be supported if we move it to a vector register. + if (!InVT.isVector() || !InVT.isSimple()) + return SDValue(); + + if (!isSATValidOnSubtarget(InVT, VT, Subtarget)) + return SDValue(); + + // Saturation with truncation. We truncate from InVT to VT. + assert(InVT.getScalarSizeInBits() > VT.getScalarSizeInBits() && + "Unexpected types for truncate operation"); + + SDValue SrcVal; + APInt C; + if (ISD::isConstantSplatVector(In.getOperand(0).getNode(), C)) + SrcVal = In.getOperand(1); + else if (ISD::isConstantSplatVector(In.getOperand(1).getNode(), C)) + SrcVal = In.getOperand(0); + else + return SDValue(); + + // C should be equal to UINT32_MAX / UINT16_MAX / UINT8_MAX according to + // the element size of the destination type. + return (C == ((uint64_t)1 << VT.getScalarSizeInBits()) - 1) ? + SrcVal : SDValue(); +} + /// This function detects the AVG pattern between vectors of unsigned i8/i16, /// which is c = (a + b + 1) / 2, and replace this operation with the efficient /// X86ISD::AVG instruction. @@ -31487,6 +31566,12 @@ static SDValue combineStore(SDNode *N, SelectionDAG &DAG, St->getPointerInfo(), St->getAlignment(), St->getMemOperand()->getFlags()); + if (SDValue Val = + detectUSatPattern(St->getValue(), St->getMemoryVT(), Subtarget)) + return EmitTruncSStore(false /* Unsigned saturation */, St->getChain(), + dl, Val, St->getBasePtr(), + St->getMemoryVT(), St->getMemOperand(), DAG); + const TargetLowering &TLI = DAG.getTargetLoweringInfo(); unsigned NumElems = VT.getVectorNumElements(); assert(StVT != VT && "Cannot truncate to the same type"); @@ -31967,7 +32052,8 @@ combineVectorTruncationWithPACKUS(SDNode *N, SelectionDAG &DAG, /// Truncate a group of v4i32 into v8i16 using X86ISD::PACKSS. static SDValue -combineVectorTruncationWithPACKSS(SDNode *N, SelectionDAG &DAG, +combineVectorTruncationWithPACKSS(SDNode *N, const X86Subtarget &Subtarget, + SelectionDAG &DAG, SmallVector<SDValue, 8> &Regs) { assert(Regs.size() > 0 && Regs[0].getValueType() == MVT::v4i32); EVT OutVT = N->getValueType(0); @@ -31976,8 +32062,10 @@ combineVectorTruncationWithPACKSS(SDNode *N, SelectionDAG &DAG, // Shift left by 16 bits, then arithmetic-shift right by 16 bits. 
SDValue ShAmt = DAG.getConstant(16, DL, MVT::i32); for (auto &Reg : Regs) { - Reg = getTargetVShiftNode(X86ISD::VSHLI, DL, MVT::v4i32, Reg, ShAmt, DAG); - Reg = getTargetVShiftNode(X86ISD::VSRAI, DL, MVT::v4i32, Reg, ShAmt, DAG); + Reg = getTargetVShiftNode(X86ISD::VSHLI, DL, MVT::v4i32, Reg, ShAmt, + Subtarget, DAG); + Reg = getTargetVShiftNode(X86ISD::VSRAI, DL, MVT::v4i32, Reg, ShAmt, + Subtarget, DAG); } for (unsigned i = 0, e = Regs.size() / 2; i < e; i++) @@ -32046,7 +32134,7 @@ static SDValue combineVectorTruncation(SDNode *N, SelectionDAG &DAG, if (Subtarget.hasSSE41() || OutSVT == MVT::i8) return combineVectorTruncationWithPACKUS(N, DAG, SubVec); else if (InSVT == MVT::i32) - return combineVectorTruncationWithPACKSS(N, DAG, SubVec); + return combineVectorTruncationWithPACKSS(N, Subtarget, DAG, SubVec); else return SDValue(); } @@ -32104,6 +32192,10 @@ static SDValue combineTruncate(SDNode *N, SelectionDAG &DAG, if (SDValue Avg = detectAVGPattern(Src, VT, DAG, Subtarget, DL)) return Avg; + // Try the truncation with unsigned saturation. + if (SDValue Val = detectUSatPattern(Src, VT, Subtarget)) + return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, Val); + // The bitcast source is a direct mmx result. // Detect bitcasts between i32 to x86mmx if (Src.getOpcode() == ISD::BITCAST && VT == MVT::i32) { diff --git a/lib/Target/X86/X86TargetTransformInfo.cpp b/lib/Target/X86/X86TargetTransformInfo.cpp index d7792e296a58..de4839432b9a 100644 --- a/lib/Target/X86/X86TargetTransformInfo.cpp +++ b/lib/Target/X86/X86TargetTransformInfo.cpp @@ -80,9 +80,12 @@ unsigned X86TTIImpl::getNumberOfRegisters(bool Vector) { unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) { if (Vector) { - if (ST->hasAVX512()) return 512; - if (ST->hasAVX()) return 256; - if (ST->hasSSE1()) return 128; + if (ST->hasAVX512()) + return 512; + if (ST->hasAVX()) + return 256; + if (ST->hasSSE1()) + return 128; return 0; } @@ -211,11 +214,9 @@ int X86TTIImpl::getArithmeticInstrCost( }; // Look for AVX512DQ lowering tricks for custom cases. - if (ST->hasDQI()) { - if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD, - LT.second)) + if (ST->hasDQI()) + if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD, LT.second)) return LT.first * Entry->Cost; - } static const CostTblEntry AVX512BWCostTable[] = { { ISD::MUL, MVT::v64i8, 11 }, // extend/pmullw/trunc sequence. @@ -225,37 +226,38 @@ int X86TTIImpl::getArithmeticInstrCost( // Vectorizing division is a bad idea. See the SSE2 table for more comments. { ISD::SDIV, MVT::v64i8, 64*20 }, { ISD::SDIV, MVT::v32i16, 32*20 }, - { ISD::SDIV, MVT::v16i32, 16*20 }, - { ISD::SDIV, MVT::v8i64, 8*20 }, { ISD::UDIV, MVT::v64i8, 64*20 }, - { ISD::UDIV, MVT::v32i16, 32*20 }, - { ISD::UDIV, MVT::v16i32, 16*20 }, - { ISD::UDIV, MVT::v8i64, 8*20 }, + { ISD::UDIV, MVT::v32i16, 32*20 } }; // Look for AVX512BW lowering tricks for custom cases. - if (ST->hasBWI()) { - if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, - LT.second)) + if (ST->hasBWI()) + if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, LT.second)) return LT.first * Entry->Cost; - } static const CostTblEntry AVX512CostTable[] = { - { ISD::SHL, MVT::v16i32, 1 }, - { ISD::SRL, MVT::v16i32, 1 }, - { ISD::SRA, MVT::v16i32, 1 }, - { ISD::SHL, MVT::v8i64, 1 }, - { ISD::SRL, MVT::v8i64, 1 }, - { ISD::SRA, MVT::v8i64, 1 }, - - { ISD::MUL, MVT::v32i8, 13 }, // extend/pmullw/trunc sequence. - { ISD::MUL, MVT::v16i8, 5 }, // extend/pmullw/trunc sequence. 
+ { ISD::SHL, MVT::v16i32, 1 }, + { ISD::SRL, MVT::v16i32, 1 }, + { ISD::SRA, MVT::v16i32, 1 }, + { ISD::SHL, MVT::v8i64, 1 }, + { ISD::SRL, MVT::v8i64, 1 }, + { ISD::SRA, MVT::v8i64, 1 }, + + { ISD::MUL, MVT::v32i8, 13 }, // extend/pmullw/trunc sequence. + { ISD::MUL, MVT::v16i8, 5 }, // extend/pmullw/trunc sequence. + { ISD::MUL, MVT::v16i32, 1 }, // pmulld + { ISD::MUL, MVT::v8i64, 8 }, // 3*pmuludq/3*shift/2*add + + // Vectorizing division is a bad idea. See the SSE2 table for more comments. + { ISD::SDIV, MVT::v16i32, 16*20 }, + { ISD::SDIV, MVT::v8i64, 8*20 }, + { ISD::UDIV, MVT::v16i32, 16*20 }, + { ISD::UDIV, MVT::v8i64, 8*20 } }; - if (ST->hasAVX512()) { + if (ST->hasAVX512()) if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second)) return LT.first * Entry->Cost; - } static const CostTblEntry AVX2CostTable[] = { // Shifts on v4i64/v8i32 on AVX2 is legal even though we declare to @@ -315,10 +317,9 @@ int X86TTIImpl::getArithmeticInstrCost( }; // Look for XOP lowering tricks. - if (ST->hasXOP()) { + if (ST->hasXOP()) if (const auto *Entry = CostTableLookup(XOPCostTable, ISD, LT.second)) return LT.first * Entry->Cost; - } static const CostTblEntry AVX2CustomCostTable[] = { { ISD::SHL, MVT::v32i8, 11 }, // vpblendvb sequence. @@ -334,6 +335,8 @@ int X86TTIImpl::getArithmeticInstrCost( { ISD::MUL, MVT::v32i8, 17 }, // extend/pmullw/trunc sequence. { ISD::MUL, MVT::v16i8, 7 }, // extend/pmullw/trunc sequence. + { ISD::MUL, MVT::v8i32, 1 }, // pmulld + { ISD::MUL, MVT::v4i64, 8 }, // 3*pmuludq/3*shift/2*add { ISD::FDIV, MVT::f32, 7 }, // Haswell from http://www.agner.org/ { ISD::FDIV, MVT::v4f32, 7 }, // Haswell from http://www.agner.org/ @@ -344,11 +347,10 @@ int X86TTIImpl::getArithmeticInstrCost( }; // Look for AVX2 lowering tricks for custom cases. - if (ST->hasAVX2()) { + if (ST->hasAVX2()) if (const auto *Entry = CostTableLookup(AVX2CustomCostTable, ISD, LT.second)) return LT.first * Entry->Cost; - } static const CostTblEntry AVXCustomCostTable[] = { { ISD::MUL, MVT::v32i8, 26 }, // extend/pmullw/trunc sequence. @@ -372,24 +374,10 @@ int X86TTIImpl::getArithmeticInstrCost( }; // Look for AVX2 lowering tricks for custom cases. - if (ST->hasAVX()) { + if (ST->hasAVX()) if (const auto *Entry = CostTableLookup(AVXCustomCostTable, ISD, LT.second)) return LT.first * Entry->Cost; - } - - static const CostTblEntry SSE42FloatCostTable[] = { - { ISD::FDIV, MVT::f32, 14 }, // Nehalem from http://www.agner.org/ - { ISD::FDIV, MVT::v4f32, 14 }, // Nehalem from http://www.agner.org/ - { ISD::FDIV, MVT::f64, 22 }, // Nehalem from http://www.agner.org/ - { ISD::FDIV, MVT::v2f64, 22 }, // Nehalem from http://www.agner.org/ - }; - - if (ST->hasSSE42()) { - if (const auto *Entry = CostTableLookup(SSE42FloatCostTable, ISD, - LT.second)) - return LT.first * Entry->Cost; - } static const CostTblEntry SSE2UniformCostTable[] = { @@ -452,6 +440,17 @@ int X86TTIImpl::getArithmeticInstrCost( ISD = ISD::MUL; } + static const CostTblEntry SSE42CostTable[] = { + { ISD::FDIV, MVT::f32, 14 }, // Nehalem from http://www.agner.org/ + { ISD::FDIV, MVT::v4f32, 14 }, // Nehalem from http://www.agner.org/ + { ISD::FDIV, MVT::f64, 22 }, // Nehalem from http://www.agner.org/ + { ISD::FDIV, MVT::v2f64, 22 }, // Nehalem from http://www.agner.org/ + }; + + if (ST->hasSSE42()) + if (const auto *Entry = CostTableLookup(SSE42CostTable, ISD, LT.second)) + return LT.first * Entry->Cost; + static const CostTblEntry SSE41CostTable[] = { { ISD::SHL, MVT::v16i8, 11 }, // pblendvb sequence. 
{ ISD::SHL, MVT::v32i8, 2*11 }, // pblendvb sequence. @@ -471,44 +470,39 @@ int X86TTIImpl::getArithmeticInstrCost( { ISD::SRA, MVT::v16i16, 2*14 }, // pblendvb sequence. { ISD::SRA, MVT::v4i32, 12 }, // Shift each lane + blend. { ISD::SRA, MVT::v8i32, 2*12 }, // Shift each lane + blend. + + { ISD::MUL, MVT::v4i32, 1 } // pmulld }; - if (ST->hasSSE41()) { + if (ST->hasSSE41()) if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second)) return LT.first * Entry->Cost; - } static const CostTblEntry SSE2CostTable[] = { // We don't correctly identify costs of casts because they are marked as // custom. { ISD::SHL, MVT::v16i8, 26 }, // cmpgtb sequence. - { ISD::SHL, MVT::v32i8, 2*26 }, // cmpgtb sequence. { ISD::SHL, MVT::v8i16, 32 }, // cmpgtb sequence. - { ISD::SHL, MVT::v16i16, 2*32 }, // cmpgtb sequence. { ISD::SHL, MVT::v4i32, 2*5 }, // We optimized this using mul. { ISD::SHL, MVT::v8i32, 2*2*5 }, // We optimized this using mul. { ISD::SHL, MVT::v2i64, 4 }, // splat+shuffle sequence. { ISD::SHL, MVT::v4i64, 2*4 }, // splat+shuffle sequence. { ISD::SRL, MVT::v16i8, 26 }, // cmpgtb sequence. - { ISD::SRL, MVT::v32i8, 2*26 }, // cmpgtb sequence. { ISD::SRL, MVT::v8i16, 32 }, // cmpgtb sequence. - { ISD::SRL, MVT::v16i16, 2*32 }, // cmpgtb sequence. { ISD::SRL, MVT::v4i32, 16 }, // Shift each lane + blend. - { ISD::SRL, MVT::v8i32, 2*16 }, // Shift each lane + blend. { ISD::SRL, MVT::v2i64, 4 }, // splat+shuffle sequence. { ISD::SRL, MVT::v4i64, 2*4 }, // splat+shuffle sequence. { ISD::SRA, MVT::v16i8, 54 }, // unpacked cmpgtb sequence. - { ISD::SRA, MVT::v32i8, 2*54 }, // unpacked cmpgtb sequence. { ISD::SRA, MVT::v8i16, 32 }, // cmpgtb sequence. - { ISD::SRA, MVT::v16i16, 2*32 }, // cmpgtb sequence. { ISD::SRA, MVT::v4i32, 16 }, // Shift each lane + blend. - { ISD::SRA, MVT::v8i32, 2*16 }, // Shift each lane + blend. { ISD::SRA, MVT::v2i64, 12 }, // srl/xor/sub sequence. { ISD::SRA, MVT::v4i64, 2*12 }, // srl/xor/sub sequence. { ISD::MUL, MVT::v16i8, 12 }, // extend/pmullw/trunc sequence. + { ISD::MUL, MVT::v4i32, 6 }, // 3*pmuludq/4*shuffle + { ISD::MUL, MVT::v2i64, 8 }, // 3*pmuludq/3*shift/2*add { ISD::FDIV, MVT::f32, 23 }, // Pentium IV from http://www.agner.org/ { ISD::FDIV, MVT::v4f32, 39 }, // Pentium IV from http://www.agner.org/ @@ -531,10 +525,9 @@ int X86TTIImpl::getArithmeticInstrCost( { ISD::UDIV, MVT::v2i64, 2*20 }, }; - if (ST->hasSSE2()) { + if (ST->hasSSE2()) if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second)) return LT.first * Entry->Cost; - } static const CostTblEntry AVX1CostTable[] = { // We don't have to scalarize unsupported ops. We can issue two half-sized @@ -553,307 +546,278 @@ int X86TTIImpl::getArithmeticInstrCost( // A v4i64 multiply is custom lowered as two split v2i64 vectors that then // are lowered as a series of long multiplies(3), shifts(3) and adds(2) // Because we believe v4i64 to be a legal type, we must also include the - // split factor of two in the cost table. Therefore, the cost here is 16 + // extract+insert in the cost table. Therefore, the cost here is 18 // instead of 8. - { ISD::MUL, MVT::v4i64, 16 }, + { ISD::MUL, MVT::v4i64, 18 }, }; // Look for AVX1 lowering tricks. - if (ST->hasAVX() && !ST->hasAVX2()) { - MVT VT = LT.second; - - if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, VT)) + if (ST->hasAVX() && !ST->hasAVX2()) + if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, LT.second)) return LT.first * Entry->Cost; - } - // Custom lowering of vectors. 
- static const CostTblEntry CustomLowered[] = { - // A v2i64/v4i64 and multiply is custom lowered as a series of long - // multiplies(3), shifts(3) and adds(2). - { ISD::MUL, MVT::v2i64, 8 }, - { ISD::MUL, MVT::v4i64, 8 }, - { ISD::MUL, MVT::v8i64, 8 } - }; - if (const auto *Entry = CostTableLookup(CustomLowered, ISD, LT.second)) - return LT.first * Entry->Cost; - - // Special lowering of v4i32 mul on sse2, sse3: Lower v4i32 mul as 2x shuffle, - // 2x pmuludq, 2x shuffle. - if (ISD == ISD::MUL && LT.second == MVT::v4i32 && ST->hasSSE2() && - !ST->hasSSE41()) - return LT.first * 6; - - static const CostTblEntry SSE1FloatCostTable[] = { + static const CostTblEntry SSE1CostTable[] = { { ISD::FDIV, MVT::f32, 17 }, // Pentium III from http://www.agner.org/ { ISD::FDIV, MVT::v4f32, 34 }, // Pentium III from http://www.agner.org/ }; if (ST->hasSSE1()) - if (const auto *Entry = CostTableLookup(SSE1FloatCostTable, ISD, - LT.second)) + if (const auto *Entry = CostTableLookup(SSE1CostTable, ISD, LT.second)) return LT.first * Entry->Cost; + // Fallback to the default implementation. return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info); } int X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index, Type *SubTp) { - if (Kind == TTI::SK_Reverse || Kind == TTI::SK_Alternate) { - // 64-bit packed float vectors (v2f32) are widened to type v4f32. - // 64-bit packed integer vectors (v2i32) are promoted to type v2i64. - std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp); - - static const CostTblEntry AVX512VBMIShuffleTbl[] = { - { TTI::SK_Reverse, MVT::v64i8, 1 }, // vpermb - { TTI::SK_Reverse, MVT::v32i8, 1 } // vpermb - }; - - if (ST->hasVBMI()) - if (const auto *Entry = - CostTableLookup(AVX512VBMIShuffleTbl, Kind, LT.second)) - return LT.first * Entry->Cost; + // 64-bit packed float vectors (v2f32) are widened to type v4f32. + // 64-bit packed integer vectors (v2i32) are promoted to type v2i64. + std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp); + + // For Broadcasts we are splatting the first element from the first input + // register, so only need to reference that input and all the output + // registers are the same. + if (Kind == TTI::SK_Broadcast) + LT.first = 1; + + // We are going to permute multiple sources and the result will be in multiple + // destinations. Providing an accurate cost only for splits where the element + // type remains the same. 
+ if (Kind == TTI::SK_PermuteSingleSrc && LT.first != 1) { + MVT LegalVT = LT.second; + if (LegalVT.getVectorElementType().getSizeInBits() == + Tp->getVectorElementType()->getPrimitiveSizeInBits() && + LegalVT.getVectorNumElements() < Tp->getVectorNumElements()) { + + unsigned VecTySize = DL.getTypeStoreSize(Tp); + unsigned LegalVTSize = LegalVT.getStoreSize(); + // Number of source vectors after legalization: + unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize; + // Number of destination vectors after legalization: + unsigned NumOfDests = LT.first; + + Type *SingleOpTy = VectorType::get(Tp->getVectorElementType(), + LegalVT.getVectorNumElements()); + + unsigned NumOfShuffles = (NumOfSrcs - 1) * NumOfDests; + return NumOfShuffles * + getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy, 0, nullptr); + } - static const CostTblEntry AVX512BWShuffleTbl[] = { - { TTI::SK_Reverse, MVT::v32i16, 1 }, // vpermw - { TTI::SK_Reverse, MVT::v16i16, 1 }, // vpermw - { TTI::SK_Reverse, MVT::v64i8, 6 } // vextracti64x4 + 2*vperm2i128 - // + 2*pshufb + vinserti64x4 - }; + return BaseT::getShuffleCost(Kind, Tp, Index, SubTp); + } - if (ST->hasBWI()) - if (const auto *Entry = - CostTableLookup(AVX512BWShuffleTbl, Kind, LT.second)) - return LT.first * Entry->Cost; + // For 2-input shuffles, we must account for splitting the 2 inputs into many. + if (Kind == TTI::SK_PermuteTwoSrc && LT.first != 1) { + // We assume that source and destination have the same vector type. + int NumOfDests = LT.first; + int NumOfShufflesPerDest = LT.first * 2 - 1; + LT.first = NumOfDests * NumOfShufflesPerDest; + } - static const CostTblEntry AVX512ShuffleTbl[] = { - { TTI::SK_Reverse, MVT::v8f64, 1 }, // vpermpd - { TTI::SK_Reverse, MVT::v16f32, 1 }, // vpermps - { TTI::SK_Reverse, MVT::v8i64, 1 }, // vpermq - { TTI::SK_Reverse, MVT::v16i32, 1 }, // vpermd - }; + static const CostTblEntry AVX512VBMIShuffleTbl[] = { + { TTI::SK_Reverse, MVT::v64i8, 1 }, // vpermb + { TTI::SK_Reverse, MVT::v32i8, 1 }, // vpermb - if (ST->hasAVX512()) - if (const auto *Entry = - CostTableLookup(AVX512ShuffleTbl, Kind, LT.second)) - return LT.first * Entry->Cost; + { TTI::SK_PermuteSingleSrc, MVT::v64i8, 1 }, // vpermb + { TTI::SK_PermuteSingleSrc, MVT::v32i8, 1 }, // vpermb - static const CostTblEntry AVX2ShuffleTbl[] = { - { TTI::SK_Reverse, MVT::v4f64, 1 }, // vpermpd - { TTI::SK_Reverse, MVT::v8f32, 1 }, // vpermps - { TTI::SK_Reverse, MVT::v4i64, 1 }, // vpermq - { TTI::SK_Reverse, MVT::v8i32, 1 }, // vpermd - { TTI::SK_Reverse, MVT::v16i16, 2 }, // vperm2i128 + pshufb - { TTI::SK_Reverse, MVT::v32i8, 2 }, // vperm2i128 + pshufb + { TTI::SK_PermuteTwoSrc, MVT::v64i8, 1 }, // vpermt2b + { TTI::SK_PermuteTwoSrc, MVT::v32i8, 1 }, // vpermt2b + { TTI::SK_PermuteTwoSrc, MVT::v16i8, 1 } // vpermt2b + }; - { TTI::SK_Alternate, MVT::v16i16, 1 }, // vpblendw - { TTI::SK_Alternate, MVT::v32i8, 1 } // vpblendvb - }; + if (ST->hasVBMI()) + if (const auto *Entry = + CostTableLookup(AVX512VBMIShuffleTbl, Kind, LT.second)) + return LT.first * Entry->Cost; - if (ST->hasAVX2()) - if (const auto *Entry = CostTableLookup(AVX2ShuffleTbl, Kind, LT.second)) - return LT.first * Entry->Cost; + static const CostTblEntry AVX512BWShuffleTbl[] = { + { TTI::SK_Broadcast, MVT::v32i16, 1 }, // vpbroadcastw + { TTI::SK_Broadcast, MVT::v64i8, 1 }, // vpbroadcastb + + { TTI::SK_Reverse, MVT::v32i16, 1 }, // vpermw + { TTI::SK_Reverse, MVT::v16i16, 1 }, // vpermw + { TTI::SK_Reverse, MVT::v64i8, 6 }, // vextracti64x4 + 2*vperm2i128 + // + 2*pshufb + vinserti64x4 + + { 
TTI::SK_PermuteSingleSrc, MVT::v32i16, 1 }, // vpermw + { TTI::SK_PermuteSingleSrc, MVT::v16i16, 1 }, // vpermw + { TTI::SK_PermuteSingleSrc, MVT::v8i16, 1 }, // vpermw + { TTI::SK_PermuteSingleSrc, MVT::v64i8, 8 }, // extend to v32i16 + { TTI::SK_PermuteSingleSrc, MVT::v32i8, 3 }, // vpermw + zext/trunc + + { TTI::SK_PermuteTwoSrc, MVT::v32i16, 1 }, // vpermt2w + { TTI::SK_PermuteTwoSrc, MVT::v16i16, 1 }, // vpermt2w + { TTI::SK_PermuteTwoSrc, MVT::v8i16, 1 }, // vpermt2w + { TTI::SK_PermuteTwoSrc, MVT::v32i8, 3 }, // zext + vpermt2w + trunc + { TTI::SK_PermuteTwoSrc, MVT::v64i8, 19 }, // 6 * v32i8 + 1 + { TTI::SK_PermuteTwoSrc, MVT::v16i8, 3 } // zext + vpermt2w + trunc + }; - static const CostTblEntry AVX1ShuffleTbl[] = { - { TTI::SK_Reverse, MVT::v4f64, 2 }, // vperm2f128 + vpermilpd - { TTI::SK_Reverse, MVT::v8f32, 2 }, // vperm2f128 + vpermilps - { TTI::SK_Reverse, MVT::v4i64, 2 }, // vperm2f128 + vpermilpd - { TTI::SK_Reverse, MVT::v8i32, 2 }, // vperm2f128 + vpermilps - { TTI::SK_Reverse, MVT::v16i16, 4 }, // vextractf128 + 2*pshufb - // + vinsertf128 - { TTI::SK_Reverse, MVT::v32i8, 4 }, // vextractf128 + 2*pshufb - // + vinsertf128 - - { TTI::SK_Alternate, MVT::v4i64, 1 }, // vblendpd - { TTI::SK_Alternate, MVT::v4f64, 1 }, // vblendpd - { TTI::SK_Alternate, MVT::v8i32, 1 }, // vblendps - { TTI::SK_Alternate, MVT::v8f32, 1 }, // vblendps - { TTI::SK_Alternate, MVT::v16i16, 3 }, // vpand + vpandn + vpor - { TTI::SK_Alternate, MVT::v32i8, 3 } // vpand + vpandn + vpor - }; + if (ST->hasBWI()) + if (const auto *Entry = + CostTableLookup(AVX512BWShuffleTbl, Kind, LT.second)) + return LT.first * Entry->Cost; - if (ST->hasAVX()) - if (const auto *Entry = CostTableLookup(AVX1ShuffleTbl, Kind, LT.second)) - return LT.first * Entry->Cost; + static const CostTblEntry AVX512ShuffleTbl[] = { + { TTI::SK_Broadcast, MVT::v8f64, 1 }, // vbroadcastpd + { TTI::SK_Broadcast, MVT::v16f32, 1 }, // vbroadcastps + { TTI::SK_Broadcast, MVT::v8i64, 1 }, // vpbroadcastq + { TTI::SK_Broadcast, MVT::v16i32, 1 }, // vpbroadcastd + + { TTI::SK_Reverse, MVT::v8f64, 1 }, // vpermpd + { TTI::SK_Reverse, MVT::v16f32, 1 }, // vpermps + { TTI::SK_Reverse, MVT::v8i64, 1 }, // vpermq + { TTI::SK_Reverse, MVT::v16i32, 1 }, // vpermd + + { TTI::SK_PermuteSingleSrc, MVT::v8f64, 1 }, // vpermpd + { TTI::SK_PermuteSingleSrc, MVT::v4f64, 1 }, // vpermpd + { TTI::SK_PermuteSingleSrc, MVT::v2f64, 1 }, // vpermpd + { TTI::SK_PermuteSingleSrc, MVT::v16f32, 1 }, // vpermps + { TTI::SK_PermuteSingleSrc, MVT::v8f32, 1 }, // vpermps + { TTI::SK_PermuteSingleSrc, MVT::v4f32, 1 }, // vpermps + { TTI::SK_PermuteSingleSrc, MVT::v8i64, 1 }, // vpermq + { TTI::SK_PermuteSingleSrc, MVT::v4i64, 1 }, // vpermq + { TTI::SK_PermuteSingleSrc, MVT::v2i64, 1 }, // vpermq + { TTI::SK_PermuteSingleSrc, MVT::v16i32, 1 }, // vpermd + { TTI::SK_PermuteSingleSrc, MVT::v8i32, 1 }, // vpermd + { TTI::SK_PermuteSingleSrc, MVT::v4i32, 1 }, // vpermd + { TTI::SK_PermuteSingleSrc, MVT::v16i8, 1 }, // pshufb + + { TTI::SK_PermuteTwoSrc, MVT::v8f64, 1 }, // vpermt2pd + { TTI::SK_PermuteTwoSrc, MVT::v16f32, 1 }, // vpermt2ps + { TTI::SK_PermuteTwoSrc, MVT::v8i64, 1 }, // vpermt2q + { TTI::SK_PermuteTwoSrc, MVT::v16i32, 1 }, // vpermt2d + { TTI::SK_PermuteTwoSrc, MVT::v4f64, 1 }, // vpermt2pd + { TTI::SK_PermuteTwoSrc, MVT::v8f32, 1 }, // vpermt2ps + { TTI::SK_PermuteTwoSrc, MVT::v4i64, 1 }, // vpermt2q + { TTI::SK_PermuteTwoSrc, MVT::v8i32, 1 }, // vpermt2d + { TTI::SK_PermuteTwoSrc, MVT::v2f64, 1 }, // vpermt2pd + { TTI::SK_PermuteTwoSrc, MVT::v4f32, 1 }, // 
vpermt2ps + { TTI::SK_PermuteTwoSrc, MVT::v2i64, 1 }, // vpermt2q + { TTI::SK_PermuteTwoSrc, MVT::v4i32, 1 } // vpermt2d + }; - static const CostTblEntry SSE41ShuffleTbl[] = { - { TTI::SK_Alternate, MVT::v2i64, 1 }, // pblendw - { TTI::SK_Alternate, MVT::v2f64, 1 }, // movsd - { TTI::SK_Alternate, MVT::v4i32, 1 }, // pblendw - { TTI::SK_Alternate, MVT::v4f32, 1 }, // blendps - { TTI::SK_Alternate, MVT::v8i16, 1 }, // pblendw - { TTI::SK_Alternate, MVT::v16i8, 1 } // pblendvb - }; - - if (ST->hasSSE41()) - if (const auto *Entry = CostTableLookup(SSE41ShuffleTbl, Kind, LT.second)) - return LT.first * Entry->Cost; + if (ST->hasAVX512()) + if (const auto *Entry = CostTableLookup(AVX512ShuffleTbl, Kind, LT.second)) + return LT.first * Entry->Cost; - static const CostTblEntry SSSE3ShuffleTbl[] = { - { TTI::SK_Reverse, MVT::v8i16, 1 }, // pshufb - { TTI::SK_Reverse, MVT::v16i8, 1 }, // pshufb + static const CostTblEntry AVX2ShuffleTbl[] = { + { TTI::SK_Broadcast, MVT::v4f64, 1 }, // vbroadcastpd + { TTI::SK_Broadcast, MVT::v8f32, 1 }, // vbroadcastps + { TTI::SK_Broadcast, MVT::v4i64, 1 }, // vpbroadcastq + { TTI::SK_Broadcast, MVT::v8i32, 1 }, // vpbroadcastd + { TTI::SK_Broadcast, MVT::v16i16, 1 }, // vpbroadcastw + { TTI::SK_Broadcast, MVT::v32i8, 1 }, // vpbroadcastb + + { TTI::SK_Reverse, MVT::v4f64, 1 }, // vpermpd + { TTI::SK_Reverse, MVT::v8f32, 1 }, // vpermps + { TTI::SK_Reverse, MVT::v4i64, 1 }, // vpermq + { TTI::SK_Reverse, MVT::v8i32, 1 }, // vpermd + { TTI::SK_Reverse, MVT::v16i16, 2 }, // vperm2i128 + pshufb + { TTI::SK_Reverse, MVT::v32i8, 2 }, // vperm2i128 + pshufb + + { TTI::SK_Alternate, MVT::v16i16, 1 }, // vpblendw + { TTI::SK_Alternate, MVT::v32i8, 1 } // vpblendvb + }; - { TTI::SK_Alternate, MVT::v8i16, 3 }, // pshufb + pshufb + por - { TTI::SK_Alternate, MVT::v16i8, 3 } // pshufb + pshufb + por - }; + if (ST->hasAVX2()) + if (const auto *Entry = CostTableLookup(AVX2ShuffleTbl, Kind, LT.second)) + return LT.first * Entry->Cost; - if (ST->hasSSSE3()) - if (const auto *Entry = CostTableLookup(SSSE3ShuffleTbl, Kind, LT.second)) - return LT.first * Entry->Cost; + static const CostTblEntry AVX1ShuffleTbl[] = { + { TTI::SK_Broadcast, MVT::v4f64, 2 }, // vperm2f128 + vpermilpd + { TTI::SK_Broadcast, MVT::v8f32, 2 }, // vperm2f128 + vpermilps + { TTI::SK_Broadcast, MVT::v4i64, 2 }, // vperm2f128 + vpermilpd + { TTI::SK_Broadcast, MVT::v8i32, 2 }, // vperm2f128 + vpermilps + { TTI::SK_Broadcast, MVT::v16i16, 3 }, // vpshuflw + vpshufd + vinsertf128 + { TTI::SK_Broadcast, MVT::v32i8, 2 }, // vpshufb + vinsertf128 + + { TTI::SK_Reverse, MVT::v4f64, 2 }, // vperm2f128 + vpermilpd + { TTI::SK_Reverse, MVT::v8f32, 2 }, // vperm2f128 + vpermilps + { TTI::SK_Reverse, MVT::v4i64, 2 }, // vperm2f128 + vpermilpd + { TTI::SK_Reverse, MVT::v8i32, 2 }, // vperm2f128 + vpermilps + { TTI::SK_Reverse, MVT::v16i16, 4 }, // vextractf128 + 2*pshufb + // + vinsertf128 + { TTI::SK_Reverse, MVT::v32i8, 4 }, // vextractf128 + 2*pshufb + // + vinsertf128 + + { TTI::SK_Alternate, MVT::v4i64, 1 }, // vblendpd + { TTI::SK_Alternate, MVT::v4f64, 1 }, // vblendpd + { TTI::SK_Alternate, MVT::v8i32, 1 }, // vblendps + { TTI::SK_Alternate, MVT::v8f32, 1 }, // vblendps + { TTI::SK_Alternate, MVT::v16i16, 3 }, // vpand + vpandn + vpor + { TTI::SK_Alternate, MVT::v32i8, 3 } // vpand + vpandn + vpor + }; - static const CostTblEntry SSE2ShuffleTbl[] = { - { TTI::SK_Reverse, MVT::v2f64, 1 }, // shufpd - { TTI::SK_Reverse, MVT::v2i64, 1 }, // pshufd - { TTI::SK_Reverse, MVT::v4i32, 1 }, // pshufd - { TTI::SK_Reverse, 
MVT::v8i16, 3 }, // pshuflw + pshufhw + pshufd - { TTI::SK_Reverse, MVT::v16i8, 9 }, // 2*pshuflw + 2*pshufhw - // + 2*pshufd + 2*unpck + packus - - { TTI::SK_Alternate, MVT::v2i64, 1 }, // movsd - { TTI::SK_Alternate, MVT::v2f64, 1 }, // movsd - { TTI::SK_Alternate, MVT::v4i32, 2 }, // 2*shufps - { TTI::SK_Alternate, MVT::v8i16, 3 }, // pand + pandn + por - { TTI::SK_Alternate, MVT::v16i8, 3 } // pand + pandn + por - }; - - if (ST->hasSSE2()) - if (const auto *Entry = CostTableLookup(SSE2ShuffleTbl, Kind, LT.second)) - return LT.first * Entry->Cost; + if (ST->hasAVX()) + if (const auto *Entry = CostTableLookup(AVX1ShuffleTbl, Kind, LT.second)) + return LT.first * Entry->Cost; - static const CostTblEntry SSE1ShuffleTbl[] = { - { TTI::SK_Reverse, MVT::v4f32, 1 }, // shufps - { TTI::SK_Alternate, MVT::v4f32, 2 } // 2*shufps - }; + static const CostTblEntry SSE41ShuffleTbl[] = { + { TTI::SK_Alternate, MVT::v2i64, 1 }, // pblendw + { TTI::SK_Alternate, MVT::v2f64, 1 }, // movsd + { TTI::SK_Alternate, MVT::v4i32, 1 }, // pblendw + { TTI::SK_Alternate, MVT::v4f32, 1 }, // blendps + { TTI::SK_Alternate, MVT::v8i16, 1 }, // pblendw + { TTI::SK_Alternate, MVT::v16i8, 1 } // pblendvb + }; - if (ST->hasSSE1()) - if (const auto *Entry = CostTableLookup(SSE1ShuffleTbl, Kind, LT.second)) - return LT.first * Entry->Cost; + if (ST->hasSSE41()) + if (const auto *Entry = CostTableLookup(SSE41ShuffleTbl, Kind, LT.second)) + return LT.first * Entry->Cost; - } else if (Kind == TTI::SK_PermuteTwoSrc) { - // We assume that source and destination have the same vector type. - std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp); - int NumOfDests = LT.first; - int NumOfShufflesPerDest = LT.first * 2 - 1; - int NumOfShuffles = NumOfDests * NumOfShufflesPerDest; - - static const CostTblEntry AVX512VBMIShuffleTbl[] = { - {ISD::VECTOR_SHUFFLE, MVT::v64i8, 1}, // vpermt2b - {ISD::VECTOR_SHUFFLE, MVT::v32i8, 1}, // vpermt2b - {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1} // vpermt2b - }; - - if (ST->hasVBMI()) - if (const auto *Entry = CostTableLookup(AVX512VBMIShuffleTbl, - ISD::VECTOR_SHUFFLE, LT.second)) - return NumOfShuffles * Entry->Cost; - - static const CostTblEntry AVX512BWShuffleTbl[] = { - {ISD::VECTOR_SHUFFLE, MVT::v32i16, 1}, // vpermt2w - {ISD::VECTOR_SHUFFLE, MVT::v16i16, 1}, // vpermt2w - {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1}, // vpermt2w - {ISD::VECTOR_SHUFFLE, MVT::v32i8, 3}, // zext + vpermt2w + trunc - {ISD::VECTOR_SHUFFLE, MVT::v64i8, 19}, // 6 * v32i8 + 1 - {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3} // zext + vpermt2w + trunc - }; - - if (ST->hasBWI()) - if (const auto *Entry = CostTableLookup(AVX512BWShuffleTbl, - ISD::VECTOR_SHUFFLE, LT.second)) - return NumOfShuffles * Entry->Cost; - - static const CostTblEntry AVX512ShuffleTbl[] = { - {ISD::VECTOR_SHUFFLE, MVT::v8f64, 1}, // vpermt2pd - {ISD::VECTOR_SHUFFLE, MVT::v16f32, 1}, // vpermt2ps - {ISD::VECTOR_SHUFFLE, MVT::v8i64, 1}, // vpermt2q - {ISD::VECTOR_SHUFFLE, MVT::v16i32, 1}, // vpermt2d - {ISD::VECTOR_SHUFFLE, MVT::v4f64, 1}, // vpermt2pd - {ISD::VECTOR_SHUFFLE, MVT::v8f32, 1}, // vpermt2ps - {ISD::VECTOR_SHUFFLE, MVT::v4i64, 1}, // vpermt2q - {ISD::VECTOR_SHUFFLE, MVT::v8i32, 1}, // vpermt2d - {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1}, // vpermt2pd - {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1}, // vpermt2ps - {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1}, // vpermt2q - {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1} // vpermt2d - }; + static const CostTblEntry SSSE3ShuffleTbl[] = { + { TTI::SK_Broadcast, MVT::v8i16, 1 }, // pshufb + { TTI::SK_Broadcast, MVT::v16i8, 1 }, // 
pshufb - if (ST->hasAVX512()) - if (const auto *Entry = - CostTableLookup(AVX512ShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second)) - return NumOfShuffles * Entry->Cost; - - } else if (Kind == TTI::SK_PermuteSingleSrc) { - std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp); - if (LT.first == 1) { - - static const CostTblEntry AVX512VBMIShuffleTbl[] = { - {ISD::VECTOR_SHUFFLE, MVT::v64i8, 1}, // vpermb - {ISD::VECTOR_SHUFFLE, MVT::v32i8, 1} // vpermb - }; - - if (ST->hasVBMI()) - if (const auto *Entry = CostTableLookup(AVX512VBMIShuffleTbl, - ISD::VECTOR_SHUFFLE, LT.second)) - return Entry->Cost; - - static const CostTblEntry AVX512BWShuffleTbl[] = { - {ISD::VECTOR_SHUFFLE, MVT::v32i16, 1}, // vpermw - {ISD::VECTOR_SHUFFLE, MVT::v16i16, 1}, // vpermw - {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1}, // vpermw - {ISD::VECTOR_SHUFFLE, MVT::v64i8, 8}, // extend to v32i16 - {ISD::VECTOR_SHUFFLE, MVT::v32i8, 3} // vpermw + zext/trunc - }; - - if (ST->hasBWI()) - if (const auto *Entry = CostTableLookup(AVX512BWShuffleTbl, - ISD::VECTOR_SHUFFLE, LT.second)) - return Entry->Cost; - - static const CostTblEntry AVX512ShuffleTbl[] = { - {ISD::VECTOR_SHUFFLE, MVT::v8f64, 1}, // vpermpd - {ISD::VECTOR_SHUFFLE, MVT::v4f64, 1}, // vpermpd - {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1}, // vpermpd - {ISD::VECTOR_SHUFFLE, MVT::v16f32, 1}, // vpermps - {ISD::VECTOR_SHUFFLE, MVT::v8f32, 1}, // vpermps - {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1}, // vpermps - {ISD::VECTOR_SHUFFLE, MVT::v8i64, 1}, // vpermq - {ISD::VECTOR_SHUFFLE, MVT::v4i64, 1}, // vpermq - {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1}, // vpermq - {ISD::VECTOR_SHUFFLE, MVT::v16i32, 1}, // vpermd - {ISD::VECTOR_SHUFFLE, MVT::v8i32, 1}, // vpermd - {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1}, // vpermd - {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1} // pshufb - }; - - if (ST->hasAVX512()) - if (const auto *Entry = - CostTableLookup(AVX512ShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second)) - return Entry->Cost; - - } else { - // We are going to permute multiple sources and the result will be in - // multiple destinations. Providing an accurate cost only for splits where - // the element type remains the same. 
- - MVT LegalVT = LT.second; - if (LegalVT.getVectorElementType().getSizeInBits() == - Tp->getVectorElementType()->getPrimitiveSizeInBits() && - LegalVT.getVectorNumElements() < Tp->getVectorNumElements()) { - - unsigned VecTySize = DL.getTypeStoreSize(Tp); - unsigned LegalVTSize = LegalVT.getStoreSize(); - // Number of source vectors after legalization: - unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize; - // Number of destination vectors after legalization: - unsigned NumOfDests = LT.first; - - Type *SingleOpTy = VectorType::get(Tp->getVectorElementType(), - LegalVT.getVectorNumElements()); - - unsigned NumOfShuffles = (NumOfSrcs - 1) * NumOfDests; - return NumOfShuffles * - getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy, 0, nullptr); - } - } - } + { TTI::SK_Reverse, MVT::v8i16, 1 }, // pshufb + { TTI::SK_Reverse, MVT::v16i8, 1 }, // pshufb + + { TTI::SK_Alternate, MVT::v8i16, 3 }, // pshufb + pshufb + por + { TTI::SK_Alternate, MVT::v16i8, 3 } // pshufb + pshufb + por + }; + + if (ST->hasSSSE3()) + if (const auto *Entry = CostTableLookup(SSSE3ShuffleTbl, Kind, LT.second)) + return LT.first * Entry->Cost; + + static const CostTblEntry SSE2ShuffleTbl[] = { + { TTI::SK_Broadcast, MVT::v2f64, 1 }, // shufpd + { TTI::SK_Broadcast, MVT::v2i64, 1 }, // pshufd + { TTI::SK_Broadcast, MVT::v4i32, 1 }, // pshufd + { TTI::SK_Broadcast, MVT::v8i16, 2 }, // pshuflw + pshufd + { TTI::SK_Broadcast, MVT::v16i8, 3 }, // unpck + pshuflw + pshufd + + { TTI::SK_Reverse, MVT::v2f64, 1 }, // shufpd + { TTI::SK_Reverse, MVT::v2i64, 1 }, // pshufd + { TTI::SK_Reverse, MVT::v4i32, 1 }, // pshufd + { TTI::SK_Reverse, MVT::v8i16, 3 }, // pshuflw + pshufhw + pshufd + { TTI::SK_Reverse, MVT::v16i8, 9 }, // 2*pshuflw + 2*pshufhw + // + 2*pshufd + 2*unpck + packus + + { TTI::SK_Alternate, MVT::v2i64, 1 }, // movsd + { TTI::SK_Alternate, MVT::v2f64, 1 }, // movsd + { TTI::SK_Alternate, MVT::v4i32, 2 }, // 2*shufps + { TTI::SK_Alternate, MVT::v8i16, 3 }, // pand + pandn + por + { TTI::SK_Alternate, MVT::v16i8, 3 } // pand + pandn + por + }; + + if (ST->hasSSE2()) + if (const auto *Entry = CostTableLookup(SSE2ShuffleTbl, Kind, LT.second)) + return LT.first * Entry->Cost; + + static const CostTblEntry SSE1ShuffleTbl[] = { + { TTI::SK_Broadcast, MVT::v4f32, 1 }, // shufps + { TTI::SK_Reverse, MVT::v4f32, 1 }, // shufps + { TTI::SK_Alternate, MVT::v4f32, 2 } // 2*shufps + }; + + if (ST->hasSSE1()) + if (const auto *Entry = CostTableLookup(SSE1ShuffleTbl, Kind, LT.second)) + return LT.first * Entry->Cost; return BaseT::getShuffleCost(Kind, Tp, Index, SubTp); } @@ -1623,17 +1587,29 @@ int X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy, return Cost+LT.first; } -int X86TTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) { +int X86TTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE, + const SCEV *Ptr) { // Address computations in vectorized code with non-consecutive addresses will // likely result in more instructions compared to scalar code where the // computation can more often be merged into the index mode. The resulting // extra micro-ops can significantly decrease throughput. unsigned NumVectorInstToHideOverhead = 10; - if (Ty->isVectorTy() && IsComplex) - return NumVectorInstToHideOverhead; + // Cost modeling of Strided Access Computation is hidden by the indexing + // modes of X86 regardless of the stride value. 
We don't believe that there is a difference between constant strided access in general and constant strided access whose stride value is less than or equal to 64. + // Even in the case of a (loop invariant) stride whose value is not known at + // compile time, the address computation will not incur more than one extra + // ADD instruction. + if (Ty->isVectorTy() && SE) { + if (!BaseT::isStridedAccess(Ptr)) + return NumVectorInstToHideOverhead; + if (!BaseT::getConstantStrideStep(SE, Ptr)) + return 1; + } - return BaseT::getAddressComputationCost(Ty, IsComplex); + return BaseT::getAddressComputationCost(Ty, SE, Ptr); } int X86TTIImpl::getReductionCost(unsigned Opcode, Type *ValTy, diff --git a/lib/Target/X86/X86TargetTransformInfo.h b/lib/Target/X86/X86TargetTransformInfo.h index f6bcb9f569e4..c013805f4321 100644 --- a/lib/Target/X86/X86TargetTransformInfo.h +++ b/lib/Target/X86/X86TargetTransformInfo.h @@ -71,7 +71,8 @@ public: unsigned AddressSpace); int getGatherScatterOpCost(unsigned Opcode, Type *DataTy, Value *Ptr, bool VariableMask, unsigned Alignment); - int getAddressComputationCost(Type *PtrTy, bool IsComplex); + int getAddressComputationCost(Type *PtrTy, ScalarEvolution *SE, + const SCEV *Ptr); int getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy, ArrayRef<Type *> Tys, FastMathFlags FMF);
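
For context on the truncate-with-unsigned-saturation combine above: detectUSatPattern matches (truncate (umin x, unsigned_max_of_dest_type)), and the patch lowers the vector form to a single X86ISD::VTRUNCUS node (AVX-512's VPMOVUS* family of saturating-truncate instructions, e.g. VPMOVUSDB for v16i32 to v16i8). The following self-contained C++ sketch models the scalar semantics of the matched pattern; it is illustrative only and not part of the patch, and the helper name truncateUSat8 is invented here.

#include <algorithm>
#include <cassert>
#include <cstdint>

// Scalar model of the DAG pattern (truncate (umin x, UINT8_MAX) to i8):
// clamping to the destination type's unsigned maximum before truncating
// yields an unsigned saturating truncation.
static uint8_t truncateUSat8(uint32_t X) {
  return static_cast<uint8_t>(std::min<uint32_t>(X, UINT8_MAX));
}

int main() {
  assert(truncateUSat8(42) == 42);          // in range: value passes through
  assert(truncateUSat8(300) == 255);        // out of range: saturates instead of wrapping to 44
  assert(truncateUSat8(UINT32_MAX) == 255); // saturates at the destination maximum
  return 0;
}

The umin clamp is what makes the rewrite sound: a plain truncate wraps out-of-range values, so the combine only fires when the splat constant equals 2^N - 1 for an N-bit destination element type, exactly the check at the end of detectUSatPattern.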