Diffstat (limited to 'include/llvm/Target/TargetLowering.h')
-rw-r--r-- | include/llvm/Target/TargetLowering.h | 348
1 files changed, 160 insertions, 188 deletions
diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h
index 43d6f2772c5e6..e2fe9e85fc92f 100644
--- a/include/llvm/Target/TargetLowering.h
+++ b/include/llvm/Target/TargetLowering.h
@@ -77,8 +77,8 @@ namespace llvm {
 /// This base class for TargetLowering contains the SelectionDAG-independent
 /// parts that can be used from the rest of CodeGen.
 class TargetLoweringBase {
-  TargetLoweringBase(const TargetLoweringBase&) LLVM_DELETED_FUNCTION;
-  void operator=(const TargetLoweringBase&) LLVM_DELETED_FUNCTION;
+  TargetLoweringBase(const TargetLoweringBase&) = delete;
+  void operator=(const TargetLoweringBase&) = delete;
 
 public:
   /// This enum indicates whether operations are valid for a target, and if not,
@@ -100,7 +100,8 @@ public:
     TypeExpandFloat,     // Split this float into two of half the size.
     TypeScalarizeVector, // Replace this one-element vector with its element.
     TypeSplitVector,     // Split this vector into two of half the size.
-    TypeWidenVector      // This vector should be widened into a larger vector.
+    TypeWidenVector,     // This vector should be widened into a larger vector.
+    TypePromoteFloat     // Replace this float with a larger one.
   };
 
   /// LegalizeKind holds the legalization kind that needs to happen to EVT
@@ -123,6 +124,18 @@ public:
                           // mask (ex: x86 blends).
   };
 
+  /// Enum that specifies what a AtomicRMWInst is expanded to, if at all. Exists
+  /// because different targets have different levels of support for these
+  /// atomic RMW instructions, and also have different options w.r.t. what they
+  /// should expand to.
+  enum class AtomicRMWExpansionKind {
+    None,    // Don't expand the instruction.
+    LLSC,    // Expand the instruction into loadlinked/storeconditional; used
+             // by ARM/AArch64. Implies `hasLoadLinkedStoreConditional`
+             // returns true.
+    CmpXChg, // Expand the instruction into cmpxchg; used by at least X86.
+  };
+
   static ISD::NodeType getExtendForContent(BooleanContent Content) {
     switch (Content) {
     case UndefinedBooleanContent:
@@ -148,13 +161,11 @@ protected:
 
 public:
   const TargetMachine &getTargetMachine() const { return TM; }
-  const DataLayout *getDataLayout() const { return DL; }
-  const TargetLoweringObjectFile &getObjFileLowering() const {
-    return *TM.getObjFileLowering();
-  }
+  const DataLayout *getDataLayout() const { return TM.getDataLayout(); }
 
   bool isBigEndian() const { return !IsLittleEndian; }
   bool isLittleEndian() const { return IsLittleEndian; }
+  virtual bool useSoftFloat() const { return false; }
 
   /// Return the pointer type for the given address space, defaults to
   /// the pointer type from the data layout.
@@ -247,19 +258,29 @@ public:
   /// isLoadBitCastBeneficial() - Return true if the following transform
   /// is beneficial.
   /// fold (conv (load x)) -> (load (conv*)x)
-  /// On architectures that don't natively support some vector loads efficiently,
-  /// casting the load to a smaller vector of larger types and loading
-  /// is more efficient, however, this can be undone by optimizations in
+  /// On architectures that don't natively support some vector loads
+  /// efficiently, casting the load to a smaller vector of larger types and
+  /// loading is more efficient, however, this can be undone by optimizations in
   /// dag combiner.
-  virtual bool isLoadBitCastBeneficial(EVT /* Load */, EVT /* Bitcast */) const {
+  virtual bool isLoadBitCastBeneficial(EVT /* Load */,
+                                       EVT /* Bitcast */) const {
     return true;
   }
 
+  /// Return true if it is expected to be cheaper to do a store of a non-zero
+  /// vector constant with the given size and type for the address space than to
+  /// store the individual scalar element constants.
+  virtual bool storeOfVectorConstantIsCheap(EVT MemVT,
+                                            unsigned NumElem,
+                                            unsigned AddrSpace) const {
+    return false;
+  }
+
   /// \brief Return true if it is cheap to speculate a call to intrinsic cttz.
   virtual bool isCheapToSpeculateCttz() const {
     return false;
   }
-
+
   /// \brief Return true if it is cheap to speculate a call to intrinsic ctlz.
   virtual bool isCheapToSpeculateCtlz() const {
     return false;
@@ -562,7 +583,8 @@ public:
   /// Return how this load with extension should be treated: either it is legal,
   /// needs to be promoted to a larger size, needs to be expanded to some other
   /// code sequence, or the target has a custom expander for it.
-  LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT, EVT MemVT) const {
+  LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT,
+                                  EVT MemVT) const {
     if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
     unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
     unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
@@ -577,6 +599,14 @@ public:
           getLoadExtAction(ExtType, ValVT, MemVT) == Legal;
   }
 
+  /// Return true if the specified load with extension is legal or custom
+  /// on this target.
+  bool isLoadExtLegalOrCustom(unsigned ExtType, EVT ValVT, EVT MemVT) const {
+    return ValVT.isSimple() && MemVT.isSimple() &&
+           (getLoadExtAction(ExtType, ValVT, MemVT) == Legal ||
+            getLoadExtAction(ExtType, ValVT, MemVT) == Custom);
+  }
+
   /// Return how this store with truncation should be treated: either it is
   /// legal, needs to be promoted to a larger size, needs to be expanded to some
   /// other code sequence, or the target has a custom expander for it.
@@ -954,17 +984,20 @@ public:
     return false;
   }
 
-  /// Returns the maximal possible offset which can be used for loads / stores
-  /// from the global.
-  virtual unsigned getMaximalGlobalOffset() const {
-    return 0;
-  }
-
   /// Returns true if a cast between SrcAS and DestAS is a noop.
   virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
     return false;
   }
 
+  /// Return true if the pointer arguments to CI should be aligned by aligning
+  /// the object whose address is being passed. If so then MinSize is set to the
+  /// minimum size the object must be to be aligned and PrefAlign is set to the
+  /// preferred alignment.
+  virtual bool shouldAlignPointerArgs(CallInst * /*CI*/, unsigned & /*MinSize*/,
+                                      unsigned & /*PrefAlign*/) const {
+    return false;
+  }
+
   //===--------------------------------------------------------------------===//
   /// \name Helpers for TargetTransformInfo implementations
   /// @{
@@ -1031,8 +1064,9 @@ public:
   /// seq_cst. But if they are lowered to monotonic accesses, no amount of
   /// IR-level fences can prevent it.
   /// @{
-  virtual Instruction* emitLeadingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
-                                        bool IsStore, bool IsLoad) const {
+  virtual Instruction *emitLeadingFence(IRBuilder<> &Builder,
+                                        AtomicOrdering Ord, bool IsStore,
+                                        bool IsLoad) const {
     if (!getInsertFencesForAtomic())
       return nullptr;
@@ -1042,8 +1076,9 @@ public:
     return nullptr;
   }
 
-  virtual Instruction* emitTrailingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
-                                         bool IsStore, bool IsLoad) const {
+  virtual Instruction *emitTrailingFence(IRBuilder<> &Builder,
+                                         AtomicOrdering Ord, bool IsStore,
+                                         bool IsLoad) const {
     if (!getInsertFencesForAtomic())
       return nullptr;
@@ -1060,15 +1095,21 @@ public:
     return false;
   }
 
+  /// Returns true if arguments should be sign-extended in lib calls.
+  virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
+    return IsSigned;
+  }
+
   /// Returns true if the given (atomic) load should be expanded by the
   /// IR-level AtomicExpand pass into a load-linked instruction
   /// (through emitLoadLinked()).
   virtual bool shouldExpandAtomicLoadInIR(LoadInst *LI) const { return false; }
 
-  /// Returns true if the given AtomicRMW should be expanded by the
-  /// IR-level AtomicExpand pass into a loop using LoadLinked/StoreConditional.
-  virtual bool shouldExpandAtomicRMWInIR(AtomicRMWInst *RMWI) const {
-    return false;
+  /// Returns how the IR-level AtomicExpand pass should expand the given
+  /// AtomicRMW, if at all. Default is to never expand.
+  virtual AtomicRMWExpansionKind
+  shouldExpandAtomicRMWInIR(AtomicRMWInst *) const {
+    return AtomicRMWExpansionKind::None;
   }
 
   /// On some platforms, an AtomicRMW that never actually modifies the value
@@ -1082,17 +1123,33 @@ public:
   /// it succeeds, and nullptr otherwise.
   /// If shouldExpandAtomicLoadInIR returns true on that load, it will undergo
   /// another round of expansion.
-  virtual LoadInst *lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
+  virtual LoadInst *
+  lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
     return nullptr;
   }
+
+  /// Returns true if we should normalize
+  /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
+  /// select(N0|N1, X, Y) => select(N0, select(N1, X, Y, Y)) if it is likely
+  /// that it saves us from materializing N0 and N1 in an integer register.
+  /// Targets that are able to perform and/or on flags should return false here.
+  virtual bool shouldNormalizeToSelectSequence(LLVMContext &Context,
+                                               EVT VT) const {
+    // If a target has multiple condition registers, then it likely has logical
+    // operations on those registers.
+    if (hasMultipleConditionRegisters())
+      return false;
+    // Only do the transform if the value won't be split into multiple
+    // registers.
+    LegalizeTypeAction Action = getTypeAction(Context, VT);
+    return Action != TypeExpandInteger && Action != TypeExpandFloat &&
+           Action != TypeSplitVector;
+  }
+
   //===--------------------------------------------------------------------===//
   // TargetLowering Configuration Methods - These methods should be invoked by
   // the derived class constructor to configure this object for the target.
   //
-
-  /// \brief Reset the operation actions based on target options.
-  virtual void resetOperationActions() {}
-
 protected:
   /// Specify how the target extends the result of integer and floating point
   /// boolean values from i1 to a wider type. See getBooleanContents.
@@ -1230,12 +1287,12 @@ protected:
 
   /// Return the largest legal super-reg register class of the register class
   /// for the specified type and its associated "cost".
-  virtual std::pair<const TargetRegisterClass*, uint8_t>
-  findRepresentativeClass(MVT VT) const;
+  virtual std::pair<const TargetRegisterClass *, uint8_t>
+  findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const;
 
   /// Once all of the register classes are added, this allows us to compute
   /// derived properties we expose.
-  void computeRegisterProperties();
+  void computeRegisterProperties(const TargetRegisterInfo *TRI);
 
   /// Indicate that the specified operation does not work with the specified
   /// type and indicate what to do about it.
@@ -1455,6 +1512,35 @@ public:
     return false;
   }
 
+  virtual bool isProfitableToHoist(Instruction *I) const { return true; }
+
+  /// Return true if the extension represented by \p I is free.
+  /// Unlikely the is[Z|FP]ExtFree family which is based on types,
+  /// this method can use the context provided by \p I to decide
+  /// whether or not \p I is free.
+  /// This method extends the behavior of the is[Z|FP]ExtFree family.
+  /// In other words, if is[Z|FP]Free returns true, then this method
+  /// returns true as well. The converse is not true.
+  /// The target can perform the adequate checks by overriding isExtFreeImpl.
+  /// \pre \p I must be a sign, zero, or fp extension.
+  bool isExtFree(const Instruction *I) const {
+    switch (I->getOpcode()) {
+    case Instruction::FPExt:
+      if (isFPExtFree(EVT::getEVT(I->getType())))
+        return true;
+      break;
+    case Instruction::ZExt:
+      if (isZExtFree(I->getOperand(0)->getType(), I->getType()))
+        return true;
+      break;
+    case Instruction::SExt:
+      break;
+    default:
+      llvm_unreachable("Instruction is not an extension");
+    }
+    return isExtFreeImpl(I);
+  }
+
   /// Return true if any actual instruction that defines a value of type Ty1
   /// implicitly zero-extends the value to Ty2 in the result register.
   ///
@@ -1517,6 +1603,10 @@ public:
     return false;
   }
 
+  /// Return true if folding a vector load into ExtVal (a sign, zero, or any
+  /// extend node) is profitable.
+  virtual bool isVectorLoadExtDesirable(SDValue ExtVal) const { return false; }
+
   /// Return true if an fneg operation is free to the point where it is never
   /// worthwhile to replace it with a bitwise operation.
   virtual bool isFNegFree(EVT VT) const {
@@ -1606,7 +1696,6 @@ public:
 
 private:
   const TargetMachine &TM;
-  const DataLayout *DL;
 
   /// True if this is a little endian target.
   bool IsLittleEndian;
@@ -1783,136 +1872,8 @@ private:
 
   ValueTypeActionImpl ValueTypeActions;
 
-public:
-  LegalizeKind
-  getTypeConversion(LLVMContext &Context, EVT VT) const {
-    // If this is a simple type, use the ComputeRegisterProp mechanism.
-    if (VT.isSimple()) {
-      MVT SVT = VT.getSimpleVT();
-      assert((unsigned)SVT.SimpleTy < array_lengthof(TransformToType));
-      MVT NVT = TransformToType[SVT.SimpleTy];
-      LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);
-
-      assert(
-        (LA == TypeLegal || LA == TypeSoftenFloat ||
-         ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger)
-         && "Promote may not follow Expand or Promote");
-
-      if (LA == TypeSplitVector)
-        return LegalizeKind(LA, EVT::getVectorVT(Context,
-                                                 SVT.getVectorElementType(),
-                                                 SVT.getVectorNumElements()/2));
-      if (LA == TypeScalarizeVector)
-        return LegalizeKind(LA, SVT.getVectorElementType());
-      return LegalizeKind(LA, NVT);
-    }
-
-    // Handle Extended Scalar Types.
-    if (!VT.isVector()) {
-      assert(VT.isInteger() && "Float types must be simple");
-      unsigned BitSize = VT.getSizeInBits();
-      // First promote to a power-of-two size, then expand if necessary.
-      if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
-        EVT NVT = VT.getRoundIntegerType(Context);
-        assert(NVT != VT && "Unable to round integer VT");
-        LegalizeKind NextStep = getTypeConversion(Context, NVT);
-        // Avoid multi-step promotion.
-        if (NextStep.first == TypePromoteInteger) return NextStep;
-        // Return rounded integer type.
-        return LegalizeKind(TypePromoteInteger, NVT);
-      }
-
-      return LegalizeKind(TypeExpandInteger,
-                          EVT::getIntegerVT(Context, VT.getSizeInBits()/2));
-    }
-
-    // Handle vector types.
-    unsigned NumElts = VT.getVectorNumElements();
-    EVT EltVT = VT.getVectorElementType();
-
-    // Vectors with only one element are always scalarized.
-    if (NumElts == 1)
-      return LegalizeKind(TypeScalarizeVector, EltVT);
-
-    // Try to widen vector elements until the element type is a power of two and
-    // promote it to a legal type later on, for example:
-    // <3 x i8> -> <4 x i8> -> <4 x i32>
-    if (EltVT.isInteger()) {
-      // Vectors with a number of elements that is not a power of two are always
-      // widened, for example <3 x i8> -> <4 x i8>.
-      if (!VT.isPow2VectorType()) {
-        NumElts = (unsigned)NextPowerOf2(NumElts);
-        EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
-        return LegalizeKind(TypeWidenVector, NVT);
-      }
-
-      // Examine the element type.
-      LegalizeKind LK = getTypeConversion(Context, EltVT);
-
-      // If type is to be expanded, split the vector.
-      // <4 x i140> -> <2 x i140>
-      if (LK.first == TypeExpandInteger)
-        return LegalizeKind(TypeSplitVector,
-                            EVT::getVectorVT(Context, EltVT, NumElts / 2));
-
-      // Promote the integer element types until a legal vector type is found
-      // or until the element integer type is too big. If a legal type was not
-      // found, fallback to the usual mechanism of widening/splitting the
-      // vector.
-      EVT OldEltVT = EltVT;
-      while (1) {
-        // Increase the bitwidth of the element to the next pow-of-two
-        // (which is greater than 8 bits).
-        EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits()
-                                 ).getRoundIntegerType(Context);
-
-        // Stop trying when getting a non-simple element type.
-        // Note that vector elements may be greater than legal vector element
-        // types. Example: X86 XMM registers hold 64bit element on 32bit
-        // systems.
-        if (!EltVT.isSimple()) break;
-
-        // Build a new vector type and check if it is legal.
-        MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
-        // Found a legal promoted vector type.
-        if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
-          return LegalizeKind(TypePromoteInteger,
-                              EVT::getVectorVT(Context, EltVT, NumElts));
-      }
-
-      // Reset the type to the unexpanded type if we did not find a legal vector
-      // type with a promoted vector element type.
-      EltVT = OldEltVT;
-    }
-
-    // Try to widen the vector until a legal type is found.
-    // If there is no wider legal type, split the vector.
-    while (1) {
-      // Round up to the next power of 2.
-      NumElts = (unsigned)NextPowerOf2(NumElts);
-
-      // If there is no simple vector type with this many elements then there
-      // cannot be a larger legal vector type. Note that this assumes that
-      // there are no skipped intermediate vector types in the simple types.
-      if (!EltVT.isSimple()) break;
-      MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
-      if (LargerVector == MVT()) break;
-
-      // If this type is legal then widen the vector.
-      if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
-        return LegalizeKind(TypeWidenVector, LargerVector);
-    }
-
-    // Widen odd vectors to next power of two.
-    if (!VT.isPow2VectorType()) {
-      EVT NVT = VT.getPow2VectorType(Context);
-      return LegalizeKind(TypeWidenVector, NVT);
-    }
-
-    // Vectors with illegal element types are expanded.
-    EVT NVT = EVT::getVectorVT(Context, EltVT, VT.getVectorNumElements() / 2);
-    return LegalizeKind(TypeSplitVector, NVT);
-  }
+private:
+  LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const;
 
 private:
   std::vector<std::pair<MVT, const TargetRegisterClass*> > AvailableRegClasses;
@@ -1943,6 +1904,11 @@ private:
   CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL];
 
 protected:
+  /// Return true if the extension represented by \p I is free.
+  /// \pre \p I is a sign, zero, or fp extension and
+  ///      is[Z|FP]ExtFree of the related types is not true.
+  virtual bool isExtFreeImpl(const Instruction *I) const { return false; }
+
   /// \brief Specify maximum number of store instructions per memset call.
   ///
   /// When lowering \@llvm.memset this field specifies the maximum number of
@@ -2010,7 +1976,8 @@ protected:
   /// Replace/modify any TargetFrameIndex operands with a targte-dependent
   /// sequence of memory operands that is recognized by PrologEpilogInserter.
-  MachineBasicBlock *emitPatchPoint(MachineInstr *MI, MachineBasicBlock *MBB) const;
+  MachineBasicBlock *emitPatchPoint(MachineInstr *MI,
+                                    MachineBasicBlock *MBB) const;
 };
 
 /// This class defines information used to lower LLVM code to legal SelectionDAG
@@ -2019,8 +1986,8 @@ protected:
 /// This class also defines callbacks that targets must implement to lower
 /// target-specific constructs to SelectionDAG operators.
 class TargetLowering : public TargetLoweringBase {
-  TargetLowering(const TargetLowering&) LLVM_DELETED_FUNCTION;
-  void operator=(const TargetLowering&) LLVM_DELETED_FUNCTION;
+  TargetLowering(const TargetLowering&) = delete;
+  void operator=(const TargetLowering&) = delete;
 
 public:
   /// NOTE: The TargetMachine owns TLOF.
@@ -2080,6 +2047,7 @@ public:
                            ISD::CondCode &CCCode, SDLoc DL) const;
 
   /// Returns a pair of (return value, chain).
+  /// It is an error to pass RTLIB::UNKNOWN_LIBCALL as \p LC.
   std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC,
                                           EVT RetVT, const SDValue *Ops,
                                           unsigned NumOps, bool isSigned,
@@ -2171,8 +2139,7 @@ public:
     void AddToWorklist(SDNode *N);
     void RemoveFromWorklist(SDNode *N);
-    SDValue CombineTo(SDNode *N, const std::vector<SDValue> &To,
-                      bool AddTo = true);
+    SDValue CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo = true);
     SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
     SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);
@@ -2653,7 +2620,8 @@ public:
   /// specific constraints and their prefixes, and also tie in the associated
   /// operand values. If this returns an empty vector, and if the constraint
   /// string itself isn't empty, there was an error parsing.
-  virtual AsmOperandInfoVector ParseConstraints(ImmutableCallSite CS) const;
+  virtual AsmOperandInfoVector ParseConstraints(const TargetRegisterInfo *TRI,
+                                                ImmutableCallSite CS) const;
 
   /// Examine constraint type and operand type and determine a weight value.
   /// The operand object must already have been set up with the operand type.
@@ -2684,10 +2652,19 @@ public:
   /// pointer.
   ///
   /// This should only be used for C_Register constraints. On error, this
-  /// returns a register number of 0 and a null register class pointer..
-  virtual std::pair<unsigned, const TargetRegisterClass*>
-    getRegForInlineAsmConstraint(const std::string &Constraint,
-                                 MVT VT) const;
+  /// returns a register number of 0 and a null register class pointer.
+  virtual std::pair<unsigned, const TargetRegisterClass *>
+  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+                               const std::string &Constraint, MVT VT) const;
+
+  virtual unsigned
+  getInlineAsmMemConstraint(const std::string &ConstraintCode) const {
+    if (ConstraintCode == "i")
+      return InlineAsm::Constraint_i;
+    else if (ConstraintCode == "m")
+      return InlineAsm::Constraint_m;
+    return InlineAsm::Constraint_Unknown;
+  }
 
   /// Try to replace an X constraint, which matches anything, with another that
   /// has more specific requirements based on the type of the corresponding
@@ -2725,7 +2702,7 @@ public:
 
   /// Hooks for building estimates in place of slower divisions and square
   /// roots.
-
+
   /// Return a reciprocal square root estimate value for the input operand.
   /// The RefinementSteps output is the number of Newton-Raphson refinement
   /// iterations required to generate a sufficient (though not necessarily
@@ -2736,10 +2713,9 @@ public:
   /// If that's true, then return '0' as the number of RefinementSteps to avoid
   /// any further refinement of the estimate.
   /// An empty SDValue return means no estimate sequence can be created.
-  virtual SDValue getRsqrtEstimate(SDValue Operand,
-                                   DAGCombinerInfo &DCI,
-                                   unsigned &RefinementSteps,
-                                   bool &UseOneConstNR) const {
+  virtual SDValue getRsqrtEstimate(SDValue Operand, DAGCombinerInfo &DCI,
+                                   unsigned &RefinementSteps,
+                                   bool &UseOneConstNR) const {
     return SDValue();
   }
@@ -2751,8 +2727,7 @@ public:
   /// If that's true, then return '0' as the number of RefinementSteps to avoid
   /// any further refinement of the estimate.
  /// An empty SDValue return means no estimate sequence can be created.
-  virtual SDValue getRecipEstimate(SDValue Operand,
-                                   DAGCombinerInfo &DCI,
+  virtual SDValue getRecipEstimate(SDValue Operand, DAGCombinerInfo &DCI,
                                    unsigned &RefinementSteps) const {
     return SDValue();
   }
@@ -2791,6 +2766,8 @@ public:
   /// is created but not inserted into any basic blocks, and this method is
   /// called to expand it into a sequence of instructions, potentially also
   /// creating new basic blocks and control flow.
+  /// As long as the returned basic block is different (i.e., we created a new
+  /// one), the custom inserter is free to modify the rest of \p MBB.
   virtual MachineBasicBlock *
     EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const;
@@ -2806,11 +2783,6 @@ public:
   virtual bool useLoadStackGuardNode() const {
     return false;
   }
-
-  /// Returns true if arguments should be sign-extended in lib calls.
-  virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
-    return IsSigned;
-  }
 };
 
 /// Given an LLVM IR type and return type attributes, compute the return value
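
Editor's sketch, not part of the patch: the hunk around shouldExpandAtomicRMWInIR replaces the old boolean hook with the new AtomicRMWExpansionKind result, so a backend now picks an expansion strategy instead of a yes/no answer. A minimal illustration of how a target might override the new hook follows; the class name MyTargetLowering and the 64-bit size cutoff are invented for this example.

#include "llvm/IR/Instructions.h"
#include "llvm/Target/TargetLowering.h"

namespace {
// Hypothetical backend lowering class; only the atomic-RMW hook is shown.
class MyTargetLowering : public llvm::TargetLowering {
public:
  explicit MyTargetLowering(const llvm::TargetMachine &TM)
      : TargetLowering(TM) {}

  // Expand atomic RMW operations wider than 64 bits into an LL/SC loop and
  // leave narrower ones to native instruction selection.
  AtomicRMWExpansionKind
  shouldExpandAtomicRMWInIR(llvm::AtomicRMWInst *AI) const override {
    unsigned Size = AI->getType()->getPrimitiveSizeInBits();
    return Size > 64 ? AtomicRMWExpansionKind::LLSC
                     : AtomicRMWExpansionKind::None;
  }
};
} // end anonymous namespace

Per the comments in the patch, LLSC asks the IR-level AtomicExpand pass to build a load-linked/store-conditional loop (and implies hasLoadLinkedStoreConditional() returns true), CmpXChg rewrites the operation as a cmpxchg loop, and None leaves the instruction alone.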
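
Editor's sketch, not part of the patch: the new isExtFree/isExtFreeImpl pair keeps the purely type-based is[Z|FP]ExtFree checks in the non-virtual isExtFree and lets a target add a context-sensitive fallback. The heuristic below (a zext that only feeds a GEP is folded into the addressing mode) is made up for illustration, and it assumes the hypothetical MyTargetLowering from the previous sketch also declares this override.

#include "llvm/IR/Instructions.h"

// Assumes the class declares:
//   bool isExtFreeImpl(const llvm::Instruction *I) const override;
bool MyTargetLowering::isExtFreeImpl(const llvm::Instruction *I) const {
  // Treat a zero-extension as free when its only user is a GEP, on the
  // assumption that this (made-up) target folds the zext into the
  // addressing mode during instruction selection.
  if (I->getOpcode() == llvm::Instruction::ZExt && I->hasOneUse())
    return llvm::isa<llvm::GetElementPtrInst>(*I->user_begin());
  return false;
}

Because isExtFree already returns true whenever isZExtFree or isFPExtFree proves the extension free, isExtFreeImpl only needs to cover the cases those type-based hooks cannot see.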