diff options
Diffstat (limited to 'lib/Target/AArch64/AArch64ISelLowering.h')
-rw-r--r--  lib/Target/AArch64/AArch64ISelLowering.h | 42
1 file changed, 32 insertions(+), 10 deletions(-)
diff --git a/lib/Target/AArch64/AArch64ISelLowering.h b/lib/Target/AArch64/AArch64ISelLowering.h index cb0b9ef261dc..cc25bede8d62 100644 --- a/lib/Target/AArch64/AArch64ISelLowering.h +++ b/lib/Target/AArch64/AArch64ISelLowering.h @@ -12,8 +12,8 @@ // //===----------------------------------------------------------------------===// -#ifndef LLVM_TARGET_AArch64_ISELLOWERING_H -#define LLVM_TARGET_AArch64_ISELLOWERING_H +#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H +#define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H #include "llvm/CodeGen/CallingConvLower.h" #include "llvm/CodeGen/SelectionDAG.h" @@ -162,6 +162,16 @@ enum { SITOF, UITOF, + /// Natural vector cast. ISD::BITCAST is not natural in the big-endian + /// world w.r.t vectors; which causes additional REV instructions to be + /// generated to compensate for the byte-swapping. But sometimes we do + /// need to re-interpret the data in SIMD vector registers in big-endian + /// mode without emitting such REV instructions. + NVCAST, + + SMULL, + UMULL, + // NEON Load/Store with post-increment base updates LD2post = ISD::FIRST_TARGET_MEMORY_OPCODE, LD3post, @@ -197,10 +207,9 @@ class AArch64TargetLowering : public TargetLowering { bool RequireStrictAlign; public: - explicit AArch64TargetLowering(TargetMachine &TM); + explicit AArch64TargetLowering(const TargetMachine &TM); - /// Selects the correct CCAssignFn for a the given CallingConvention - /// value. + /// Selects the correct CCAssignFn for a given CallingConvention value. CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const; /// computeKnownBitsForTargetNode - Determine which of the bits specified in @@ -212,10 +221,11 @@ public: MVT getScalarShiftAmountTy(EVT LHSTy) const override; - /// allowsUnalignedMemoryAccesses - Returns true if the target allows + /// allowsMisalignedMemoryAccesses - Returns true if the target allows /// unaligned memory accesses. of the specified type. 
- bool allowsUnalignedMemoryAccesses(EVT VT, unsigned AddrSpace = 0, - bool *Fast = nullptr) const override { + bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace = 0, + unsigned Align = 1, + bool *Fast = nullptr) const override { if (RequireStrictAlign) return false; // FIXME: True for Cyclone, but not necessary others. @@ -317,13 +327,17 @@ public: bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override; + bool hasLoadLinkedStoreConditional() const override; Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr, AtomicOrdering Ord) const override; Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const override; - bool shouldExpandAtomicInIR(Instruction *Inst) const override; + bool shouldExpandAtomicLoadInIR(LoadInst *LI) const override; + bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override; + bool shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override; + bool useLoadStackGuardNode() const override; TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(EVT VT) const override; @@ -424,6 +438,10 @@ private: SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const; SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const; + SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, + std::vector<SDNode *> *Created) const override; + bool combineRepeatedFPDivisors(unsigned NumUsers) const override; + ConstraintType getConstraintType(const std::string &Constraint) const override; unsigned getRegisterByName(const char* RegName, EVT VT) const override; @@ -455,6 +473,10 @@ private: void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const override; + + bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, + CallingConv::ID CallConv, + bool isVarArg) const override; }; namespace AArch64 { @@ -464,4 +486,4 @@ FastISel *createFastISel(FunctionLoweringInfo &funcInfo, } // end namespace llvm -#endif // 
LLVM_TARGET_AArch64_ISELLOWERING_H +#endif