Diffstat (limited to 'include/llvm/IR')
-rw-r--r--  include/llvm/IR/Constants.h           |  10
-rw-r--r--  include/llvm/IR/IRBuilder.h           |  20
-rw-r--r--  include/llvm/IR/Instructions.h        | 210
-rw-r--r--  include/llvm/IR/IntrinsicInst.h       | 169
-rw-r--r--  include/llvm/IR/Intrinsics.td         |  16
-rw-r--r--  include/llvm/IR/LLVMContext.h         |  28
-rw-r--r--  include/llvm/IR/Module.h              |   2
-rw-r--r--  include/llvm/IR/ModuleSummaryIndex.h  |  10
-rw-r--r--  include/llvm/IR/PassManager.h         |  31
-rw-r--r--  include/llvm/IR/PatternMatch.h        |  73
-rw-r--r--  include/llvm/IR/SafepointIRVerifier.h |  35
-rw-r--r--  include/llvm/IR/Type.h                |   6
12 files changed, 449 insertions, 161 deletions
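
The largest part of this diff replaces the fixed SynchronizationScope enum (SingleThread/CrossThread) on atomic instructions with extensible synchronization scope IDs (SyncScope::ID). The following is a minimal sketch of what call sites look like after the change, based only on the IRBuilder signatures shown below; the function name and the Builder/Ptr/Val variables are placeholders, not part of this commit:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;

void emitAtomicOps(IRBuilder<> &Builder, Value *Ptr, Value *Val) {
  // Previously CreateFence(..., CrossThread); SyncScope::System is the new
  // default and keeps the same meaning (synchronizes with all threads).
  Builder.CreateFence(AtomicOrdering::SequentiallyConsistent,
                      SyncScope::System);

  // Previously SingleThread; now the pre-defined SyncScope::SingleThread ID,
  // which only synchronizes with signal handlers in the same thread.
  Builder.CreateAtomicRMW(AtomicRMWInst::Add, Ptr, Val,
                          AtomicOrdering::Monotonic, SyncScope::SingleThread);
}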
diff --git a/include/llvm/IR/Constants.h b/include/llvm/IR/Constants.h index 8b3a90fa065b..2e72c41ccee3 100644 --- a/include/llvm/IR/Constants.h +++ b/include/llvm/IR/Constants.h @@ -680,11 +680,6 @@ class ConstantDataArray final : public ConstantDataSequential { explicit ConstantDataArray(Type *ty, const char *Data) : ConstantDataSequential(ty, ConstantDataArrayVal, Data) {} - /// Allocate space for exactly zero operands. - void *operator new(size_t s) { - return User::operator new(s, 0); - } - public: ConstantDataArray(const ConstantDataArray &) = delete; @@ -739,11 +734,6 @@ class ConstantDataVector final : public ConstantDataSequential { explicit ConstantDataVector(Type *ty, const char *Data) : ConstantDataSequential(ty, ConstantDataVectorVal, Data) {} - // allocate space for exactly zero operands. - void *operator new(size_t s) { - return User::operator new(s, 0); - } - public: ConstantDataVector(const ConstantDataVector &) = delete; diff --git a/include/llvm/IR/IRBuilder.h b/include/llvm/IR/IRBuilder.h index ec33f82f7022..5344a93efb33 100644 --- a/include/llvm/IR/IRBuilder.h +++ b/include/llvm/IR/IRBuilder.h @@ -1062,7 +1062,7 @@ public: Value *CreateAnd(Value *LHS, Value *RHS, const Twine &Name = "") { if (Constant *RC = dyn_cast<Constant>(RHS)) { - if (isa<ConstantInt>(RC) && cast<ConstantInt>(RC)->isAllOnesValue()) + if (isa<ConstantInt>(RC) && cast<ConstantInt>(RC)->isMinusOne()) return LHS; // LHS & -1 -> LHS if (Constant *LC = dyn_cast<Constant>(LHS)) return Insert(Folder.CreateAnd(LC, RC), Name); @@ -1203,22 +1203,22 @@ public: return SI; } FenceInst *CreateFence(AtomicOrdering Ordering, - SynchronizationScope SynchScope = CrossThread, + SyncScope::ID SSID = SyncScope::System, const Twine &Name = "") { - return Insert(new FenceInst(Context, Ordering, SynchScope), Name); + return Insert(new FenceInst(Context, Ordering, SSID), Name); } AtomicCmpXchgInst * CreateAtomicCmpXchg(Value *Ptr, Value *Cmp, Value *New, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, - SynchronizationScope SynchScope = CrossThread) { + SyncScope::ID SSID = SyncScope::System) { return Insert(new AtomicCmpXchgInst(Ptr, Cmp, New, SuccessOrdering, - FailureOrdering, SynchScope)); + FailureOrdering, SSID)); } AtomicRMWInst *CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr, Value *Val, AtomicOrdering Ordering, - SynchronizationScope SynchScope = CrossThread) { - return Insert(new AtomicRMWInst(Op, Ptr, Val, Ordering, SynchScope)); + SyncScope::ID SSID = SyncScope::System) { + return Insert(new AtomicRMWInst(Op, Ptr, Val, Ordering, SSID)); } Value *CreateGEP(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &Name = "") { @@ -1517,11 +1517,9 @@ public: const Twine &Name = "") { if (V->getType() == DestTy) return V; - if (V->getType()->getScalarType()->isPointerTy() && - DestTy->getScalarType()->isIntegerTy()) + if (V->getType()->isPtrOrPtrVectorTy() && DestTy->isIntOrIntVectorTy()) return CreatePtrToInt(V, DestTy, Name); - if (V->getType()->getScalarType()->isIntegerTy() && - DestTy->getScalarType()->isPointerTy()) + if (V->getType()->isIntOrIntVectorTy() && DestTy->isPtrOrPtrVectorTy()) return CreateIntToPtr(V, DestTy, Name); return CreateBitCast(V, DestTy, Name); diff --git a/include/llvm/IR/Instructions.h b/include/llvm/IR/Instructions.h index dc5f37450b48..60ae98869e55 100644 --- a/include/llvm/IR/Instructions.h +++ b/include/llvm/IR/Instructions.h @@ -52,11 +52,6 @@ class ConstantInt; class DataLayout; class LLVMContext; -enum SynchronizationScope { - SingleThread = 0, - CrossThread = 1 
-}; - //===----------------------------------------------------------------------===// // AllocaInst Class //===----------------------------------------------------------------------===// @@ -195,17 +190,16 @@ public: LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align, BasicBlock *InsertAtEnd); LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align, - AtomicOrdering Order, SynchronizationScope SynchScope = CrossThread, + AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System, Instruction *InsertBefore = nullptr) : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr, - NameStr, isVolatile, Align, Order, SynchScope, InsertBefore) {} + NameStr, isVolatile, Align, Order, SSID, InsertBefore) {} LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align, AtomicOrdering Order, - SynchronizationScope SynchScope = CrossThread, + SyncScope::ID SSID = SyncScope::System, Instruction *InsertBefore = nullptr); LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, - unsigned Align, AtomicOrdering Order, - SynchronizationScope SynchScope, + unsigned Align, AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd); LoadInst(Value *Ptr, const char *NameStr, Instruction *InsertBefore); LoadInst(Value *Ptr, const char *NameStr, BasicBlock *InsertAtEnd); @@ -235,34 +229,34 @@ public: void setAlignment(unsigned Align); - /// Returns the ordering effect of this fence. + /// Returns the ordering constraint of this load instruction. AtomicOrdering getOrdering() const { return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7); } - /// Set the ordering constraint on this load. May not be Release or - /// AcquireRelease. + /// Sets the ordering constraint of this load instruction. May not be Release + /// or AcquireRelease. void setOrdering(AtomicOrdering Ordering) { setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) | ((unsigned)Ordering << 7)); } - SynchronizationScope getSynchScope() const { - return SynchronizationScope((getSubclassDataFromInstruction() >> 6) & 1); + /// Returns the synchronization scope ID of this load instruction. + SyncScope::ID getSyncScopeID() const { + return SSID; } - /// Specify whether this load is ordered with respect to all - /// concurrently executing threads, or only with respect to signal handlers - /// executing in the same thread. - void setSynchScope(SynchronizationScope xthread) { - setInstructionSubclassData((getSubclassDataFromInstruction() & ~(1 << 6)) | - (xthread << 6)); + /// Sets the synchronization scope ID of this load instruction. + void setSyncScopeID(SyncScope::ID SSID) { + this->SSID = SSID; } + /// Sets the ordering constraint and the synchronization scope ID of this load + /// instruction. void setAtomic(AtomicOrdering Ordering, - SynchronizationScope SynchScope = CrossThread) { + SyncScope::ID SSID = SyncScope::System) { setOrdering(Ordering); - setSynchScope(SynchScope); + setSyncScopeID(SSID); } bool isSimple() const { return !isAtomic() && !isVolatile(); } @@ -297,6 +291,11 @@ private: void setInstructionSubclassData(unsigned short D) { Instruction::setInstructionSubclassData(D); } + + /// The synchronization scope ID of this load instruction. Not quite enough + /// room in SubClassData for everything, so synchronization scope ID gets its + /// own field. 
+ SyncScope::ID SSID; }; //===----------------------------------------------------------------------===// @@ -325,11 +324,10 @@ public: unsigned Align, BasicBlock *InsertAtEnd); StoreInst(Value *Val, Value *Ptr, bool isVolatile, unsigned Align, AtomicOrdering Order, - SynchronizationScope SynchScope = CrossThread, + SyncScope::ID SSID = SyncScope::System, Instruction *InsertBefore = nullptr); StoreInst(Value *Val, Value *Ptr, bool isVolatile, - unsigned Align, AtomicOrdering Order, - SynchronizationScope SynchScope, + unsigned Align, AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd); // allocate space for exactly two operands @@ -356,34 +354,34 @@ public: void setAlignment(unsigned Align); - /// Returns the ordering effect of this store. + /// Returns the ordering constraint of this store instruction. AtomicOrdering getOrdering() const { return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7); } - /// Set the ordering constraint on this store. May not be Acquire or - /// AcquireRelease. + /// Sets the ordering constraint of this store instruction. May not be + /// Acquire or AcquireRelease. void setOrdering(AtomicOrdering Ordering) { setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) | ((unsigned)Ordering << 7)); } - SynchronizationScope getSynchScope() const { - return SynchronizationScope((getSubclassDataFromInstruction() >> 6) & 1); + /// Returns the synchronization scope ID of this store instruction. + SyncScope::ID getSyncScopeID() const { + return SSID; } - /// Specify whether this store instruction is ordered with respect to all - /// concurrently executing threads, or only with respect to signal handlers - /// executing in the same thread. - void setSynchScope(SynchronizationScope xthread) { - setInstructionSubclassData((getSubclassDataFromInstruction() & ~(1 << 6)) | - (xthread << 6)); + /// Sets the synchronization scope ID of this store instruction. + void setSyncScopeID(SyncScope::ID SSID) { + this->SSID = SSID; } + /// Sets the ordering constraint and the synchronization scope ID of this + /// store instruction. void setAtomic(AtomicOrdering Ordering, - SynchronizationScope SynchScope = CrossThread) { + SyncScope::ID SSID = SyncScope::System) { setOrdering(Ordering); - setSynchScope(SynchScope); + setSyncScopeID(SSID); } bool isSimple() const { return !isAtomic() && !isVolatile(); } @@ -421,6 +419,11 @@ private: void setInstructionSubclassData(unsigned short D) { Instruction::setInstructionSubclassData(D); } + + /// The synchronization scope ID of this store instruction. Not quite enough + /// room in SubClassData for everything, so synchronization scope ID gets its + /// own field. + SyncScope::ID SSID; }; template <> @@ -435,7 +438,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value) /// An instruction for ordering other memory operations. class FenceInst : public Instruction { - void Init(AtomicOrdering Ordering, SynchronizationScope SynchScope); + void Init(AtomicOrdering Ordering, SyncScope::ID SSID); protected: // Note: Instruction needs to be a friend here to call cloneImpl. @@ -447,10 +450,9 @@ public: // Ordering may only be Acquire, Release, AcquireRelease, or // SequentiallyConsistent. 
FenceInst(LLVMContext &C, AtomicOrdering Ordering, - SynchronizationScope SynchScope = CrossThread, + SyncScope::ID SSID = SyncScope::System, Instruction *InsertBefore = nullptr); - FenceInst(LLVMContext &C, AtomicOrdering Ordering, - SynchronizationScope SynchScope, + FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID, BasicBlock *InsertAtEnd); // allocate space for exactly zero operands @@ -458,28 +460,26 @@ public: return User::operator new(s, 0); } - /// Returns the ordering effect of this fence. + /// Returns the ordering constraint of this fence instruction. AtomicOrdering getOrdering() const { return AtomicOrdering(getSubclassDataFromInstruction() >> 1); } - /// Set the ordering constraint on this fence. May only be Acquire, Release, - /// AcquireRelease, or SequentiallyConsistent. + /// Sets the ordering constraint of this fence instruction. May only be + /// Acquire, Release, AcquireRelease, or SequentiallyConsistent. void setOrdering(AtomicOrdering Ordering) { setInstructionSubclassData((getSubclassDataFromInstruction() & 1) | ((unsigned)Ordering << 1)); } - SynchronizationScope getSynchScope() const { - return SynchronizationScope(getSubclassDataFromInstruction() & 1); + /// Returns the synchronization scope ID of this fence instruction. + SyncScope::ID getSyncScopeID() const { + return SSID; } - /// Specify whether this fence orders other operations with respect to all - /// concurrently executing threads, or only with respect to signal handlers - /// executing in the same thread. - void setSynchScope(SynchronizationScope xthread) { - setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) | - xthread); + /// Sets the synchronization scope ID of this fence instruction. + void setSyncScopeID(SyncScope::ID SSID) { + this->SSID = SSID; } // Methods for support type inquiry through isa, cast, and dyn_cast: @@ -496,6 +496,11 @@ private: void setInstructionSubclassData(unsigned short D) { Instruction::setInstructionSubclassData(D); } + + /// The synchronization scope ID of this fence instruction. Not quite enough + /// room in SubClassData for everything, so synchronization scope ID gets its + /// own field. + SyncScope::ID SSID; }; //===----------------------------------------------------------------------===// @@ -509,7 +514,7 @@ private: class AtomicCmpXchgInst : public Instruction { void Init(Value *Ptr, Value *Cmp, Value *NewVal, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, - SynchronizationScope SynchScope); + SyncScope::ID SSID); protected: // Note: Instruction needs to be a friend here to call cloneImpl. @@ -521,13 +526,11 @@ public: AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, - SynchronizationScope SynchScope, - Instruction *InsertBefore = nullptr); + SyncScope::ID SSID, Instruction *InsertBefore = nullptr); AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, - SynchronizationScope SynchScope, - BasicBlock *InsertAtEnd); + SyncScope::ID SSID, BasicBlock *InsertAtEnd); // allocate space for exactly three operands void *operator new(size_t s) { @@ -561,7 +564,12 @@ public: /// Transparently provide more efficient getOperand methods. DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); - /// Set the ordering constraint on this cmpxchg. + /// Returns the success ordering constraint of this cmpxchg instruction. 
+ AtomicOrdering getSuccessOrdering() const { + return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7); + } + + /// Sets the success ordering constraint of this cmpxchg instruction. void setSuccessOrdering(AtomicOrdering Ordering) { assert(Ordering != AtomicOrdering::NotAtomic && "CmpXchg instructions can only be atomic."); @@ -569,6 +577,12 @@ public: ((unsigned)Ordering << 2)); } + /// Returns the failure ordering constraint of this cmpxchg instruction. + AtomicOrdering getFailureOrdering() const { + return AtomicOrdering((getSubclassDataFromInstruction() >> 5) & 7); + } + + /// Sets the failure ordering constraint of this cmpxchg instruction. void setFailureOrdering(AtomicOrdering Ordering) { assert(Ordering != AtomicOrdering::NotAtomic && "CmpXchg instructions can only be atomic."); @@ -576,28 +590,14 @@ public: ((unsigned)Ordering << 5)); } - /// Specify whether this cmpxchg is atomic and orders other operations with - /// respect to all concurrently executing threads, or only with respect to - /// signal handlers executing in the same thread. - void setSynchScope(SynchronizationScope SynchScope) { - setInstructionSubclassData((getSubclassDataFromInstruction() & ~2) | - (SynchScope << 1)); - } - - /// Returns the ordering constraint on this cmpxchg. - AtomicOrdering getSuccessOrdering() const { - return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7); - } - - /// Returns the ordering constraint on this cmpxchg. - AtomicOrdering getFailureOrdering() const { - return AtomicOrdering((getSubclassDataFromInstruction() >> 5) & 7); + /// Returns the synchronization scope ID of this cmpxchg instruction. + SyncScope::ID getSyncScopeID() const { + return SSID; } - /// Returns whether this cmpxchg is atomic between threads or only within a - /// single thread. - SynchronizationScope getSynchScope() const { - return SynchronizationScope((getSubclassDataFromInstruction() & 2) >> 1); + /// Sets the synchronization scope ID of this cmpxchg instruction. + void setSyncScopeID(SyncScope::ID SSID) { + this->SSID = SSID; } Value *getPointerOperand() { return getOperand(0); } @@ -652,6 +652,11 @@ private: void setInstructionSubclassData(unsigned short D) { Instruction::setInstructionSubclassData(D); } + + /// The synchronization scope ID of this cmpxchg instruction. Not quite + /// enough room in SubClassData for everything, so synchronization scope ID + /// gets its own field. + SyncScope::ID SSID; }; template <> @@ -711,10 +716,10 @@ public: }; AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, - AtomicOrdering Ordering, SynchronizationScope SynchScope, + AtomicOrdering Ordering, SyncScope::ID SSID, Instruction *InsertBefore = nullptr); AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, - AtomicOrdering Ordering, SynchronizationScope SynchScope, + AtomicOrdering Ordering, SyncScope::ID SSID, BasicBlock *InsertAtEnd); // allocate space for exactly two operands @@ -748,7 +753,12 @@ public: /// Transparently provide more efficient getOperand methods. DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); - /// Set the ordering constraint on this RMW. + /// Returns the ordering constraint of this rmw instruction. + AtomicOrdering getOrdering() const { + return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7); + } + + /// Sets the ordering constraint of this rmw instruction. 
void setOrdering(AtomicOrdering Ordering) { assert(Ordering != AtomicOrdering::NotAtomic && "atomicrmw instructions can only be atomic."); @@ -756,23 +766,14 @@ public: ((unsigned)Ordering << 2)); } - /// Specify whether this RMW orders other operations with respect to all - /// concurrently executing threads, or only with respect to signal handlers - /// executing in the same thread. - void setSynchScope(SynchronizationScope SynchScope) { - setInstructionSubclassData((getSubclassDataFromInstruction() & ~2) | - (SynchScope << 1)); + /// Returns the synchronization scope ID of this rmw instruction. + SyncScope::ID getSyncScopeID() const { + return SSID; } - /// Returns the ordering constraint on this RMW. - AtomicOrdering getOrdering() const { - return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7); - } - - /// Returns whether this RMW is atomic between threads or only within a - /// single thread. - SynchronizationScope getSynchScope() const { - return SynchronizationScope((getSubclassDataFromInstruction() & 2) >> 1); + /// Sets the synchronization scope ID of this rmw instruction. + void setSyncScopeID(SyncScope::ID SSID) { + this->SSID = SSID; } Value *getPointerOperand() { return getOperand(0); } @@ -797,13 +798,18 @@ public: private: void Init(BinOp Operation, Value *Ptr, Value *Val, - AtomicOrdering Ordering, SynchronizationScope SynchScope); + AtomicOrdering Ordering, SyncScope::ID SSID); // Shadow Instruction::setInstructionSubclassData with a private forwarding // method so that subclasses cannot accidentally use it. void setInstructionSubclassData(unsigned short D) { Instruction::setInstructionSubclassData(D); } + + /// The synchronization scope ID of this rmw instruction. Not quite enough + /// room in SubClassData for everything, so synchronization scope ID gets its + /// own field. + SyncScope::ID SSID; }; template <> @@ -1101,8 +1107,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value) /// Represent an integer comparison operator. class ICmpInst: public CmpInst { void AssertOK() { - assert(getPredicate() >= CmpInst::FIRST_ICMP_PREDICATE && - getPredicate() <= CmpInst::LAST_ICMP_PREDICATE && + assert(isIntPredicate() && "Invalid ICmp predicate value"); assert(getOperand(0)->getType() == getOperand(1)->getType() && "Both operands to ICmp instruction are not of the same type!"); @@ -1244,8 +1249,7 @@ public: /// Represents a floating point comparison operator. class FCmpInst: public CmpInst { void AssertOK() { - assert(getPredicate() <= FCmpInst::LAST_FCMP_PREDICATE && - "Invalid FCmp predicate value"); + assert(isFPPredicate() && "Invalid FCmp predicate value"); assert(getOperand(0)->getType() == getOperand(1)->getType() && "Both operands to FCmp instruction are not of the same type!"); // Check that the operands are the right type diff --git a/include/llvm/IR/IntrinsicInst.h b/include/llvm/IR/IntrinsicInst.h index 944af57a7800..f55d17ec72c8 100644 --- a/include/llvm/IR/IntrinsicInst.h +++ b/include/llvm/IR/IntrinsicInst.h @@ -296,6 +296,175 @@ namespace llvm { } }; + class ElementUnorderedAtomicMemMoveInst : public IntrinsicInst { + private: + enum { ARG_DEST = 0, ARG_SOURCE = 1, ARG_LENGTH = 2, ARG_ELEMENTSIZE = 3 }; + + public: + Value *getRawDest() const { + return const_cast<Value *>(getArgOperand(ARG_DEST)); + } + const Use &getRawDestUse() const { return getArgOperandUse(ARG_DEST); } + Use &getRawDestUse() { return getArgOperandUse(ARG_DEST); } + + /// Return the arguments to the instruction. 
+ Value *getRawSource() const { + return const_cast<Value *>(getArgOperand(ARG_SOURCE)); + } + const Use &getRawSourceUse() const { return getArgOperandUse(ARG_SOURCE); } + Use &getRawSourceUse() { return getArgOperandUse(ARG_SOURCE); } + + Value *getLength() const { + return const_cast<Value *>(getArgOperand(ARG_LENGTH)); + } + const Use &getLengthUse() const { return getArgOperandUse(ARG_LENGTH); } + Use &getLengthUse() { return getArgOperandUse(ARG_LENGTH); } + + bool isVolatile() const { return false; } + + Value *getRawElementSizeInBytes() const { + return const_cast<Value *>(getArgOperand(ARG_ELEMENTSIZE)); + } + + ConstantInt *getElementSizeInBytesCst() const { + return cast<ConstantInt>(getRawElementSizeInBytes()); + } + + uint32_t getElementSizeInBytes() const { + return getElementSizeInBytesCst()->getZExtValue(); + } + + /// This is just like getRawDest, but it strips off any cast + /// instructions that feed it, giving the original input. The returned + /// value is guaranteed to be a pointer. + Value *getDest() const { return getRawDest()->stripPointerCasts(); } + + /// This is just like getRawSource, but it strips off any cast + /// instructions that feed it, giving the original input. The returned + /// value is guaranteed to be a pointer. + Value *getSource() const { return getRawSource()->stripPointerCasts(); } + + unsigned getDestAddressSpace() const { + return cast<PointerType>(getRawDest()->getType())->getAddressSpace(); + } + + unsigned getSourceAddressSpace() const { + return cast<PointerType>(getRawSource()->getType())->getAddressSpace(); + } + + /// Set the specified arguments of the instruction. + void setDest(Value *Ptr) { + assert(getRawDest()->getType() == Ptr->getType() && + "setDest called with pointer of wrong type!"); + setArgOperand(ARG_DEST, Ptr); + } + + void setSource(Value *Ptr) { + assert(getRawSource()->getType() == Ptr->getType() && + "setSource called with pointer of wrong type!"); + setArgOperand(ARG_SOURCE, Ptr); + } + + void setLength(Value *L) { + assert(getLength()->getType() == L->getType() && + "setLength called with value of wrong type!"); + setArgOperand(ARG_LENGTH, L); + } + + void setElementSizeInBytes(Constant *V) { + assert(V->getType() == Type::getInt8Ty(getContext()) && + "setElementSizeInBytes called with value of wrong type!"); + setArgOperand(ARG_ELEMENTSIZE, V); + } + + static inline bool classof(const IntrinsicInst *I) { + return I->getIntrinsicID() == Intrinsic::memmove_element_unordered_atomic; + } + static inline bool classof(const Value *V) { + return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); + } + }; + + /// This class represents atomic memset intrinsic + /// TODO: Integrate this class into MemIntrinsic hierarchy; for now this is + /// C&P of all methods from that hierarchy + class ElementUnorderedAtomicMemSetInst : public IntrinsicInst { + private: + enum { ARG_DEST = 0, ARG_VALUE = 1, ARG_LENGTH = 2, ARG_ELEMENTSIZE = 3 }; + + public: + Value *getRawDest() const { + return const_cast<Value *>(getArgOperand(ARG_DEST)); + } + const Use &getRawDestUse() const { return getArgOperandUse(ARG_DEST); } + Use &getRawDestUse() { return getArgOperandUse(ARG_DEST); } + + Value *getValue() const { return const_cast<Value*>(getArgOperand(ARG_VALUE)); } + const Use &getValueUse() const { return getArgOperandUse(ARG_VALUE); } + Use &getValueUse() { return getArgOperandUse(ARG_VALUE); } + + Value *getLength() const { + return const_cast<Value *>(getArgOperand(ARG_LENGTH)); + } + const Use &getLengthUse() const { return 
getArgOperandUse(ARG_LENGTH); } + Use &getLengthUse() { return getArgOperandUse(ARG_LENGTH); } + + bool isVolatile() const { return false; } + + Value *getRawElementSizeInBytes() const { + return const_cast<Value *>(getArgOperand(ARG_ELEMENTSIZE)); + } + + ConstantInt *getElementSizeInBytesCst() const { + return cast<ConstantInt>(getRawElementSizeInBytes()); + } + + uint32_t getElementSizeInBytes() const { + return getElementSizeInBytesCst()->getZExtValue(); + } + + /// This is just like getRawDest, but it strips off any cast + /// instructions that feed it, giving the original input. The returned + /// value is guaranteed to be a pointer. + Value *getDest() const { return getRawDest()->stripPointerCasts(); } + + unsigned getDestAddressSpace() const { + return cast<PointerType>(getRawDest()->getType())->getAddressSpace(); + } + + /// Set the specified arguments of the instruction. + void setDest(Value *Ptr) { + assert(getRawDest()->getType() == Ptr->getType() && + "setDest called with pointer of wrong type!"); + setArgOperand(ARG_DEST, Ptr); + } + + void setValue(Value *Val) { + assert(getValue()->getType() == Val->getType() && + "setValue called with value of wrong type!"); + setArgOperand(ARG_VALUE, Val); + } + + void setLength(Value *L) { + assert(getLength()->getType() == L->getType() && + "setLength called with value of wrong type!"); + setArgOperand(ARG_LENGTH, L); + } + + void setElementSizeInBytes(Constant *V) { + assert(V->getType() == Type::getInt8Ty(getContext()) && + "setElementSizeInBytes called with value of wrong type!"); + setArgOperand(ARG_ELEMENTSIZE, V); + } + + static inline bool classof(const IntrinsicInst *I) { + return I->getIntrinsicID() == Intrinsic::memset_element_unordered_atomic; + } + static inline bool classof(const Value *V) { + return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V)); + } + }; + /// This is the common base class for memset/memcpy/memmove. class MemIntrinsic : public IntrinsicInst { public: diff --git a/include/llvm/IR/Intrinsics.td b/include/llvm/IR/Intrinsics.td index 45936a6e9b66..14c88e519435 100644 --- a/include/llvm/IR/Intrinsics.td +++ b/include/llvm/IR/Intrinsics.td @@ -873,6 +873,22 @@ def int_memcpy_element_unordered_atomic ReadOnly<1> ]>; +// @llvm.memmove.element.unordered.atomic.*(dest, src, length, elementsize) +def int_memmove_element_unordered_atomic + : Intrinsic<[], + [ + llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty, llvm_i32_ty + ], + [ + IntrArgMemOnly, NoCapture<0>, NoCapture<1>, WriteOnly<0>, + ReadOnly<1> + ]>; + +// @llvm.memset.element.unordered.atomic.*(dest, value, length, elementsize) +def int_memset_element_unordered_atomic + : Intrinsic<[], [ llvm_anyptr_ty, llvm_i8_ty, llvm_anyint_ty, llvm_i32_ty ], + [ IntrArgMemOnly, NoCapture<0>, WriteOnly<0> ]>; + //===------------------------ Reduction Intrinsics ------------------------===// // def int_experimental_vector_reduce_fadd : Intrinsic<[llvm_anyfloat_ty], diff --git a/include/llvm/IR/LLVMContext.h b/include/llvm/IR/LLVMContext.h index b27abad618c9..4cb77701f762 100644 --- a/include/llvm/IR/LLVMContext.h +++ b/include/llvm/IR/LLVMContext.h @@ -42,6 +42,24 @@ class Output; } // end namespace yaml +namespace SyncScope { + +typedef uint8_t ID; + +/// Known synchronization scope IDs, which always have the same value. All +/// synchronization scope IDs that LLVM has special knowledge of are listed +/// here. Additionally, this scheme allows LLVM to efficiently check for +/// specific synchronization scope ID without comparing strings. 
+enum { + /// Synchronized with respect to signal handlers executing in the same thread. + SingleThread = 0, + + /// Synchronized with respect to all concurrently executing threads. + System = 1 +}; + +} // end namespace SyncScope + /// This is an important class for using LLVM in a threaded context. It /// (opaquely) owns and manages the core "global" data of LLVM's core /// infrastructure, including the type and constant uniquing tables. @@ -111,6 +129,16 @@ public: /// tag registered with an LLVMContext has an unique ID. uint32_t getOperandBundleTagID(StringRef Tag) const; + /// getOrInsertSyncScopeID - Maps synchronization scope name to + /// synchronization scope ID. Every synchronization scope registered with + /// LLVMContext has unique ID except pre-defined ones. + SyncScope::ID getOrInsertSyncScopeID(StringRef SSN); + + /// getSyncScopeNames - Populates client supplied SmallVector with + /// synchronization scope names registered with LLVMContext. Synchronization + /// scope names are ordered by increasing synchronization scope IDs. + void getSyncScopeNames(SmallVectorImpl<StringRef> &SSNs) const; + /// Define the GC for a function void setGC(const Function &Fn, std::string GCName); diff --git a/include/llvm/IR/Module.h b/include/llvm/IR/Module.h index d47d82a57bff..196e32e3615c 100644 --- a/include/llvm/IR/Module.h +++ b/include/llvm/IR/Module.h @@ -249,7 +249,7 @@ public: /// when other randomness consuming passes are added or removed. In /// addition, the random stream will be reproducible across LLVM /// versions when the pass does not change. - RandomNumberGenerator *createRNG(const Pass* P) const; + std::unique_ptr<RandomNumberGenerator> createRNG(const Pass* P) const; /// @} /// @name Module Level Mutators diff --git a/include/llvm/IR/ModuleSummaryIndex.h b/include/llvm/IR/ModuleSummaryIndex.h index b43d58865862..4aa8a0199ab1 100644 --- a/include/llvm/IR/ModuleSummaryIndex.h +++ b/include/llvm/IR/ModuleSummaryIndex.h @@ -47,7 +47,13 @@ template <typename T> struct MappingTraits; /// \brief Class to accumulate and hold information about a callee. struct CalleeInfo { - enum class HotnessType : uint8_t { Unknown = 0, Cold = 1, None = 2, Hot = 3 }; + enum class HotnessType : uint8_t { + Unknown = 0, + Cold = 1, + None = 2, + Hot = 3, + Critical = 4 + }; HotnessType Hotness = HotnessType::Unknown; CalleeInfo() = default; @@ -516,7 +522,7 @@ using ModulePathStringTableTy = StringMap<std::pair<uint64_t, ModuleHash>>; /// Map of global value GUID to its summary, used to identify values defined in /// a particular module, and provide efficient access to their summary. -using GVSummaryMapTy = std::map<GlobalValue::GUID, GlobalValueSummary *>; +using GVSummaryMapTy = DenseMap<GlobalValue::GUID, GlobalValueSummary *>; /// Class to hold module path string table and global value map, /// and encapsulate methods for operating on them. diff --git a/include/llvm/IR/PassManager.h b/include/llvm/IR/PassManager.h index d03b7b65f81e..393175675034 100644 --- a/include/llvm/IR/PassManager.h +++ b/include/llvm/IR/PassManager.h @@ -162,6 +162,14 @@ public: return PA; } + /// \brief Construct a preserved analyses object with a single preserved set. + template <typename AnalysisSetT> + static PreservedAnalyses allInSet() { + PreservedAnalyses PA; + PA.preserveSet<AnalysisSetT>(); + return PA; + } + /// Mark an analysis as preserved. 
template <typename AnalysisT> void preserve() { preserve(AnalysisT::ID()); } @@ -1062,10 +1070,27 @@ public: const AnalysisManagerT &getManager() const { return *AM; } - /// \brief Handle invalidation by ignoring it; this pass is immutable. + /// When invalidation occurs, remove any registered invalidation events. bool invalidate( - IRUnitT &, const PreservedAnalyses &, - typename AnalysisManager<IRUnitT, ExtraArgTs...>::Invalidator &) { + IRUnitT &IRUnit, const PreservedAnalyses &PA, + typename AnalysisManager<IRUnitT, ExtraArgTs...>::Invalidator &Inv) { + // Loop over the set of registered outer invalidation mappings and if any + // of them map to an analysis that is now invalid, clear it out. + SmallVector<AnalysisKey *, 4> DeadKeys; + for (auto &KeyValuePair : OuterAnalysisInvalidationMap) { + AnalysisKey *OuterID = KeyValuePair.first; + auto &InnerIDs = KeyValuePair.second; + InnerIDs.erase(llvm::remove_if(InnerIDs, [&](AnalysisKey *InnerID) { + return Inv.invalidate(InnerID, IRUnit, PA); }), + InnerIDs.end()); + if (InnerIDs.empty()) + DeadKeys.push_back(OuterID); + } + + for (auto OuterID : DeadKeys) + OuterAnalysisInvalidationMap.erase(OuterID); + + // The proxy itself remains valid regardless of anything else. return false; } diff --git a/include/llvm/IR/PatternMatch.h b/include/llvm/IR/PatternMatch.h index 5b69e7855cc7..acb895211644 100644 --- a/include/llvm/IR/PatternMatch.h +++ b/include/llvm/IR/PatternMatch.h @@ -158,12 +158,18 @@ struct match_neg_zero { /// zero inline match_neg_zero m_NegZero() { return match_neg_zero(); } +struct match_any_zero { + template <typename ITy> bool match(ITy *V) { + if (const auto *C = dyn_cast<Constant>(V)) + return C->isZeroValue(); + return false; + } +}; + /// \brief - Match an arbitrary zero/null constant. This includes /// zero_initializer for vectors and ConstantPointerNull for pointers. For /// floating point constants, this will match negative zero and positive zero -inline match_combine_or<match_zero, match_neg_zero> m_AnyZero() { - return m_CombineOr(m_Zero(), m_NegZero()); -} +inline match_any_zero m_AnyZero() { return match_any_zero(); } struct match_nan { template <typename ITy> bool match(ITy *V) { @@ -176,6 +182,39 @@ struct match_nan { /// Match an arbitrary NaN constant. This includes quiet and signalling nans. inline match_nan m_NaN() { return match_nan(); } +struct match_one { + template <typename ITy> bool match(ITy *V) { + if (const auto *C = dyn_cast<Constant>(V)) + return C->isOneValue(); + return false; + } +}; + +/// \brief Match an integer 1 or a vector with all elements equal to 1. +inline match_one m_One() { return match_one(); } + +struct match_all_ones { + template <typename ITy> bool match(ITy *V) { + if (const auto *C = dyn_cast<Constant>(V)) + return C->isAllOnesValue(); + return false; + } +}; + +/// \brief Match an integer or vector with all bits set to true. +inline match_all_ones m_AllOnes() { return match_all_ones(); } + +struct match_sign_mask { + template <typename ITy> bool match(ITy *V) { + if (const auto *C = dyn_cast<Constant>(V)) + return C->isMinSignedValue(); + return false; + } +}; + +/// \brief Match an integer or vector with only the sign bit(s) set. 
+inline match_sign_mask m_SignMask() { return match_sign_mask(); } + struct apint_match { const APInt *&Res; @@ -259,34 +298,6 @@ template <typename Predicate> struct api_pred_ty : public Predicate { } }; -struct is_one { - bool isValue(const APInt &C) { return C.isOneValue(); } -}; - -/// \brief Match an integer 1 or a vector with all elements equal to 1. -inline cst_pred_ty<is_one> m_One() { return cst_pred_ty<is_one>(); } -inline api_pred_ty<is_one> m_One(const APInt *&V) { return V; } - -struct is_all_ones { - bool isValue(const APInt &C) { return C.isAllOnesValue(); } -}; - -/// \brief Match an integer or vector with all bits set to true. -inline cst_pred_ty<is_all_ones> m_AllOnes() { - return cst_pred_ty<is_all_ones>(); -} -inline api_pred_ty<is_all_ones> m_AllOnes(const APInt *&V) { return V; } - -struct is_sign_mask { - bool isValue(const APInt &C) { return C.isSignMask(); } -}; - -/// \brief Match an integer or vector with only the sign bit(s) set. -inline cst_pred_ty<is_sign_mask> m_SignMask() { - return cst_pred_ty<is_sign_mask>(); -} -inline api_pred_ty<is_sign_mask> m_SignMask(const APInt *&V) { return V; } - struct is_power2 { bool isValue(const APInt &C) { return C.isPowerOf2(); } }; diff --git a/include/llvm/IR/SafepointIRVerifier.h b/include/llvm/IR/SafepointIRVerifier.h new file mode 100644 index 000000000000..092050d1d207 --- /dev/null +++ b/include/llvm/IR/SafepointIRVerifier.h @@ -0,0 +1,35 @@ +//===- SafepointIRVerifier.h - Checks for GC relocation problems *- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines a verifier which is useful for enforcing the relocation +// properties required by a relocating GC. Specifically, it looks for uses of +// the unrelocated value of pointer SSA values after a possible safepoint. It +// attempts to report no false negatives, but may end up reporting false +// positives in rare cases (see the note at the top of the corresponding cpp +// file.) +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_IR_SAFEPOINT_IR_VERIFIER +#define LLVM_IR_SAFEPOINT_IR_VERIFIER + +namespace llvm { + +class Function; +class FunctionPass; + +/// Run the safepoint verifier over a single function. Crashes on failure. +void verifySafepointIR(Function &F); + +/// Create an instance of the safepoint verifier pass which can be added to +/// a pass pipeline to check for relocation bugs. +FunctionPass *createSafepointIRVerifierPass(); +} + +#endif // LLVM_IR_SAFEPOINT_IR_VERIFIER diff --git a/include/llvm/IR/Type.h b/include/llvm/IR/Type.h index b37b59288e3f..ef7801266777 100644 --- a/include/llvm/IR/Type.h +++ b/include/llvm/IR/Type.h @@ -202,6 +202,12 @@ public: /// Return true if this is an integer type or a vector of integer types. bool isIntOrIntVectorTy() const { return getScalarType()->isIntegerTy(); } + /// Return true if this is an integer type or a vector of integer types of + /// the given width. + bool isIntOrIntVectorTy(unsigned BitWidth) const { + return getScalarType()->isIntegerTy(BitWidth); + } + /// True if this is an instance of FunctionType. bool isFunctionTy() const { return getTypeID() == FunctionTyID; } |
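
Beyond the two pre-defined scopes, the new LLVMContext API added above lets a target register named scopes and map them to stable IDs. A small sketch of that flow, assuming an existing Module M and an atomic LoadInst *LI; the scope name "agent" is purely illustrative and is not defined by this commit:

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"

using namespace llvm;

void tagWithTargetScope(Module &M, LoadInst *LI) {
  LLVMContext &Ctx = M.getContext();

  // Repeated calls with the same name return the same ID; the pre-defined
  // scopes keep their fixed values (SingleThread = 0, System = 1).
  SyncScope::ID AgentSSID = Ctx.getOrInsertSyncScopeID("agent");

  // The scope now lives in its own SSID field on the instruction rather than
  // in the packed SubclassData bits, so setAtomic simply stores it.
  LI->setAtomic(AtomicOrdering::Acquire, AgentSSID);

  // Registered names come back ordered by increasing SyncScope::ID.
  SmallVector<StringRef, 8> Names;
  Ctx.getSyncScopeNames(Names);
}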