Diffstat (limited to 'lib/IR')
 -rw-r--r--  lib/IR/AsmWriter.cpp           |  66
 -rw-r--r--  lib/IR/CMakeLists.txt          |   1
 -rw-r--r--  lib/IR/ConstantFold.cpp        |  38
 -rw-r--r--  lib/IR/Constants.cpp           |  73
 -rw-r--r--  lib/IR/Core.cpp                |  22
 -rw-r--r--  lib/IR/Instruction.cpp         |  11
 -rw-r--r--  lib/IR/Instructions.cpp        |  74
 -rw-r--r--  lib/IR/LLVMContext.cpp         |  20
 -rw-r--r--  lib/IR/LLVMContextImpl.cpp     |  14
 -rw-r--r--  lib/IR/LLVMContextImpl.h       |  14
 -rw-r--r--  lib/IR/Module.cpp              |   4
 -rw-r--r--  lib/IR/SafepointIRVerifier.cpp | 437
 -rw-r--r--  lib/IR/Type.cpp                |   2
 -rw-r--r--  lib/IR/Verifier.cpp            |  96
 14 files changed, 721 insertions(+), 151 deletions(-)
diff --git a/lib/IR/AsmWriter.cpp b/lib/IR/AsmWriter.cpp
index c7f112887a30..80371780fb6d 100644
--- a/lib/IR/AsmWriter.cpp
+++ b/lib/IR/AsmWriter.cpp
@@ -2119,6 +2119,8 @@ class AssemblyWriter {
bool ShouldPreserveUseListOrder;
UseListOrderStack UseListOrders;
SmallVector<StringRef, 8> MDNames;
+ /// Synchronization scope names registered with LLVMContext.
+ SmallVector<StringRef, 8> SSNs;
public:
/// Construct an AssemblyWriter with an external SlotTracker
@@ -2134,10 +2136,15 @@ public:
void writeOperand(const Value *Op, bool PrintType);
void writeParamOperand(const Value *Operand, AttributeSet Attrs);
void writeOperandBundles(ImmutableCallSite CS);
- void writeAtomic(AtomicOrdering Ordering, SynchronizationScope SynchScope);
- void writeAtomicCmpXchg(AtomicOrdering SuccessOrdering,
+ void writeSyncScope(const LLVMContext &Context,
+ SyncScope::ID SSID);
+ void writeAtomic(const LLVMContext &Context,
+ AtomicOrdering Ordering,
+ SyncScope::ID SSID);
+ void writeAtomicCmpXchg(const LLVMContext &Context,
+ AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
- SynchronizationScope SynchScope);
+ SyncScope::ID SSID);
void writeAllMDNodes();
void writeMDNode(unsigned Slot, const MDNode *Node);
@@ -2199,30 +2206,42 @@ void AssemblyWriter::writeOperand(const Value *Operand, bool PrintType) {
WriteAsOperandInternal(Out, Operand, &TypePrinter, &Machine, TheModule);
}
-void AssemblyWriter::writeAtomic(AtomicOrdering Ordering,
- SynchronizationScope SynchScope) {
- if (Ordering == AtomicOrdering::NotAtomic)
- return;
+void AssemblyWriter::writeSyncScope(const LLVMContext &Context,
+ SyncScope::ID SSID) {
+ switch (SSID) {
+ case SyncScope::System: {
+ break;
+ }
+ default: {
+ if (SSNs.empty())
+ Context.getSyncScopeNames(SSNs);
- switch (SynchScope) {
- case SingleThread: Out << " singlethread"; break;
- case CrossThread: break;
+ Out << " syncscope(\"";
+ PrintEscapedString(SSNs[SSID], Out);
+ Out << "\")";
+ break;
+ }
}
+}
+
+void AssemblyWriter::writeAtomic(const LLVMContext &Context,
+ AtomicOrdering Ordering,
+ SyncScope::ID SSID) {
+ if (Ordering == AtomicOrdering::NotAtomic)
+ return;
+ writeSyncScope(Context, SSID);
Out << " " << toIRString(Ordering);
}
-void AssemblyWriter::writeAtomicCmpXchg(AtomicOrdering SuccessOrdering,
+void AssemblyWriter::writeAtomicCmpXchg(const LLVMContext &Context,
+ AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
- SynchronizationScope SynchScope) {
+ SyncScope::ID SSID) {
assert(SuccessOrdering != AtomicOrdering::NotAtomic &&
FailureOrdering != AtomicOrdering::NotAtomic);
- switch (SynchScope) {
- case SingleThread: Out << " singlethread"; break;
- case CrossThread: break;
- }
-
+ writeSyncScope(Context, SSID);
Out << " " << toIRString(SuccessOrdering);
Out << " " << toIRString(FailureOrdering);
}
@@ -3215,21 +3234,22 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
// Print atomic ordering/alignment for memory operations
if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
if (LI->isAtomic())
- writeAtomic(LI->getOrdering(), LI->getSynchScope());
+ writeAtomic(LI->getContext(), LI->getOrdering(), LI->getSyncScopeID());
if (LI->getAlignment())
Out << ", align " << LI->getAlignment();
} else if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
if (SI->isAtomic())
- writeAtomic(SI->getOrdering(), SI->getSynchScope());
+ writeAtomic(SI->getContext(), SI->getOrdering(), SI->getSyncScopeID());
if (SI->getAlignment())
Out << ", align " << SI->getAlignment();
} else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(&I)) {
- writeAtomicCmpXchg(CXI->getSuccessOrdering(), CXI->getFailureOrdering(),
- CXI->getSynchScope());
+ writeAtomicCmpXchg(CXI->getContext(), CXI->getSuccessOrdering(),
+ CXI->getFailureOrdering(), CXI->getSyncScopeID());
} else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(&I)) {
- writeAtomic(RMWI->getOrdering(), RMWI->getSynchScope());
+ writeAtomic(RMWI->getContext(), RMWI->getOrdering(),
+ RMWI->getSyncScopeID());
} else if (const FenceInst *FI = dyn_cast<FenceInst>(&I)) {
- writeAtomic(FI->getOrdering(), FI->getSynchScope());
+ writeAtomic(FI->getContext(), FI->getOrdering(), FI->getSyncScopeID());
}
// Print Metadata info.
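For reference, a sketch of the textual IR this printing change produces; the "agent" scope here is a hypothetical target-defined name registered through LLVMContext::getOrInsertSyncScopeID:

    fence syncscope("singlethread") seq_cst                        ; was: fence singlethread seq_cst
    fence seq_cst                                                  ; default (system) scope prints nothing
    load atomic i32, i32* %p syncscope("agent") seq_cst, align 4   ; any other registered scope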
diff --git a/lib/IR/CMakeLists.txt b/lib/IR/CMakeLists.txt
index 11259cbe1815..1cc229d68bfc 100644
--- a/lib/IR/CMakeLists.txt
+++ b/lib/IR/CMakeLists.txt
@@ -43,6 +43,7 @@ add_llvm_library(LLVMCore
Pass.cpp
PassManager.cpp
PassRegistry.cpp
+ SafepointIRVerifier.cpp
ProfileSummary.cpp
Statepoint.cpp
Type.cpp
diff --git a/lib/IR/ConstantFold.cpp b/lib/IR/ConstantFold.cpp
index 3469026ad7ed..23ccd8d4cf42 100644
--- a/lib/IR/ConstantFold.cpp
+++ b/lib/IR/ConstantFold.cpp
@@ -242,7 +242,7 @@ static Constant *ExtractConstantBytes(Constant *C, unsigned ByteStart,
// X | -1 -> -1.
if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS))
- if (RHSC->isAllOnesValue())
+ if (RHSC->isMinusOne())
return RHSC;
Constant *LHS = ExtractConstantBytes(CE->getOperand(0), ByteStart,ByteSize);
@@ -1015,33 +1015,33 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode,
if (ConstantInt *CI2 = dyn_cast<ConstantInt>(C2)) {
switch (Opcode) {
case Instruction::Add:
- if (CI2->equalsInt(0)) return C1; // X + 0 == X
+ if (CI2->isZero()) return C1; // X + 0 == X
break;
case Instruction::Sub:
- if (CI2->equalsInt(0)) return C1; // X - 0 == X
+ if (CI2->isZero()) return C1; // X - 0 == X
break;
case Instruction::Mul:
- if (CI2->equalsInt(0)) return C2; // X * 0 == 0
- if (CI2->equalsInt(1))
+ if (CI2->isZero()) return C2; // X * 0 == 0
+ if (CI2->isOne())
return C1; // X * 1 == X
break;
case Instruction::UDiv:
case Instruction::SDiv:
- if (CI2->equalsInt(1))
+ if (CI2->isOne())
return C1; // X / 1 == X
- if (CI2->equalsInt(0))
+ if (CI2->isZero())
return UndefValue::get(CI2->getType()); // X / 0 == undef
break;
case Instruction::URem:
case Instruction::SRem:
- if (CI2->equalsInt(1))
+ if (CI2->isOne())
return Constant::getNullValue(CI2->getType()); // X % 1 == 0
- if (CI2->equalsInt(0))
+ if (CI2->isZero())
return UndefValue::get(CI2->getType()); // X % 0 == undef
break;
case Instruction::And:
if (CI2->isZero()) return C2; // X & 0 == 0
- if (CI2->isAllOnesValue())
+ if (CI2->isMinusOne())
return C1; // X & -1 == X
if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(C1)) {
@@ -1078,12 +1078,12 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode,
}
break;
case Instruction::Or:
- if (CI2->equalsInt(0)) return C1; // X | 0 == X
- if (CI2->isAllOnesValue())
+ if (CI2->isZero()) return C1; // X | 0 == X
+ if (CI2->isMinusOne())
return C2; // X | -1 == -1
break;
case Instruction::Xor:
- if (CI2->equalsInt(0)) return C1; // X ^ 0 == X
+ if (CI2->isZero()) return C1; // X ^ 0 == X
if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(C1)) {
switch (CE1->getOpcode()) {
@@ -1091,7 +1091,7 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode,
case Instruction::ICmp:
case Instruction::FCmp:
// cmp pred ^ true -> cmp !pred
- assert(CI2->equalsInt(1));
+ assert(CI2->isOne());
CmpInst::Predicate pred = (CmpInst::Predicate)CE1->getPredicate();
pred = CmpInst::getInversePredicate(pred);
return ConstantExpr::getCompare(pred, CE1->getOperand(0),
@@ -1126,18 +1126,18 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode,
case Instruction::Mul:
return ConstantInt::get(CI1->getContext(), C1V * C2V);
case Instruction::UDiv:
- assert(!CI2->isNullValue() && "Div by zero handled above");
+ assert(!CI2->isZero() && "Div by zero handled above");
return ConstantInt::get(CI1->getContext(), C1V.udiv(C2V));
case Instruction::SDiv:
- assert(!CI2->isNullValue() && "Div by zero handled above");
+ assert(!CI2->isZero() && "Div by zero handled above");
if (C2V.isAllOnesValue() && C1V.isMinSignedValue())
return UndefValue::get(CI1->getType()); // MIN_INT / -1 -> undef
return ConstantInt::get(CI1->getContext(), C1V.sdiv(C2V));
case Instruction::URem:
- assert(!CI2->isNullValue() && "Div by zero handled above");
+ assert(!CI2->isZero() && "Div by zero handled above");
return ConstantInt::get(CI1->getContext(), C1V.urem(C2V));
case Instruction::SRem:
- assert(!CI2->isNullValue() && "Div by zero handled above");
+ assert(!CI2->isZero() && "Div by zero handled above");
if (C2V.isAllOnesValue() && C1V.isMinSignedValue())
return UndefValue::get(CI1->getType()); // MIN_INT % -1 -> undef
return ConstantInt::get(CI1->getContext(), C1V.srem(C2V));
@@ -1170,7 +1170,7 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode,
case Instruction::LShr:
case Instruction::AShr:
case Instruction::Shl:
- if (CI1->equalsInt(0)) return C1;
+ if (CI1->isZero()) return C1;
break;
default:
break;
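The ConstantInt predicate renames above are mechanical; a sketch of the correspondence, for any ConstantInt *CI:

    // CI->equalsInt(0)     becomes CI->isZero()
    // CI->equalsInt(1)     becomes CI->isOne()
    // CI->isNullValue()    becomes CI->isZero()
    // CI->isAllOnesValue() becomes CI->isMinusOne()  // all-bits-set, i.e. signed -1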
diff --git a/lib/IR/Constants.cpp b/lib/IR/Constants.cpp
index d387a6f0ecb9..e31779c83e3a 100644
--- a/lib/IR/Constants.cpp
+++ b/lib/IR/Constants.cpp
@@ -512,7 +512,7 @@ ConstantInt *ConstantInt::getFalse(LLVMContext &Context) {
}
Constant *ConstantInt::getTrue(Type *Ty) {
- assert(Ty->getScalarType()->isIntegerTy(1) && "Type not i1 or vector of i1.");
+ assert(Ty->isIntOrIntVectorTy(1) && "Type not i1 or vector of i1.");
ConstantInt *TrueC = ConstantInt::getTrue(Ty->getContext());
if (auto *VTy = dyn_cast<VectorType>(Ty))
return ConstantVector::getSplat(VTy->getNumElements(), TrueC);
@@ -520,7 +520,7 @@ Constant *ConstantInt::getTrue(Type *Ty) {
}
Constant *ConstantInt::getFalse(Type *Ty) {
- assert(Ty->getScalarType()->isIntegerTy(1) && "Type not i1 or vector of i1.");
+ assert(Ty->isIntOrIntVectorTy(1) && "Type not i1 or vector of i1.");
ConstantInt *FalseC = ConstantInt::getFalse(Ty->getContext());
if (auto *VTy = dyn_cast<VectorType>(Ty))
return ConstantVector::getSplat(VTy->getNumElements(), FalseC);
@@ -1635,9 +1635,9 @@ Constant *ConstantExpr::getFPToSI(Constant *C, Type *Ty, bool OnlyIfReduced) {
Constant *ConstantExpr::getPtrToInt(Constant *C, Type *DstTy,
bool OnlyIfReduced) {
- assert(C->getType()->getScalarType()->isPointerTy() &&
+ assert(C->getType()->isPtrOrPtrVectorTy() &&
"PtrToInt source must be pointer or pointer vector");
- assert(DstTy->getScalarType()->isIntegerTy() &&
+ assert(DstTy->isIntOrIntVectorTy() &&
"PtrToInt destination must be integer or integer vector");
assert(isa<VectorType>(C->getType()) == isa<VectorType>(DstTy));
if (isa<VectorType>(C->getType()))
@@ -1648,9 +1648,9 @@ Constant *ConstantExpr::getPtrToInt(Constant *C, Type *DstTy,
Constant *ConstantExpr::getIntToPtr(Constant *C, Type *DstTy,
bool OnlyIfReduced) {
- assert(C->getType()->getScalarType()->isIntegerTy() &&
+ assert(C->getType()->isIntOrIntVectorTy() &&
"IntToPtr source must be integer or integer vector");
- assert(DstTy->getScalarType()->isPointerTy() &&
+ assert(DstTy->isPtrOrPtrVectorTy() &&
"IntToPtr destination must be a pointer or pointer vector");
assert(isa<VectorType>(C->getType()) == isa<VectorType>(DstTy));
if (isa<VectorType>(C->getType()))
@@ -1914,8 +1914,8 @@ Constant *ConstantExpr::getGetElementPtr(Type *Ty, Constant *C,
Constant *ConstantExpr::getICmp(unsigned short pred, Constant *LHS,
Constant *RHS, bool OnlyIfReduced) {
assert(LHS->getType() == RHS->getType());
- assert(pred >= ICmpInst::FIRST_ICMP_PREDICATE &&
- pred <= ICmpInst::LAST_ICMP_PREDICATE && "Invalid ICmp Predicate");
+ assert(CmpInst::isIntPredicate((CmpInst::Predicate)pred) &&
+ "Invalid ICmp Predicate");
if (Constant *FC = ConstantFoldCompareInstruction(pred, LHS, RHS))
return FC; // Fold a few common cases...
@@ -1939,7 +1939,8 @@ Constant *ConstantExpr::getICmp(unsigned short pred, Constant *LHS,
Constant *ConstantExpr::getFCmp(unsigned short pred, Constant *LHS,
Constant *RHS, bool OnlyIfReduced) {
assert(LHS->getType() == RHS->getType());
- assert(pred <= FCmpInst::LAST_FCMP_PREDICATE && "Invalid FCmp Predicate");
+ assert(CmpInst::isFPPredicate((CmpInst::Predicate)pred) &&
+ "Invalid FCmp Predicate");
if (Constant *FC = ConstantFoldCompareInstruction(pred, LHS, RHS))
return FC; // Fold a few common cases...
@@ -2379,32 +2380,32 @@ void ConstantDataSequential::destroyConstantImpl() {
Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<uint8_t> Elts) {
Type *Ty = ArrayType::get(Type::getInt8Ty(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*1), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 1), Ty);
}
Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<uint16_t> Elts){
Type *Ty = ArrayType::get(Type::getInt16Ty(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*2), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 2), Ty);
}
Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<uint32_t> Elts){
Type *Ty = ArrayType::get(Type::getInt32Ty(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*4), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 4), Ty);
}
Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<uint64_t> Elts){
Type *Ty = ArrayType::get(Type::getInt64Ty(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*8), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 8), Ty);
}
Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<float> Elts) {
Type *Ty = ArrayType::get(Type::getFloatTy(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*4), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 4), Ty);
}
Constant *ConstantDataArray::get(LLVMContext &Context, ArrayRef<double> Elts) {
Type *Ty = ArrayType::get(Type::getDoubleTy(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size() * 8), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 8), Ty);
}
/// getFP() constructors - Return a constant with array type with an element
@@ -2416,27 +2417,26 @@ Constant *ConstantDataArray::getFP(LLVMContext &Context,
ArrayRef<uint16_t> Elts) {
Type *Ty = ArrayType::get(Type::getHalfTy(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size() * 2), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 2), Ty);
}
Constant *ConstantDataArray::getFP(LLVMContext &Context,
ArrayRef<uint32_t> Elts) {
Type *Ty = ArrayType::get(Type::getFloatTy(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size() * 4), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 4), Ty);
}
Constant *ConstantDataArray::getFP(LLVMContext &Context,
ArrayRef<uint64_t> Elts) {
Type *Ty = ArrayType::get(Type::getDoubleTy(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size() * 8), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 8), Ty);
}
Constant *ConstantDataArray::getString(LLVMContext &Context,
StringRef Str, bool AddNull) {
if (!AddNull) {
const uint8_t *Data = reinterpret_cast<const uint8_t *>(Str.data());
- return get(Context, makeArrayRef(const_cast<uint8_t *>(Data),
- Str.size()));
+ return get(Context, makeArrayRef(Data, Str.size()));
}
SmallVector<uint8_t, 64> ElementVals;
@@ -2451,32 +2451,32 @@ Constant *ConstantDataArray::getString(LLVMContext &Context,
Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<uint8_t> Elts){
Type *Ty = VectorType::get(Type::getInt8Ty(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*1), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 1), Ty);
}
Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<uint16_t> Elts){
Type *Ty = VectorType::get(Type::getInt16Ty(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*2), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 2), Ty);
}
Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<uint32_t> Elts){
Type *Ty = VectorType::get(Type::getInt32Ty(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*4), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 4), Ty);
}
Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<uint64_t> Elts){
Type *Ty = VectorType::get(Type::getInt64Ty(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*8), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 8), Ty);
}
Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<float> Elts) {
Type *Ty = VectorType::get(Type::getFloatTy(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size()*4), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 4), Ty);
}
Constant *ConstantDataVector::get(LLVMContext &Context, ArrayRef<double> Elts) {
Type *Ty = VectorType::get(Type::getDoubleTy(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size() * 8), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 8), Ty);
}
/// getFP() constructors - Return a constant with vector type with an element
@@ -2488,19 +2488,19 @@ Constant *ConstantDataVector::getFP(LLVMContext &Context,
ArrayRef<uint16_t> Elts) {
Type *Ty = VectorType::get(Type::getHalfTy(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size() * 2), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 2), Ty);
}
Constant *ConstantDataVector::getFP(LLVMContext &Context,
ArrayRef<uint32_t> Elts) {
Type *Ty = VectorType::get(Type::getFloatTy(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size() * 4), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 4), Ty);
}
Constant *ConstantDataVector::getFP(LLVMContext &Context,
ArrayRef<uint64_t> Elts) {
Type *Ty = VectorType::get(Type::getDoubleTy(Context), Elts.size());
const char *Data = reinterpret_cast<const char *>(Elts.data());
- return getImpl(StringRef(const_cast<char *>(Data), Elts.size() * 8), Ty);
+ return getImpl(StringRef(Data, Elts.size() * 8), Ty);
}
Constant *ConstantDataVector::getSplat(unsigned NumElts, Constant *V) {
@@ -2555,13 +2555,13 @@ uint64_t ConstantDataSequential::getElementAsInteger(unsigned Elt) const {
switch (getElementType()->getIntegerBitWidth()) {
default: llvm_unreachable("Invalid bitwidth for CDS");
case 8:
- return *const_cast<uint8_t *>(reinterpret_cast<const uint8_t *>(EltPtr));
+ return *reinterpret_cast<const uint8_t *>(EltPtr);
case 16:
- return *const_cast<uint16_t *>(reinterpret_cast<const uint16_t *>(EltPtr));
+ return *reinterpret_cast<const uint16_t *>(EltPtr);
case 32:
- return *const_cast<uint32_t *>(reinterpret_cast<const uint32_t *>(EltPtr));
+ return *reinterpret_cast<const uint32_t *>(EltPtr);
case 64:
- return *const_cast<uint64_t *>(reinterpret_cast<const uint64_t *>(EltPtr));
+ return *reinterpret_cast<const uint64_t *>(EltPtr);
}
}
@@ -2589,16 +2589,13 @@ APFloat ConstantDataSequential::getElementAsAPFloat(unsigned Elt) const {
float ConstantDataSequential::getElementAsFloat(unsigned Elt) const {
assert(getElementType()->isFloatTy() &&
"Accessor can only be used when element is a 'float'");
- const float *EltPtr = reinterpret_cast<const float *>(getElementPointer(Elt));
- return *const_cast<float *>(EltPtr);
+ return *reinterpret_cast<const float *>(getElementPointer(Elt));
}
double ConstantDataSequential::getElementAsDouble(unsigned Elt) const {
assert(getElementType()->isDoubleTy() &&
"Accessor can only be used when element is a 'float'");
- const double *EltPtr =
- reinterpret_cast<const double *>(getElementPointer(Elt));
- return *const_cast<double *>(EltPtr);
+ return *reinterpret_cast<const double *>(getElementPointer(Elt));
}
Constant *ConstantDataSequential::getElementAsConstant(unsigned Elt) const {
diff --git a/lib/IR/Core.cpp b/lib/IR/Core.cpp
index 4ff0261a7f08..2165ae5a9470 100644
--- a/lib/IR/Core.cpp
+++ b/lib/IR/Core.cpp
@@ -50,6 +50,7 @@ void llvm::initializeCore(PassRegistry &Registry) {
initializePrintModulePassWrapperPass(Registry);
initializePrintFunctionPassWrapperPass(Registry);
initializePrintBasicBlockPassPass(Registry);
+ initializeSafepointIRVerifierPass(Registry);
initializeVerifierLegacyPassPass(Registry);
}
@@ -2755,11 +2756,14 @@ static LLVMAtomicOrdering mapToLLVMOrdering(AtomicOrdering Ordering) {
llvm_unreachable("Invalid AtomicOrdering value!");
}
+// TODO: Should this and other atomic instructions support building with
+// "syncscope"?
LLVMValueRef LLVMBuildFence(LLVMBuilderRef B, LLVMAtomicOrdering Ordering,
LLVMBool isSingleThread, const char *Name) {
return wrap(
unwrap(B)->CreateFence(mapFromLLVMOrdering(Ordering),
- isSingleThread ? SingleThread : CrossThread,
+ isSingleThread ? SyncScope::SingleThread
+ : SyncScope::System,
Name));
}
@@ -3041,7 +3045,8 @@ LLVMValueRef LLVMBuildAtomicRMW(LLVMBuilderRef B,LLVMAtomicRMWBinOp op,
case LLVMAtomicRMWBinOpUMin: intop = AtomicRMWInst::UMin; break;
}
return wrap(unwrap(B)->CreateAtomicRMW(intop, unwrap(PTR), unwrap(Val),
- mapFromLLVMOrdering(ordering), singleThread ? SingleThread : CrossThread));
+ mapFromLLVMOrdering(ordering), singleThread ? SyncScope::SingleThread
+ : SyncScope::System));
}
LLVMValueRef LLVMBuildAtomicCmpXchg(LLVMBuilderRef B, LLVMValueRef Ptr,
@@ -3053,7 +3058,7 @@ LLVMValueRef LLVMBuildAtomicCmpXchg(LLVMBuilderRef B, LLVMValueRef Ptr,
return wrap(unwrap(B)->CreateAtomicCmpXchg(unwrap(Ptr), unwrap(Cmp),
unwrap(New), mapFromLLVMOrdering(SuccessOrdering),
mapFromLLVMOrdering(FailureOrdering),
- singleThread ? SingleThread : CrossThread));
+ singleThread ? SyncScope::SingleThread : SyncScope::System));
}
@@ -3061,17 +3066,18 @@ LLVMBool LLVMIsAtomicSingleThread(LLVMValueRef AtomicInst) {
Value *P = unwrap<Value>(AtomicInst);
if (AtomicRMWInst *I = dyn_cast<AtomicRMWInst>(P))
- return I->getSynchScope() == SingleThread;
- return cast<AtomicCmpXchgInst>(P)->getSynchScope() == SingleThread;
+ return I->getSyncScopeID() == SyncScope::SingleThread;
+ return cast<AtomicCmpXchgInst>(P)->getSyncScopeID() ==
+ SyncScope::SingleThread;
}
void LLVMSetAtomicSingleThread(LLVMValueRef AtomicInst, LLVMBool NewValue) {
Value *P = unwrap<Value>(AtomicInst);
- SynchronizationScope Sync = NewValue ? SingleThread : CrossThread;
+ SyncScope::ID SSID = NewValue ? SyncScope::SingleThread : SyncScope::System;
if (AtomicRMWInst *I = dyn_cast<AtomicRMWInst>(P))
- return I->setSynchScope(Sync);
- return cast<AtomicCmpXchgInst>(P)->setSynchScope(Sync);
+ return I->setSyncScopeID(SSID);
+ return cast<AtomicCmpXchgInst>(P)->setSyncScopeID(SSID);
}
LLVMAtomicOrdering LLVMGetCmpXchgSuccessOrdering(LLVMValueRef CmpXchgInst) {
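A minimal sketch of the C API behavior after this change, assuming a builder B and values Ptr and Val created elsewhere:

    /* The two pre-defined scopes remain reachable through the existing
       singleThread flag; named scopes are not yet exposed (see TODO above). */
    LLVMValueRef RMW =
        LLVMBuildAtomicRMW(B, LLVMAtomicRMWBinOpAdd, Ptr, Val,
                           LLVMAtomicOrderingSequentiallyConsistent,
                           /*singleThread=*/1);   /* SyncScope::SingleThread */
    LLVMSetAtomicSingleThread(RMW, 0);            /* back to SyncScope::System */
    assert(!LLVMIsAtomicSingleThread(RMW));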
diff --git a/lib/IR/Instruction.cpp b/lib/IR/Instruction.cpp
index 3dd653d2d047..365cb019aec4 100644
--- a/lib/IR/Instruction.cpp
+++ b/lib/IR/Instruction.cpp
@@ -362,13 +362,13 @@ static bool haveSameSpecialState(const Instruction *I1, const Instruction *I2,
(LI->getAlignment() == cast<LoadInst>(I2)->getAlignment() ||
IgnoreAlignment) &&
LI->getOrdering() == cast<LoadInst>(I2)->getOrdering() &&
- LI->getSynchScope() == cast<LoadInst>(I2)->getSynchScope();
+ LI->getSyncScopeID() == cast<LoadInst>(I2)->getSyncScopeID();
if (const StoreInst *SI = dyn_cast<StoreInst>(I1))
return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() &&
(SI->getAlignment() == cast<StoreInst>(I2)->getAlignment() ||
IgnoreAlignment) &&
SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() &&
- SI->getSynchScope() == cast<StoreInst>(I2)->getSynchScope();
+ SI->getSyncScopeID() == cast<StoreInst>(I2)->getSyncScopeID();
if (const CmpInst *CI = dyn_cast<CmpInst>(I1))
return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate();
if (const CallInst *CI = dyn_cast<CallInst>(I1))
@@ -386,7 +386,7 @@ static bool haveSameSpecialState(const Instruction *I1, const Instruction *I2,
return EVI->getIndices() == cast<ExtractValueInst>(I2)->getIndices();
if (const FenceInst *FI = dyn_cast<FenceInst>(I1))
return FI->getOrdering() == cast<FenceInst>(I2)->getOrdering() &&
- FI->getSynchScope() == cast<FenceInst>(I2)->getSynchScope();
+ FI->getSyncScopeID() == cast<FenceInst>(I2)->getSyncScopeID();
if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
@@ -394,12 +394,13 @@ static bool haveSameSpecialState(const Instruction *I1, const Instruction *I2,
cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
CXI->getFailureOrdering() ==
cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&
- CXI->getSynchScope() == cast<AtomicCmpXchgInst>(I2)->getSynchScope();
+ CXI->getSyncScopeID() ==
+ cast<AtomicCmpXchgInst>(I2)->getSyncScopeID();
if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
- RMWI->getSynchScope() == cast<AtomicRMWInst>(I2)->getSynchScope();
+ RMWI->getSyncScopeID() == cast<AtomicRMWInst>(I2)->getSyncScopeID();
return true;
}
diff --git a/lib/IR/Instructions.cpp b/lib/IR/Instructions.cpp
index a79b00be4ffe..2c49564e328b 100644
--- a/lib/IR/Instructions.cpp
+++ b/lib/IR/Instructions.cpp
@@ -1304,34 +1304,34 @@ LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
unsigned Align, Instruction *InsertBef)
: LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
- CrossThread, InsertBef) {}
+ SyncScope::System, InsertBef) {}
LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
unsigned Align, BasicBlock *InsertAE)
: LoadInst(Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
- CrossThread, InsertAE) {}
+ SyncScope::System, InsertAE) {}
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
unsigned Align, AtomicOrdering Order,
- SynchronizationScope SynchScope, Instruction *InsertBef)
+ SyncScope::ID SSID, Instruction *InsertBef)
: UnaryInstruction(Ty, Load, Ptr, InsertBef) {
assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
setVolatile(isVolatile);
setAlignment(Align);
- setAtomic(Order, SynchScope);
+ setAtomic(Order, SSID);
AssertOK();
setName(Name);
}
LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
unsigned Align, AtomicOrdering Order,
- SynchronizationScope SynchScope,
+ SyncScope::ID SSID,
BasicBlock *InsertAE)
: UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
Load, Ptr, InsertAE) {
setVolatile(isVolatile);
setAlignment(Align);
- setAtomic(Order, SynchScope);
+ setAtomic(Order, SSID);
AssertOK();
setName(Name);
}
@@ -1419,16 +1419,16 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
Instruction *InsertBefore)
: StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
- CrossThread, InsertBefore) {}
+ SyncScope::System, InsertBefore) {}
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
BasicBlock *InsertAtEnd)
: StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
- CrossThread, InsertAtEnd) {}
+ SyncScope::System, InsertAtEnd) {}
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
unsigned Align, AtomicOrdering Order,
- SynchronizationScope SynchScope,
+ SyncScope::ID SSID,
Instruction *InsertBefore)
: Instruction(Type::getVoidTy(val->getContext()), Store,
OperandTraits<StoreInst>::op_begin(this),
@@ -1438,13 +1438,13 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
Op<1>() = addr;
setVolatile(isVolatile);
setAlignment(Align);
- setAtomic(Order, SynchScope);
+ setAtomic(Order, SSID);
AssertOK();
}
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
unsigned Align, AtomicOrdering Order,
- SynchronizationScope SynchScope,
+ SyncScope::ID SSID,
BasicBlock *InsertAtEnd)
: Instruction(Type::getVoidTy(val->getContext()), Store,
OperandTraits<StoreInst>::op_begin(this),
@@ -1454,7 +1454,7 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
Op<1>() = addr;
setVolatile(isVolatile);
setAlignment(Align);
- setAtomic(Order, SynchScope);
+ setAtomic(Order, SSID);
AssertOK();
}
@@ -1474,13 +1474,13 @@ void StoreInst::setAlignment(unsigned Align) {
void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
- SynchronizationScope SynchScope) {
+ SyncScope::ID SSID) {
Op<0>() = Ptr;
Op<1>() = Cmp;
Op<2>() = NewVal;
setSuccessOrdering(SuccessOrdering);
setFailureOrdering(FailureOrdering);
- setSynchScope(SynchScope);
+ setSyncScopeID(SSID);
assert(getOperand(0) && getOperand(1) && getOperand(2) &&
"All operands must be non-null!");
@@ -1507,25 +1507,25 @@ void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
- SynchronizationScope SynchScope,
+ SyncScope::ID SSID,
Instruction *InsertBefore)
: Instruction(
StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
- Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SynchScope);
+ Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SSID);
}
AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
- SynchronizationScope SynchScope,
+ SyncScope::ID SSID,
BasicBlock *InsertAtEnd)
: Instruction(
StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {
- Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SynchScope);
+ Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SSID);
}
//===----------------------------------------------------------------------===//
@@ -1534,12 +1534,12 @@ AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
AtomicOrdering Ordering,
- SynchronizationScope SynchScope) {
+ SyncScope::ID SSID) {
Op<0>() = Ptr;
Op<1>() = Val;
setOperation(Operation);
setOrdering(Ordering);
- setSynchScope(SynchScope);
+ setSyncScopeID(SSID);
assert(getOperand(0) && getOperand(1) &&
"All operands must be non-null!");
@@ -1554,24 +1554,24 @@ void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
AtomicOrdering Ordering,
- SynchronizationScope SynchScope,
+ SyncScope::ID SSID,
Instruction *InsertBefore)
: Instruction(Val->getType(), AtomicRMW,
OperandTraits<AtomicRMWInst>::op_begin(this),
OperandTraits<AtomicRMWInst>::operands(this),
InsertBefore) {
- Init(Operation, Ptr, Val, Ordering, SynchScope);
+ Init(Operation, Ptr, Val, Ordering, SSID);
}
AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
AtomicOrdering Ordering,
- SynchronizationScope SynchScope,
+ SyncScope::ID SSID,
BasicBlock *InsertAtEnd)
: Instruction(Val->getType(), AtomicRMW,
OperandTraits<AtomicRMWInst>::op_begin(this),
OperandTraits<AtomicRMWInst>::operands(this),
InsertAtEnd) {
- Init(Operation, Ptr, Val, Ordering, SynchScope);
+ Init(Operation, Ptr, Val, Ordering, SSID);
}
//===----------------------------------------------------------------------===//
@@ -1579,19 +1579,19 @@ AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
//===----------------------------------------------------------------------===//
FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
- SynchronizationScope SynchScope,
+ SyncScope::ID SSID,
Instruction *InsertBefore)
: Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
setOrdering(Ordering);
- setSynchScope(SynchScope);
+ setSyncScopeID(SSID);
}
FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
- SynchronizationScope SynchScope,
+ SyncScope::ID SSID,
BasicBlock *InsertAtEnd)
: Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) {
setOrdering(Ordering);
- setSynchScope(SynchScope);
+ setSyncScopeID(SSID);
}
//===----------------------------------------------------------------------===//
@@ -3064,16 +3064,14 @@ CastInst::castIsValid(Instruction::CastOps op, Value *S, Type *DstTy) {
if (VectorType *VT = dyn_cast<VectorType>(SrcTy))
if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements())
return false;
- return SrcTy->getScalarType()->isPointerTy() &&
- DstTy->getScalarType()->isIntegerTy();
+ return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
case Instruction::IntToPtr:
if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy))
return false;
if (VectorType *VT = dyn_cast<VectorType>(SrcTy))
if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements())
return false;
- return SrcTy->getScalarType()->isIntegerTy() &&
- DstTy->getScalarType()->isPointerTy();
+ return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
case Instruction::BitCast: {
PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
@@ -3797,12 +3795,12 @@ AllocaInst *AllocaInst::cloneImpl() const {
LoadInst *LoadInst::cloneImpl() const {
return new LoadInst(getOperand(0), Twine(), isVolatile(),
- getAlignment(), getOrdering(), getSynchScope());
+ getAlignment(), getOrdering(), getSyncScopeID());
}
StoreInst *StoreInst::cloneImpl() const {
return new StoreInst(getOperand(0), getOperand(1), isVolatile(),
- getAlignment(), getOrdering(), getSynchScope());
+ getAlignment(), getOrdering(), getSyncScopeID());
}
@@ -3810,7 +3808,7 @@ AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {
AtomicCmpXchgInst *Result =
new AtomicCmpXchgInst(getOperand(0), getOperand(1), getOperand(2),
getSuccessOrdering(), getFailureOrdering(),
- getSynchScope());
+ getSyncScopeID());
Result->setVolatile(isVolatile());
Result->setWeak(isWeak());
return Result;
@@ -3818,14 +3816,14 @@ AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {
AtomicRMWInst *AtomicRMWInst::cloneImpl() const {
AtomicRMWInst *Result =
- new AtomicRMWInst(getOperation(),getOperand(0), getOperand(1),
- getOrdering(), getSynchScope());
+ new AtomicRMWInst(getOperation(), getOperand(0), getOperand(1),
+ getOrdering(), getSyncScopeID());
Result->setVolatile(isVolatile());
return Result;
}
FenceInst *FenceInst::cloneImpl() const {
- return new FenceInst(getContext(), getOrdering(), getSynchScope());
+ return new FenceInst(getContext(), getOrdering(), getSyncScopeID());
}
TruncInst *TruncInst::cloneImpl() const {
diff --git a/lib/IR/LLVMContext.cpp b/lib/IR/LLVMContext.cpp
index 2e13f362344d..c58459d6d5f5 100644
--- a/lib/IR/LLVMContext.cpp
+++ b/lib/IR/LLVMContext.cpp
@@ -81,6 +81,18 @@ LLVMContext::LLVMContext() : pImpl(new LLVMContextImpl(*this)) {
assert(GCTransitionEntry->second == LLVMContext::OB_gc_transition &&
"gc-transition operand bundle id drifted!");
(void)GCTransitionEntry;
+
+ SyncScope::ID SingleThreadSSID =
+ pImpl->getOrInsertSyncScopeID("singlethread");
+ assert(SingleThreadSSID == SyncScope::SingleThread &&
+ "singlethread synchronization scope ID drifted!");
+ (void)SingleThreadSSID;
+
+ SyncScope::ID SystemSSID =
+ pImpl->getOrInsertSyncScopeID("");
+ assert(SystemSSID == SyncScope::System &&
+ "system synchronization scope ID drifted!");
+ (void)SystemSSID;
}
LLVMContext::~LLVMContext() { delete pImpl; }
@@ -255,6 +267,14 @@ uint32_t LLVMContext::getOperandBundleTagID(StringRef Tag) const {
return pImpl->getOperandBundleTagID(Tag);
}
+SyncScope::ID LLVMContext::getOrInsertSyncScopeID(StringRef SSN) {
+ return pImpl->getOrInsertSyncScopeID(SSN);
+}
+
+void LLVMContext::getSyncScopeNames(SmallVectorImpl<StringRef> &SSNs) const {
+ pImpl->getSyncScopeNames(SSNs);
+}
+
void LLVMContext::setGC(const Function &Fn, std::string GCName) {
auto It = pImpl->GCNames.find(&Fn);
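A sketch of the new interface from a frontend's perspective, assuming a module M and an insertion block BB; it composes with the FenceInst constructor changed above in Instructions.cpp:

    LLVMContext &Ctx = M.getContext();
    // "" and "singlethread" map to the fixed SyncScope::System and
    // SyncScope::SingleThread IDs; any other name gets a context-unique ID.
    SyncScope::ID AgentSSID = Ctx.getOrInsertSyncScopeID("agent");
    new FenceInst(Ctx, AtomicOrdering::SequentiallyConsistent, AgentSSID, BB);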
diff --git a/lib/IR/LLVMContextImpl.cpp b/lib/IR/LLVMContextImpl.cpp
index c19e1be44fdc..57dd08b36fe7 100644
--- a/lib/IR/LLVMContextImpl.cpp
+++ b/lib/IR/LLVMContextImpl.cpp
@@ -205,6 +205,20 @@ uint32_t LLVMContextImpl::getOperandBundleTagID(StringRef Tag) const {
return I->second;
}
+SyncScope::ID LLVMContextImpl::getOrInsertSyncScopeID(StringRef SSN) {
+ auto NewSSID = SSC.size();
+ assert(NewSSID < std::numeric_limits<SyncScope::ID>::max() &&
+ "Hit the maximum number of synchronization scopes allowed!");
+ return SSC.insert(std::make_pair(SSN, SyncScope::ID(NewSSID))).first->second;
+}
+
+void LLVMContextImpl::getSyncScopeNames(
+ SmallVectorImpl<StringRef> &SSNs) const {
+ SSNs.resize(SSC.size());
+ for (const auto &SSE : SSC)
+ SSNs[SSE.second] = SSE.first();
+}
+
/// Singleton instance of the OptBisect class.
///
/// This singleton is accessed via the LLVMContext::getOptBisect() function. It
diff --git a/lib/IR/LLVMContextImpl.h b/lib/IR/LLVMContextImpl.h
index 395beb57fe37..e413a4f34432 100644
--- a/lib/IR/LLVMContextImpl.h
+++ b/lib/IR/LLVMContextImpl.h
@@ -1297,6 +1297,20 @@ public:
void getOperandBundleTags(SmallVectorImpl<StringRef> &Tags) const;
uint32_t getOperandBundleTagID(StringRef Tag) const;
+ /// A set of interned synchronization scopes. The StringMap maps
+ /// synchronization scope names to their respective synchronization scope IDs.
+ StringMap<SyncScope::ID> SSC;
+
+ /// getOrInsertSyncScopeID - Maps synchronization scope name to
+ /// synchronization scope ID. Every synchronization scope registered with
+ /// LLVMContext has a unique ID, except for the pre-defined ones.
+ SyncScope::ID getOrInsertSyncScopeID(StringRef SSN);
+
+ /// getSyncScopeNames - Populates the client-supplied SmallVector with
+ /// synchronization scope names registered with LLVMContext. Synchronization
+ /// scope names are ordered by increasing synchronization scope IDs.
+ void getSyncScopeNames(SmallVectorImpl<StringRef> &SSNs) const;
+
/// Maintain the GC name for each function.
///
/// This saves allocating an additional word in Function for programs which
diff --git a/lib/IR/Module.cpp b/lib/IR/Module.cpp
index f8853ed169c5..fdc7de6eaa34 100644
--- a/lib/IR/Module.cpp
+++ b/lib/IR/Module.cpp
@@ -88,7 +88,7 @@ Module::~Module() {
delete static_cast<StringMap<NamedMDNode *> *>(NamedMDSymTab);
}
-RandomNumberGenerator *Module::createRNG(const Pass* P) const {
+std::unique_ptr<RandomNumberGenerator> Module::createRNG(const Pass* P) const {
SmallString<32> Salt(P->getPassName());
// This RNG is guaranteed to produce the same random stream only
@@ -103,7 +103,7 @@ RandomNumberGenerator *Module::createRNG(const Pass* P) const {
// store salt metadata from the Module constructor.
Salt += sys::path::filename(getModuleIdentifier());
- return new RandomNumberGenerator(Salt);
+ return std::unique_ptr<RandomNumberGenerator>{new RandomNumberGenerator(Salt)};
}
/// getNamedValue - Return the first global value in the module with
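Call sites now take ownership directly; a sketch, assuming a pass pointer P and that RandomNumberGenerator's call operator is used to draw bits:

    std::unique_ptr<RandomNumberGenerator> RNG = M.createRNG(P);
    uint64_t Bits = (*RNG)(); // the generator is a callable engine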
diff --git a/lib/IR/SafepointIRVerifier.cpp b/lib/IR/SafepointIRVerifier.cpp
new file mode 100644
index 000000000000..8b328c221da3
--- /dev/null
+++ b/lib/IR/SafepointIRVerifier.cpp
@@ -0,0 +1,437 @@
+//===-- SafepointIRVerifier.cpp - Verify gc.statepoint invariants ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Run a sanity check on the IR to ensure that Safepoints - if they've been
+// inserted - were inserted correctly. In particular, look for use of
+// non-relocated values after a safepoint. Its primary use is to check the
+// correctness of safepoint insertion immediately after insertion, but it can
+// also be used to verify that later transforms have not found a way to break
+// safepoint semantics.
+//
+// In its current form, this verifier checks a property which is sufficient, but
+// not necessary for correctness. There are some cases where an unrelocated
+// pointer can be used after the safepoint. Consider this example:
+//
+// a = ...
+// b = ...
+// (a',b') = safepoint(a,b)
+// c = cmp eq a b
+// br c, ..., ....
+//
+// Because it is valid to reorder 'c' above the safepoint, this is legal. In
+// practice, this is a somewhat uncommon transform, but CodeGenPrep does create
+// idioms like this. The verifier knows about these cases and avoids reporting
+// false positives.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SetOperations.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Value.h"
+#include "llvm/IR/SafepointIRVerifier.h"
+#include "llvm/IR/Statepoint.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/raw_ostream.h"
+
+#define DEBUG_TYPE "safepoint-ir-verifier"
+
+using namespace llvm;
+
+/// This option is used for writing test cases. Instead of crashing the program
+/// when verification fails, report a message to the console (for FileCheck
+/// usage) and continue execution as if nothing happened.
+static cl::opt<bool> PrintOnly("safepoint-ir-verifier-print-only",
+ cl::init(false));
+
+static void Verify(const Function &F, const DominatorTree &DT);
+
+struct SafepointIRVerifier : public FunctionPass {
+ static char ID; // Pass identification, replacement for typeid
+ DominatorTree DT;
+ SafepointIRVerifier() : FunctionPass(ID) {
+ initializeSafepointIRVerifierPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnFunction(Function &F) override {
+ DT.recalculate(F);
+ Verify(F, DT);
+ return false; // no modifications
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesAll();
+ }
+
+ StringRef getPassName() const override { return "safepoint verifier"; }
+};
+
+void llvm::verifySafepointIR(Function &F) {
+ SafepointIRVerifier pass;
+ pass.runOnFunction(F);
+}
+
+char SafepointIRVerifier::ID = 0;
+
+FunctionPass *llvm::createSafepointIRVerifierPass() {
+ return new SafepointIRVerifier();
+}
+
+INITIALIZE_PASS_BEGIN(SafepointIRVerifier, "verify-safepoint-ir",
+ "Safepoint IR Verifier", false, true)
+INITIALIZE_PASS_END(SafepointIRVerifier, "verify-safepoint-ir",
+ "Safepoint IR Verifier", false, true)
+
+static bool isGCPointerType(Type *T) {
+ if (auto *PT = dyn_cast<PointerType>(T))
+ // For the sake of this example GC, we arbitrarily pick addrspace(1) as our
+ // GC managed heap. We know that a pointer into this heap needs to be
+ // updated and that no other pointer does.
+ return (1 == PT->getAddressSpace());
+ return false;
+}
+
+static bool containsGCPtrType(Type *Ty) {
+ if (isGCPointerType(Ty))
+ return true;
+ if (VectorType *VT = dyn_cast<VectorType>(Ty))
+ return isGCPointerType(VT->getScalarType());
+ if (ArrayType *AT = dyn_cast<ArrayType>(Ty))
+ return containsGCPtrType(AT->getElementType());
+ if (StructType *ST = dyn_cast<StructType>(Ty))
+ return std::any_of(ST->subtypes().begin(), ST->subtypes().end(),
+ containsGCPtrType);
+ return false;
+}
+
+// Debugging aid -- prints a [Begin, End) range of values.
+template<typename IteratorTy>
+static void PrintValueSet(raw_ostream &OS, IteratorTy Begin, IteratorTy End) {
+ OS << "[ ";
+ while (Begin != End) {
+ OS << **Begin << " ";
+ ++Begin;
+ }
+ OS << "]";
+}
+
+/// The verifier algorithm is phrased in terms of availability. The set of
+/// values "available" at a given point in the control flow graph is the set of
+/// correctly relocated values at that point, and is a subset of the set of
+/// definitions dominating that point.
+
+/// State we compute and track per basic block.
+struct BasicBlockState {
+ // Set of values available coming in, before the phi nodes
+ DenseSet<const Value *> AvailableIn;
+
+ // Set of values available going out
+ DenseSet<const Value *> AvailableOut;
+
+ // AvailableOut minus AvailableIn.
+ // All elements are Instructions
+ DenseSet<const Value *> Contribution;
+
+ // True if this block contains a safepoint and thus AvailableIn does not
+ // contribute to AvailableOut.
+ bool Cleared = false;
+};
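+// A small worked example (illustrative only): given a block
+//
+//   bb:
+//     %a = ...                    ; GC pointer, i.e. addrspace(1)
+//     ... gc.statepoint ...       ; safepoint
+//     %b = ...                    ; GC pointer, i.e. addrspace(1)
+//
+// the transfer function yields Cleared = true and Contribution = {%b}, so
+// AvailableOut = {%b} no matter what AvailableIn is.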
+
+
+/// Gather all the definitions dominating the start of BB into Result. This is
+/// simply the Defs introduced by every dominating basic block and the function
+/// arguments.
+static void GatherDominatingDefs(const BasicBlock *BB,
+ DenseSet<const Value *> &Result,
+ const DominatorTree &DT,
+ DenseMap<const BasicBlock *, BasicBlockState *> &BlockMap) {
+ DomTreeNode *DTN = DT[const_cast<BasicBlock *>(BB)];
+
+ while (DTN->getIDom()) {
+ DTN = DTN->getIDom();
+ const auto &Defs = BlockMap[DTN->getBlock()]->Contribution;
+ Result.insert(Defs.begin(), Defs.end());
+ // If this block is 'Cleared', then nothing LiveIn to this block can be
+ // available after this block completes. Note: This turns out to be
+ // really important for reducing memory consumption of the initial available
+ // sets and thus peak memory usage by this verifier.
+ if (BlockMap[DTN->getBlock()]->Cleared)
+ return;
+ }
+
+ for (const Argument &A : BB->getParent()->args())
+ if (containsGCPtrType(A.getType()))
+ Result.insert(&A);
+}
+
+/// Model the effect of an instruction on the set of available values.
+static void TransferInstruction(const Instruction &I, bool &Cleared,
+ DenseSet<const Value *> &Available) {
+ if (isStatepoint(I)) {
+ Cleared = true;
+ Available.clear();
+ } else if (containsGCPtrType(I.getType()))
+ Available.insert(&I);
+}
+
+/// Compute the AvailableOut set for BB from its BasicBlockState BBS. FirstPass
+/// is set when the verifier computes the AvailableOut set for BB for the first
+/// time.
+static void TransferBlock(const BasicBlock *BB,
+ BasicBlockState &BBS, bool FirstPass) {
+
+ const DenseSet<const Value *> &AvailableIn = BBS.AvailableIn;
+ DenseSet<const Value *> &AvailableOut = BBS.AvailableOut;
+
+ if (BBS.Cleared) {
+ // AvailableOut does not change no matter how the input changes, just
+ // leave it be. We need to force this calculation the first time so that
+ // we have an AvailableOut at all.
+ if (FirstPass) {
+ AvailableOut = BBS.Contribution;
+ }
+ } else {
+ // Otherwise, we need to reduce the AvailableOut set by things which are no
+ // longer in our AvailableIn
+ DenseSet<const Value *> Temp = BBS.Contribution;
+ set_union(Temp, AvailableIn);
+ AvailableOut = std::move(Temp);
+ }
+
+ DEBUG(dbgs() << "Transferred block " << BB->getName() << " from ";
+ PrintValueSet(dbgs(), AvailableIn.begin(), AvailableIn.end());
+ dbgs() << " to ";
+ PrintValueSet(dbgs(), AvailableOut.begin(), AvailableOut.end());
+ dbgs() << "\n";);
+}
+
+/// A given derived pointer can have multiple base pointers through phi/selects.
+/// This type indicates when the base pointer is exclusively constant
+/// (ExclusivelySomeConstant), and if that constant is proven to be exclusively
+/// null, we record that as ExclusivelyNull. In all other cases, the BaseType is
+/// NonConstant.
+enum BaseType {
+ NonConstant = 1,        // Base pointer is not exclusively constant.
+ ExclusivelyNull,
+ ExclusivelySomeConstant // Base pointers for a given derived pointer come from
+                         // a set of constants, but they are not exclusively
+                         // null.
+};
+
+/// Return the baseType for Val which states whether Val is exclusively
+/// derived from constant/null, or not exclusively derived from constant.
+/// Val is exclusively derived off a constant base when all operands of phi and
+/// selects are derived off a constant base.
+static enum BaseType getBaseType(const Value *Val) {
+
+ SmallVector<const Value *, 32> Worklist;
+ DenseSet<const Value *> Visited;
+ bool isExclusivelyDerivedFromNull = true;
+ Worklist.push_back(Val);
+ // Strip through all the bitcasts and geps to get the base pointer. Also check for
+ // the exclusive value when there can be multiple base pointers (through phis
+ // or selects).
+ while(!Worklist.empty()) {
+ const Value *V = Worklist.pop_back_val();
+ if (!Visited.insert(V).second)
+ continue;
+
+ if (const auto *CI = dyn_cast<CastInst>(V)) {
+ Worklist.push_back(CI->stripPointerCasts());
+ continue;
+ }
+ if (const auto *GEP = dyn_cast<GetElementPtrInst>(V)) {
+ Worklist.push_back(GEP->getPointerOperand());
+ continue;
+ }
+ // Push all the incoming values of phi node into the worklist for
+ // processing.
+ if (const auto *PN = dyn_cast<PHINode>(V)) {
+ for (Value *InV: PN->incoming_values())
+ Worklist.push_back(InV);
+ continue;
+ }
+ if (const auto *SI = dyn_cast<SelectInst>(V)) {
+ // Push in the true and false values
+ Worklist.push_back(SI->getTrueValue());
+ Worklist.push_back(SI->getFalseValue());
+ continue;
+ }
+ if (isa<Constant>(V)) {
+ // We found at least one base pointer which is non-null, so this derived
+ // pointer is not exclusively derived from null.
+ if (V != Constant::getNullValue(V->getType()))
+ isExclusivelyDerivedFromNull = false;
+ // Continue processing the remaining values to make sure it's exclusively
+ // constant.
+ continue;
+ }
+ // At this point, we know that the base pointer is not exclusively
+ // constant.
+ return BaseType::NonConstant;
+ }
+ // Now, we know that the base pointer is exclusively constant, but we need to
+ // differentiate between exclusive null constant and non-null constant.
+ return isExclusivelyDerivedFromNull ? BaseType::ExclusivelyNull
+ : BaseType::ExclusivelySomeConstant;
+}
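+// Illustrative examples: a phi over null-derived geps is ExclusivelyNull; a
+// phi over null and another constant pointer is ExclusivelySomeConstant; a
+// phi that reaches any non-constant leaf (e.g. a load result) is NonConstant.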
+
+static void Verify(const Function &F, const DominatorTree &DT) {
+ SpecificBumpPtrAllocator<BasicBlockState> BSAllocator;
+ DenseMap<const BasicBlock *, BasicBlockState *> BlockMap;
+
+ DEBUG(dbgs() << "Verifying gc pointers in function: " << F.getName() << "\n");
+ if (PrintOnly)
+ dbgs() << "Verifying gc pointers in function: " << F.getName() << "\n";
+
+
+ for (const BasicBlock &BB : F) {
+ BasicBlockState *BBS = new(BSAllocator.Allocate()) BasicBlockState;
+ for (const auto &I : BB)
+ TransferInstruction(I, BBS->Cleared, BBS->Contribution);
+ BlockMap[&BB] = BBS;
+ }
+
+ for (auto &BBI : BlockMap) {
+ GatherDominatingDefs(BBI.first, BBI.second->AvailableIn, DT, BlockMap);
+ TransferBlock(BBI.first, *BBI.second, true);
+ }
+
+ SetVector<const BasicBlock *> Worklist;
+ for (auto &BBI : BlockMap)
+ Worklist.insert(BBI.first);
+
+ // This loop iterates the AvailableIn and AvailableOut sets to a fixed point.
+ // The AvailableIn and AvailableOut sets decrease as we iterate.
+ while (!Worklist.empty()) {
+ const BasicBlock *BB = Worklist.pop_back_val();
+ BasicBlockState *BBS = BlockMap[BB];
+
+ size_t OldInCount = BBS->AvailableIn.size();
+ for (const BasicBlock *PBB : predecessors(BB))
+ set_intersect(BBS->AvailableIn, BlockMap[PBB]->AvailableOut);
+
+ if (OldInCount == BBS->AvailableIn.size())
+ continue;
+
+ assert(OldInCount > BBS->AvailableIn.size() && "invariant!");
+
+ size_t OldOutCount = BBS->AvailableOut.size();
+ TransferBlock(BB, *BBS, false);
+ if (OldOutCount != BBS->AvailableOut.size()) {
+ assert(OldOutCount > BBS->AvailableOut.size() && "invariant!");
+ Worklist.insert(succ_begin(BB), succ_end(BB));
+ }
+ }
+
+ // We now have all the information we need to decide if the use of a heap
+ // reference is legal or not, given our safepoint semantics.
+
+ bool AnyInvalidUses = false;
+
+ auto ReportInvalidUse = [&AnyInvalidUses](const Value &V,
+ const Instruction &I) {
+ errs() << "Illegal use of unrelocated value found!\n";
+ errs() << "Def: " << V << "\n";
+ errs() << "Use: " << I << "\n";
+ if (!PrintOnly)
+ abort();
+ AnyInvalidUses = true;
+ };
+
+ auto isNotExclusivelyConstantDerived = [](const Value *V) {
+ return getBaseType(V) == BaseType::NonConstant;
+ };
+
+ for (const BasicBlock &BB : F) {
+ // We destructively modify AvailableIn as we traverse the block instruction
+ // by instruction.
+ DenseSet<const Value *> &AvailableSet = BlockMap[&BB]->AvailableIn;
+ for (const Instruction &I : BB) {
+ if (const PHINode *PN = dyn_cast<PHINode>(&I)) {
+ if (containsGCPtrType(PN->getType()))
+ for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
+ const BasicBlock *InBB = PN->getIncomingBlock(i);
+ const Value *InValue = PN->getIncomingValue(i);
+
+ if (isNotExclusivelyConstantDerived(InValue) &&
+ !BlockMap[InBB]->AvailableOut.count(InValue))
+ ReportInvalidUse(*InValue, *PN);
+ }
+ } else if (isa<CmpInst>(I) &&
+ containsGCPtrType(I.getOperand(0)->getType())) {
+ Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
+ enum BaseType baseTyLHS = getBaseType(LHS),
+ baseTyRHS = getBaseType(RHS);
+
+ // Returns true if LHS and RHS are unrelocated pointers and they are
+ // valid unrelocated uses.
+ auto hasValidUnrelocatedUse = [&AvailableSet, baseTyLHS, baseTyRHS, &LHS, &RHS] () {
+ // A cmp instruction has valid unrelocated pointer operands only if
+ // both operands are unrelocated pointers.
+ // In the comparison between two pointers, if one is an unrelocated
+ // use, the other *should be* an unrelocated use, for this
+ // instruction to contain valid unrelocated uses. This unrelocated
+ // use can be a null constant as well, or another unrelocated
+ // pointer.
+ if (AvailableSet.count(LHS) || AvailableSet.count(RHS))
+ return false;
+ // Constant pointers (that are not exclusively null) may have
+ // meaning in different VMs, so we cannot reorder the compare
+ // against constant pointers before the safepoint. In other words,
+ // comparison of an unrelocated use against a non-null constant
+ // may be invalid.
+ if ((baseTyLHS == BaseType::ExclusivelySomeConstant &&
+ baseTyRHS == BaseType::NonConstant) ||
+ (baseTyLHS == BaseType::NonConstant &&
+ baseTyRHS == BaseType::ExclusivelySomeConstant))
+ return false;
+ // All other cases are valid cases enumerated below:
+ // 1. Comparison between an exclusively derived null pointer and a
+ //    constant base pointer.
+ // 2. Comparison between an exclusively derived null pointer and a
+ // non-constant unrelocated base pointer.
+ // 3. Comparison between 2 unrelocated pointers.
+ return true;
+ };
+ if (!hasValidUnrelocatedUse()) {
+ // Print out all non-constant derived pointers that are unrelocated
+ // uses, which are invalid.
+ if (baseTyLHS == BaseType::NonConstant && !AvailableSet.count(LHS))
+ ReportInvalidUse(*LHS, I);
+ if (baseTyRHS == BaseType::NonConstant && !AvailableSet.count(RHS))
+ ReportInvalidUse(*RHS, I);
+ }
+ } else {
+ for (const Value *V : I.operands())
+ if (containsGCPtrType(V->getType()) &&
+ isNotExclusivelyConstantDerived(V) && !AvailableSet.count(V))
+ ReportInvalidUse(*V, I);
+ }
+
+ bool Cleared = false;
+ TransferInstruction(I, Cleared, AvailableSet);
+ (void)Cleared;
+ }
+ }
+
+ if (PrintOnly && !AnyInvalidUses) {
+ dbgs() << "No illegal uses found by SafepointIRVerifier in: " << F.getName()
+ << "\n";
+ }
+}
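The verifier can be exercised from opt; a sketch of a test-style invocation using the pass name and flag defined above:

    opt -safepoint-ir-verifier-print-only -verify-safepoint-ir -S < test.ll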
diff --git a/lib/IR/Type.cpp b/lib/IR/Type.cpp
index 44fe5e48c720..20e9c2b5fff2 100644
--- a/lib/IR/Type.cpp
+++ b/lib/IR/Type.cpp
@@ -538,7 +538,7 @@ bool CompositeType::indexValid(const Value *V) const {
if (auto *STy = dyn_cast<StructType>(this)) {
// Structure indexes require (vectors of) 32-bit integer constants. In the
// vector case all of the indices must be equal.
- if (!V->getType()->getScalarType()->isIntegerTy(32))
+ if (!V->getType()->isIntOrIntVectorTy(32))
return false;
const Constant *C = dyn_cast<Constant>(V);
if (C && V->getType()->isVectorTy())
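isIntOrIntVectorTy(32) folds the scalar-type unwrap and the bit-width test into one predicate, so the check reads the intent directly and accepts both i32 and <N x i32>. A small sketch of the equivalence (the wrapper function is illustrative, not part of the change):

#include "llvm/IR/Type.h"

// Accepts i32 and <N x i32>. The helper is shorthand for unwrapping the
// scalar type first, i.e. Ty->getScalarType()->isIntegerTy(32).
static bool isI32OrI32Vector(const llvm::Type *Ty) {
  return Ty->isIntOrIntVectorTy(32);
}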
diff --git a/lib/IR/Verifier.cpp b/lib/IR/Verifier.cpp
index 819f63520c74..454a56a76923 100644
--- a/lib/IR/Verifier.cpp
+++ b/lib/IR/Verifier.cpp
@@ -2504,15 +2504,13 @@ void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
Type *SrcTy = I.getOperand(0)->getType();
Type *DestTy = I.getType();
- Assert(SrcTy->getScalarType()->isPointerTy(),
- "PtrToInt source must be pointer", &I);
+ Assert(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
if (auto *PTy = dyn_cast<PointerType>(SrcTy->getScalarType()))
Assert(!DL.isNonIntegralPointerType(PTy),
"ptrtoint not supported for non-integral pointers");
- Assert(DestTy->getScalarType()->isIntegerTy(),
- "PtrToInt result must be integral", &I);
+ Assert(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
&I);
@@ -2531,10 +2529,9 @@ void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
Type *SrcTy = I.getOperand(0)->getType();
Type *DestTy = I.getType();
- Assert(SrcTy->getScalarType()->isIntegerTy(),
+ Assert(SrcTy->isIntOrIntVectorTy(),
"IntToPtr source must be an integral", &I);
- Assert(DestTy->getScalarType()->isPointerTy(),
- "IntToPtr result must be a pointer", &I);
+ Assert(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
if (auto *PTy = dyn_cast<PointerType>(DestTy->getScalarType()))
Assert(!DL.isNonIntegralPointerType(PTy),
@@ -2952,11 +2949,10 @@ void Verifier::visitICmpInst(ICmpInst &IC) {
Assert(Op0Ty == Op1Ty,
"Both operands to ICmp instruction are not of the same type!", &IC);
// Check that the operands are the right type
- Assert(Op0Ty->isIntOrIntVectorTy() || Op0Ty->getScalarType()->isPointerTy(),
+ Assert(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
"Invalid operand types for ICmp instruction", &IC);
// Check that the predicate is valid.
- Assert(IC.getPredicate() >= CmpInst::FIRST_ICMP_PREDICATE &&
- IC.getPredicate() <= CmpInst::LAST_ICMP_PREDICATE,
+ Assert(IC.isIntPredicate(),
"Invalid predicate in ICmp instruction!", &IC);
visitInstruction(IC);
@@ -2972,8 +2968,7 @@ void Verifier::visitFCmpInst(FCmpInst &FC) {
Assert(Op0Ty->isFPOrFPVectorTy(),
"Invalid operand types for FCmp instruction", &FC);
// Check that the predicate is valid.
- Assert(FC.getPredicate() >= CmpInst::FIRST_FCMP_PREDICATE &&
- FC.getPredicate() <= CmpInst::LAST_FCMP_PREDICATE,
+ Assert(FC.isFPPredicate(),
"Invalid predicate in FCmp instruction!", &FC);
visitInstruction(FC);
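isIntPredicate() and isFPPredicate() encapsulate the same range checks the verifier previously open-coded. A sketch of how such classification typically works over a predicate enum laid out in two contiguous ranges; the enumerators shown are illustrative, not LLVM's full list:

// Illustrative predicate enum mimicking CmpInst::Predicate's layout:
// FP predicates first, integer predicates second, with range markers.
enum Predicate {
  FCMP_FALSE, FCMP_OEQ, /* ... */ FCMP_TRUE, // FP range
  ICMP_EQ, ICMP_NE, /* ... */ ICMP_SLE,      // integer range
  FIRST_FCMP_PREDICATE = FCMP_FALSE, LAST_FCMP_PREDICATE = FCMP_TRUE,
  FIRST_ICMP_PREDICATE = ICMP_EQ, LAST_ICMP_PREDICATE = ICMP_SLE
};

bool isIntPredicate(Predicate P) {
  return P >= FIRST_ICMP_PREDICATE && P <= LAST_ICMP_PREDICATE;
}
bool isFPPredicate(Predicate P) {
  return P >= FIRST_FCMP_PREDICATE && P <= LAST_FCMP_PREDICATE;
}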
@@ -3011,7 +3006,7 @@ void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
Assert(ElTy, "Invalid indices for GEP pointer type!", &GEP);
- Assert(GEP.getType()->getScalarType()->isPointerTy() &&
+ Assert(GEP.getType()->isPtrOrPtrVectorTy() &&
GEP.getResultElementType() == ElTy,
"GEP is not of right type for indices!", &GEP, ElTy);
@@ -3027,7 +3022,7 @@ void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
unsigned IndexWidth = IndexTy->getVectorNumElements();
Assert(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
}
- Assert(IndexTy->getScalarType()->isIntegerTy(),
+ Assert(IndexTy->isIntOrIntVectorTy(),
"All GEP indices should be of integer type");
}
}
@@ -3113,7 +3108,7 @@ void Verifier::visitLoadInst(LoadInst &LI) {
ElTy, &LI);
checkAtomicMemAccessSize(ElTy, &LI);
} else {
- Assert(LI.getSynchScope() == CrossThread,
+ Assert(LI.getSyncScopeID() == SyncScope::System,
"Non-atomic load cannot have SynchronizationScope specified", &LI);
}
@@ -3142,7 +3137,7 @@ void Verifier::visitStoreInst(StoreInst &SI) {
ElTy, &SI);
checkAtomicMemAccessSize(ElTy, &SI);
} else {
- Assert(SI.getSynchScope() == CrossThread,
+ Assert(SI.getSyncScopeID() == SyncScope::System,
"Non-atomic store cannot have SynchronizationScope specified", &SI);
}
visitInstruction(SI);
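With the syncscope overhaul, the default scope is SyncScope::System rather than the old CrossThread constant, which is why the non-atomic checks above compare against it. A minimal sketch of the same query outside the verifier, assuming LLVM headers are available (the helper name is mine):

#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"

// Non-atomic loads must carry the default (system) sync scope; any
// other scope only makes sense on atomic operations.
static bool hasDefaultScope(const llvm::LoadInst &LI) {
  return LI.getSyncScopeID() == llvm::SyncScope::System;
}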
@@ -4049,6 +4044,73 @@ void Verifier::visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS) {
"incorrect alignment of the source argument", CS);
break;
}
+ case Intrinsic::memmove_element_unordered_atomic: {
+ auto *MI = cast<ElementUnorderedAtomicMemMoveInst>(CS.getInstruction());
+
+ ConstantInt *ElementSizeCI =
+ dyn_cast<ConstantInt>(MI->getRawElementSizeInBytes());
+ Assert(ElementSizeCI,
+ "element size of the element-wise unordered atomic memory "
+ "intrinsic must be a constant int",
+ CS);
+ const APInt &ElementSizeVal = ElementSizeCI->getValue();
+ Assert(ElementSizeVal.isPowerOf2(),
+ "element size of the element-wise atomic memory intrinsic "
+ "must be a power of 2",
+ CS);
+
+ if (auto *LengthCI = dyn_cast<ConstantInt>(MI->getLength())) {
+ uint64_t Length = LengthCI->getZExtValue();
+ uint64_t ElementSize = MI->getElementSizeInBytes();
+ Assert((Length % ElementSize) == 0,
+ "constant length must be a multiple of the element size in the "
+ "element-wise atomic memory intrinsic",
+ CS);
+ }
+
+ auto IsValidAlignment = [&](uint64_t Alignment) {
+ return isPowerOf2_64(Alignment) && ElementSizeVal.ule(Alignment);
+ };
+ uint64_t DstAlignment = CS.getParamAlignment(0),
+ SrcAlignment = CS.getParamAlignment(1);
+ Assert(IsValidAlignment(DstAlignment),
+ "incorrect alignment of the destination argument", CS);
+ Assert(IsValidAlignment(SrcAlignment),
+ "incorrect alignment of the source argument", CS);
+ break;
+ }
+ case Intrinsic::memset_element_unordered_atomic: {
+ auto *MI = cast<ElementUnorderedAtomicMemSetInst>(CS.getInstruction());
+
+ ConstantInt *ElementSizeCI =
+ dyn_cast<ConstantInt>(MI->getRawElementSizeInBytes());
+ Assert(ElementSizeCI,
+ "element size of the element-wise unordered atomic memory "
+ "intrinsic must be a constant int",
+ CS);
+ const APInt &ElementSizeVal = ElementSizeCI->getValue();
+ Assert(ElementSizeVal.isPowerOf2(),
+ "element size of the element-wise atomic memory intrinsic "
+ "must be a power of 2",
+ CS);
+
+ if (auto *LengthCI = dyn_cast<ConstantInt>(MI->getLength())) {
+ uint64_t Length = LengthCI->getZExtValue();
+ uint64_t ElementSize = MI->getElementSizeInBytes();
+ Assert((Length % ElementSize) == 0,
+ "constant length must be a multiple of the element size in the "
+ "element-wise atomic memory intrinsic",
+ CS);
+ }
+
+ auto IsValidAlignment = [&](uint64_t Alignment) {
+ return isPowerOf2_64(Alignment) && ElementSizeVal.ule(Alignment);
+ };
+ uint64_t DstAlignment = CS.getParamAlignment(0);
+ Assert(IsValidAlignment(DstAlignment),
+ "incorrect alignment of the destination argument", CS);
+ break;
+ }
case Intrinsic::gcroot:
case Intrinsic::gcwrite:
case Intrinsic::gcread:
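The memmove and memset cases apply the same three rules as the memcpy case above: the element size must be a power-of-2 constant int, a constant length must be a multiple of it, and each pointer argument's alignment must be a power of 2 no smaller than the element size. A standalone sketch of that validity check, independent of the LLVM types (not the committed API):

#include <cstdint>

static bool isPow2(uint64_t X) { return X != 0 && (X & (X - 1)) == 0; }

// Mirrors the verifier's rules for the element-wise unordered atomic
// memory intrinsics. The length rule is only enforced when the length
// operand is a compile-time constant.
bool elementWiseAtomicArgsValid(uint64_t ElementSize, uint64_t Length,
                                uint64_t Alignment) {
  return isPow2(ElementSize) &&
         Length % ElementSize == 0 &&
         isPow2(Alignment) && Alignment >= ElementSize;
}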
@@ -4253,7 +4315,7 @@ void Verifier::visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS) {
// relocated pointer. It can be casted to the correct type later if it's
// desired. However, they must have the same address space and 'vectorness'
GCRelocateInst &Relocate = cast<GCRelocateInst>(*CS.getInstruction());
- Assert(Relocate.getDerivedPtr()->getType()->getScalarType()->isPointerTy(),
+ Assert(Relocate.getDerivedPtr()->getType()->isPtrOrPtrVectorTy(),
"gc.relocate: relocated value must be a gc pointer", CS);
auto ResultType = CS.getType();