Diffstat (limited to 'lib/IR')
-rw-r--r--  lib/IR/AsmWriter.cpp            11
-rw-r--r--  lib/IR/AttributeImpl.h           6
-rw-r--r--  lib/IR/Attributes.cpp           84
-rw-r--r--  lib/IR/AutoUpgrade.cpp         157
-rw-r--r--  lib/IR/BasicBlock.cpp            7
-rw-r--r--  lib/IR/ConstantFold.cpp         25
-rw-r--r--  lib/IR/ConstantRange.cpp        76
-rw-r--r--  lib/IR/Constants.cpp            56
-rw-r--r--  lib/IR/ConstantsContext.h       12
-rw-r--r--  lib/IR/Core.cpp                114
-rw-r--r--  lib/IR/DIBuilder.cpp             2
-rw-r--r--  lib/IR/DataLayout.cpp          163
-rw-r--r--  lib/IR/DebugInfo.cpp            22
-rw-r--r--  lib/IR/DebugInfoMetadata.cpp    34
-rw-r--r--  lib/IR/DiagnosticInfo.cpp       11
-rw-r--r--  lib/IR/Function.cpp            103
-rw-r--r--  lib/IR/Globals.cpp              51
-rw-r--r--  lib/IR/IRBuilder.cpp             8
-rw-r--r--  lib/IR/IRPrintingPasses.cpp     18
-rw-r--r--  lib/IR/InlineAsm.cpp            10
-rw-r--r--  lib/IR/Instruction.cpp           2
-rw-r--r--  lib/IR/Instructions.cpp        167
-rw-r--r--  lib/IR/IntrinsicInst.cpp        15
-rw-r--r--  lib/IR/LLVMContext.cpp          31
-rw-r--r--  lib/IR/LLVMContextImpl.cpp       2
-rw-r--r--  lib/IR/LegacyPassManager.cpp    20
-rw-r--r--  lib/IR/MDBuilder.cpp            12
-rw-r--r--  lib/IR/Metadata.cpp             18
-rw-r--r--  lib/IR/Module.cpp                2
-rw-r--r--  lib/IR/RemarkStreamer.cpp       72
-rw-r--r--  lib/IR/SafepointIRVerifier.cpp   4
-rw-r--r--  lib/IR/Type.cpp                 27
-rw-r--r--  lib/IR/Value.cpp               111
-rw-r--r--  lib/IR/Verifier.cpp            167
34 files changed, 1147 insertions, 473 deletions
diff --git a/lib/IR/AsmWriter.cpp b/lib/IR/AsmWriter.cpp
index eb5760daecb3..b0c26e0ecaf5 100644
--- a/lib/IR/AsmWriter.cpp
+++ b/lib/IR/AsmWriter.cpp
@@ -352,6 +352,7 @@ static void PrintCallingConv(unsigned cc, raw_ostream &Out) {
case CallingConv::PreserveAll: Out << "preserve_allcc"; break;
case CallingConv::CXX_FAST_TLS: Out << "cxx_fast_tlscc"; break;
case CallingConv::GHC: Out << "ghccc"; break;
+ case CallingConv::Tail: Out << "tailcc"; break;
case CallingConv::X86_StdCall: Out << "x86_stdcallcc"; break;
case CallingConv::X86_FastCall: Out << "x86_fastcallcc"; break;
case CallingConv::X86_ThisCall: Out << "x86_thiscallcc"; break;
@@ -835,7 +836,7 @@ SlotTracker *ModuleSlotTracker::getMachine() {
ShouldCreateStorage = false;
MachineStorage =
- llvm::make_unique<SlotTracker>(M, ShouldInitializeAllMetadata);
+ std::make_unique<SlotTracker>(M, ShouldInitializeAllMetadata);
Machine = MachineStorage.get();
return Machine;
}
@@ -2312,7 +2313,7 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Metadata *MD,
if (const MDNode *N = dyn_cast<MDNode>(MD)) {
std::unique_ptr<SlotTracker> MachineStorage;
if (!Machine) {
- MachineStorage = make_unique<SlotTracker>(Context);
+ MachineStorage = std::make_unique<SlotTracker>(Context);
Machine = MachineStorage.get();
}
int Slot = Machine->getMetadataSlot(N);
@@ -2950,7 +2951,7 @@ void AssemblyWriter::printFunctionSummary(const FunctionSummary *FS) {
FunctionSummary::FFlags FFlags = FS->fflags();
if (FFlags.ReadNone | FFlags.ReadOnly | FFlags.NoRecurse |
- FFlags.ReturnDoesNotAlias) {
+ FFlags.ReturnDoesNotAlias | FFlags.NoInline) {
Out << ", funcFlags: (";
Out << "readNone: " << FFlags.ReadNone;
Out << ", readOnly: " << FFlags.ReadOnly;
@@ -3553,6 +3554,10 @@ void AssemblyWriter::printArgument(const Argument *Arg, AttributeSet Attrs) {
if (Arg->hasName()) {
Out << ' ';
PrintLLVMName(Out, Arg);
+ } else {
+ int Slot = Machine.getLocalSlot(Arg);
+ assert(Slot != -1 && "expect argument in function here");
+ Out << " %" << Slot;
}
}
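
The make_unique changes in this file are part of the tree-wide move to C++14, which makes the llvm::make_unique polyfill redundant. A minimal standalone illustration of the replacement (a sketch, not code from this commit):

    #include <memory>

    struct Tracker {
      explicit Tracker(int Id) : Id(Id) {}
      int Id;
    };

    int main() {
      // With C++14 available, call sites like the ones above switch to
      // std::make_unique directly instead of the local helper.
      auto T = std::make_unique<Tracker>(42);
      return T->Id == 42 ? 0 : 1;
    }
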
diff --git a/lib/IR/AttributeImpl.h b/lib/IR/AttributeImpl.h
index f989fa3b910e..15e488bbb13b 100644
--- a/lib/IR/AttributeImpl.h
+++ b/lib/IR/AttributeImpl.h
@@ -159,7 +159,7 @@ public:
};
class TypeAttributeImpl : public EnumAttributeImpl {
- virtual void anchor();
+ void anchor() override;
Type *Ty;
@@ -208,8 +208,8 @@ public:
Attribute getAttribute(Attribute::AttrKind Kind) const;
Attribute getAttribute(StringRef Kind) const;
- unsigned getAlignment() const;
- unsigned getStackAlignment() const;
+ MaybeAlign getAlignment() const;
+ MaybeAlign getStackAlignment() const;
uint64_t getDereferenceableBytes() const;
uint64_t getDereferenceableOrNullBytes() const;
std::pair<unsigned, Optional<unsigned>> getAllocSizeArgs() const;
diff --git a/lib/IR/Attributes.cpp b/lib/IR/Attributes.cpp
index bb90bcd7dd74..cc370e628e9a 100644
--- a/lib/IR/Attributes.cpp
+++ b/lib/IR/Attributes.cpp
@@ -142,17 +142,14 @@ Attribute Attribute::get(LLVMContext &Context, Attribute::AttrKind Kind,
return Attribute(PA);
}
-Attribute Attribute::getWithAlignment(LLVMContext &Context, uint64_t Align) {
- assert(isPowerOf2_32(Align) && "Alignment must be a power of two.");
- assert(Align <= 0x40000000 && "Alignment too large.");
- return get(Context, Alignment, Align);
+Attribute Attribute::getWithAlignment(LLVMContext &Context, Align A) {
+ assert(A <= 0x40000000 && "Alignment too large.");
+ return get(Context, Alignment, A.value());
}
-Attribute Attribute::getWithStackAlignment(LLVMContext &Context,
- uint64_t Align) {
- assert(isPowerOf2_32(Align) && "Alignment must be a power of two.");
- assert(Align <= 0x100 && "Alignment too large.");
- return get(Context, StackAlignment, Align);
+Attribute Attribute::getWithStackAlignment(LLVMContext &Context, Align A) {
+ assert(A <= 0x100 && "Alignment too large.");
+ return get(Context, StackAlignment, A.value());
}
Attribute Attribute::getWithDereferenceableBytes(LLVMContext &Context,
@@ -244,16 +241,16 @@ bool Attribute::hasAttribute(StringRef Kind) const {
return pImpl && pImpl->hasAttribute(Kind);
}
-unsigned Attribute::getAlignment() const {
+MaybeAlign Attribute::getAlignment() const {
assert(hasAttribute(Attribute::Alignment) &&
"Trying to get alignment from non-alignment attribute!");
- return pImpl->getValueAsInt();
+ return MaybeAlign(pImpl->getValueAsInt());
}
-unsigned Attribute::getStackAlignment() const {
+MaybeAlign Attribute::getStackAlignment() const {
assert(hasAttribute(Attribute::StackAlignment) &&
"Trying to get alignment from non-alignment attribute!");
- return pImpl->getValueAsInt();
+ return MaybeAlign(pImpl->getValueAsInt());
}
uint64_t Attribute::getDereferenceableBytes() const {
@@ -670,12 +667,12 @@ Attribute AttributeSet::getAttribute(StringRef Kind) const {
return SetNode ? SetNode->getAttribute(Kind) : Attribute();
}
-unsigned AttributeSet::getAlignment() const {
- return SetNode ? SetNode->getAlignment() : 0;
+MaybeAlign AttributeSet::getAlignment() const {
+ return SetNode ? SetNode->getAlignment() : None;
}
-unsigned AttributeSet::getStackAlignment() const {
- return SetNode ? SetNode->getStackAlignment() : 0;
+MaybeAlign AttributeSet::getStackAlignment() const {
+ return SetNode ? SetNode->getStackAlignment() : None;
}
uint64_t AttributeSet::getDereferenceableBytes() const {
@@ -782,10 +779,12 @@ AttributeSetNode *AttributeSetNode::get(LLVMContext &C, const AttrBuilder &B) {
Attr = Attribute::getWithByValType(C, B.getByValType());
break;
case Attribute::Alignment:
- Attr = Attribute::getWithAlignment(C, B.getAlignment());
+ assert(B.getAlignment() && "Alignment must be set");
+ Attr = Attribute::getWithAlignment(C, *B.getAlignment());
break;
case Attribute::StackAlignment:
- Attr = Attribute::getWithStackAlignment(C, B.getStackAlignment());
+ assert(B.getStackAlignment() && "StackAlignment must be set");
+ Attr = Attribute::getWithStackAlignment(C, *B.getStackAlignment());
break;
case Attribute::Dereferenceable:
Attr = Attribute::getWithDereferenceableBytes(
@@ -836,18 +835,18 @@ Attribute AttributeSetNode::getAttribute(StringRef Kind) const {
return {};
}
-unsigned AttributeSetNode::getAlignment() const {
+MaybeAlign AttributeSetNode::getAlignment() const {
for (const auto I : *this)
if (I.hasAttribute(Attribute::Alignment))
return I.getAlignment();
- return 0;
+ return None;
}
-unsigned AttributeSetNode::getStackAlignment() const {
+MaybeAlign AttributeSetNode::getStackAlignment() const {
for (const auto I : *this)
if (I.hasAttribute(Attribute::StackAlignment))
return I.getStackAlignment();
- return 0;
+ return None;
}
Type *AttributeSetNode::getByValType() const {
@@ -1164,8 +1163,8 @@ AttributeList AttributeList::addAttributes(LLVMContext &C, unsigned Index,
#ifndef NDEBUG
// FIXME it is not obvious how this should work for alignment. For now, say
// we can't change a known alignment.
- unsigned OldAlign = getAttributes(Index).getAlignment();
- unsigned NewAlign = B.getAlignment();
+ const MaybeAlign OldAlign = getAttributes(Index).getAlignment();
+ const MaybeAlign NewAlign = B.getAlignment();
assert((!OldAlign || !NewAlign || OldAlign == NewAlign) &&
"Attempt to change alignment!");
#endif
@@ -1349,11 +1348,11 @@ Attribute AttributeList::getAttribute(unsigned Index, StringRef Kind) const {
return getAttributes(Index).getAttribute(Kind);
}
-unsigned AttributeList::getRetAlignment() const {
+MaybeAlign AttributeList::getRetAlignment() const {
return getAttributes(ReturnIndex).getAlignment();
}
-unsigned AttributeList::getParamAlignment(unsigned ArgNo) const {
+MaybeAlign AttributeList::getParamAlignment(unsigned ArgNo) const {
return getAttributes(ArgNo + FirstArgIndex).getAlignment();
}
@@ -1361,8 +1360,7 @@ Type *AttributeList::getParamByValType(unsigned Index) const {
return getAttributes(Index+FirstArgIndex).getByValType();
}
-
-unsigned AttributeList::getStackAlignment(unsigned Index) const {
+MaybeAlign AttributeList::getStackAlignment(unsigned Index) const {
return getAttributes(Index).getStackAlignment();
}
@@ -1438,7 +1436,9 @@ AttrBuilder::AttrBuilder(AttributeSet AS) {
void AttrBuilder::clear() {
Attrs.reset();
TargetDepAttrs.clear();
- Alignment = StackAlignment = DerefBytes = DerefOrNullBytes = 0;
+ Alignment.reset();
+ StackAlignment.reset();
+ DerefBytes = DerefOrNullBytes = 0;
AllocSizeArgs = 0;
ByValType = nullptr;
}
@@ -1486,9 +1486,9 @@ AttrBuilder &AttrBuilder::removeAttribute(Attribute::AttrKind Val) {
Attrs[Val] = false;
if (Val == Attribute::Alignment)
- Alignment = 0;
+ Alignment.reset();
else if (Val == Attribute::StackAlignment)
- StackAlignment = 0;
+ StackAlignment.reset();
else if (Val == Attribute::ByVal)
ByValType = nullptr;
else if (Val == Attribute::Dereferenceable)
@@ -1517,23 +1517,23 @@ std::pair<unsigned, Optional<unsigned>> AttrBuilder::getAllocSizeArgs() const {
return unpackAllocSizeArgs(AllocSizeArgs);
}
-AttrBuilder &AttrBuilder::addAlignmentAttr(unsigned Align) {
- if (Align == 0) return *this;
+AttrBuilder &AttrBuilder::addAlignmentAttr(MaybeAlign Align) {
+ if (!Align)
+ return *this;
- assert(isPowerOf2_32(Align) && "Alignment must be a power of two.");
- assert(Align <= 0x40000000 && "Alignment too large.");
+ assert(*Align <= 0x40000000 && "Alignment too large.");
Attrs[Attribute::Alignment] = true;
Alignment = Align;
return *this;
}
-AttrBuilder &AttrBuilder::addStackAlignmentAttr(unsigned Align) {
+AttrBuilder &AttrBuilder::addStackAlignmentAttr(MaybeAlign Align) {
// Default alignment, allow the target to define how to align it.
- if (Align == 0) return *this;
+ if (!Align)
+ return *this;
- assert(isPowerOf2_32(Align) && "Alignment must be a power of two.");
- assert(Align <= 0x100 && "Alignment too large.");
+ assert(*Align <= 0x100 && "Alignment too large.");
Attrs[Attribute::StackAlignment] = true;
StackAlignment = Align;
@@ -1610,10 +1610,10 @@ AttrBuilder &AttrBuilder::merge(const AttrBuilder &B) {
AttrBuilder &AttrBuilder::remove(const AttrBuilder &B) {
// FIXME: What if both have alignments, but they don't match?!
if (B.Alignment)
- Alignment = 0;
+ Alignment.reset();
if (B.StackAlignment)
- StackAlignment = 0;
+ StackAlignment.reset();
if (B.DerefBytes)
DerefBytes = 0;
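
Across this patch series, raw unsigned alignments (with 0 meaning "unset") give way to the Align/MaybeAlign types. A rough stand-in built on std::optional conveys the intended semantics (assumed shapes for illustration, not LLVM's actual class definitions):

    #include <cassert>
    #include <cstdint>
    #include <optional>

    struct Align { uint64_t Value; };          // stand-in for llvm::Align
    using MaybeAlign = std::optional<Align>;   // stand-in for llvm::MaybeAlign

    int main() {
      MaybeAlign A;                  // old code: unsigned A = 0;
      assert(!A && "no alignment set");
      A = Align{16};                 // old code: A = 16;
      assert(A->Value == 16);
      A.reset();                     // old code: A = 0;
      return 0;
    }
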
diff --git a/lib/IR/AutoUpgrade.cpp b/lib/IR/AutoUpgrade.cpp
index a2d820352825..79f580d0e14d 100644
--- a/lib/IR/AutoUpgrade.cpp
+++ b/lib/IR/AutoUpgrade.cpp
@@ -490,12 +490,6 @@ static bool UpgradeX86IntrinsicFunction(Function *F, StringRef Name,
static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
assert(F && "Illegal to upgrade a non-existent Function.");
- // Upgrade intrinsics "clang.arc.use" which doesn't start with "llvm.".
- if (F->getName() == "clang.arc.use") {
- NewFn = nullptr;
- return true;
- }
-
// Quickly eliminate it, if it's not a candidate.
StringRef Name = F->getName();
if (Name.size() <= 8 || !Name.startswith("llvm."))
@@ -528,7 +522,7 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
F->arg_begin()->getType());
return true;
}
- Regex vldRegex("^arm\\.neon\\.vld([1234]|[234]lane)\\.v[a-z0-9]*$");
+ static const Regex vldRegex("^arm\\.neon\\.vld([1234]|[234]lane)\\.v[a-z0-9]*$");
if (vldRegex.match(Name)) {
auto fArgs = F->getFunctionType()->params();
SmallVector<Type *, 4> Tys(fArgs.begin(), fArgs.end());
@@ -539,7 +533,7 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
"llvm." + Name + ".p0i8", F->getParent());
return true;
}
- Regex vstRegex("^arm\\.neon\\.vst([1234]|[234]lane)\\.v[a-z0-9]*$");
+ static const Regex vstRegex("^arm\\.neon\\.vst([1234]|[234]lane)\\.v[a-z0-9]*$");
if (vstRegex.match(Name)) {
static const Intrinsic::ID StoreInts[] = {Intrinsic::arm_neon_vst1,
Intrinsic::arm_neon_vst2,
@@ -604,7 +598,7 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
}
case 'e': {
SmallVector<StringRef, 2> Groups;
- Regex R("^experimental.vector.reduce.([a-z]+)\\.[fi][0-9]+");
+ static const Regex R("^experimental.vector.reduce.([a-z]+)\\.[fi][0-9]+");
if (R.match(Name, &Groups)) {
Intrinsic::ID ID = Intrinsic::not_intrinsic;
if (Groups[1] == "fadd")
@@ -789,6 +783,19 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
}
break;
+ case 'p':
+ if (Name == "prefetch") {
+ // Handle address space overloading.
+ Type *Tys[] = {F->arg_begin()->getType()};
+ if (F->getName() != Intrinsic::getName(Intrinsic::prefetch, Tys)) {
+ rename(F);
+ NewFn =
+ Intrinsic::getDeclaration(F->getParent(), Intrinsic::prefetch, Tys);
+ return true;
+ }
+ }
+ break;
+
case 's':
if (Name == "stackprotectorcheck") {
NewFn = nullptr;
@@ -1648,14 +1655,6 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
// Get the Function's name.
StringRef Name = F->getName();
- // clang.arc.use is an old name for llvm.arc.clang.arc.use. It is dropped
- // from upgrader because the optimizer now only recognizes intrinsics for
- // ARC runtime calls.
- if (Name == "clang.arc.use") {
- CI->eraseFromParent();
- return;
- }
-
assert(Name.startswith("llvm.") && "Intrinsic doesn't start with 'llvm.'");
Name = Name.substr(5);
@@ -3831,7 +3830,9 @@ bool llvm::UpgradeDebugInfo(Module &M) {
return Modified;
}
-bool llvm::UpgradeRetainReleaseMarker(Module &M) {
+/// This checks for the objc retain/release marker which should be upgraded.
+/// It returns true if the module is modified.
+static bool UpgradeRetainReleaseMarker(Module &M) {
bool Changed = false;
const char *MarkerKey = "clang.arc.retainAutoreleasedReturnValueMarker";
NamedMDNode *ModRetainReleaseMarker = M.getNamedMetadata(MarkerKey);
@@ -3855,6 +3856,106 @@ bool llvm::UpgradeRetainReleaseMarker(Module &M) {
return Changed;
}
+void llvm::UpgradeARCRuntime(Module &M) {
+ // This lambda converts normal function calls to ARC runtime functions to
+ // intrinsic calls.
+ auto UpgradeToIntrinsic = [&](const char *OldFunc,
+ llvm::Intrinsic::ID IntrinsicFunc) {
+ Function *Fn = M.getFunction(OldFunc);
+
+ if (!Fn)
+ return;
+
+ Function *NewFn = llvm::Intrinsic::getDeclaration(&M, IntrinsicFunc);
+
+ for (auto I = Fn->user_begin(), E = Fn->user_end(); I != E;) {
+ CallInst *CI = dyn_cast<CallInst>(*I++);
+ if (!CI || CI->getCalledFunction() != Fn)
+ continue;
+
+ IRBuilder<> Builder(CI->getParent(), CI->getIterator());
+ FunctionType *NewFuncTy = NewFn->getFunctionType();
+ SmallVector<Value *, 2> Args;
+
+ for (unsigned I = 0, E = CI->getNumArgOperands(); I != E; ++I) {
+ Value *Arg = CI->getArgOperand(I);
+ // Bitcast argument to the parameter type of the new function if it's
+ // not a variadic argument.
+ if (I < NewFuncTy->getNumParams())
+ Arg = Builder.CreateBitCast(Arg, NewFuncTy->getParamType(I));
+ Args.push_back(Arg);
+ }
+
+ // Create a call instruction that calls the new function.
+ CallInst *NewCall = Builder.CreateCall(NewFuncTy, NewFn, Args);
+ NewCall->setTailCallKind(cast<CallInst>(CI)->getTailCallKind());
+ NewCall->setName(CI->getName());
+
+ // Bitcast the return value back to the type of the old call.
+ Value *NewRetVal = Builder.CreateBitCast(NewCall, CI->getType());
+
+ if (!CI->use_empty())
+ CI->replaceAllUsesWith(NewRetVal);
+ CI->eraseFromParent();
+ }
+
+ if (Fn->use_empty())
+ Fn->eraseFromParent();
+ };
+
+ // Unconditionally convert a call to "clang.arc.use" to a call to
+ // "llvm.objc.clang.arc.use".
+ UpgradeToIntrinsic("clang.arc.use", llvm::Intrinsic::objc_clang_arc_use);
+
+ // Upgrade the retain/release marker. If there is no need to upgrade the
+ // marker, the module is either already new enough to contain the new
+ // intrinsics or it is not ARC; there is no need to upgrade runtime calls.
+ if (!UpgradeRetainReleaseMarker(M))
+ return;
+
+ std::pair<const char *, llvm::Intrinsic::ID> RuntimeFuncs[] = {
+ {"objc_autorelease", llvm::Intrinsic::objc_autorelease},
+ {"objc_autoreleasePoolPop", llvm::Intrinsic::objc_autoreleasePoolPop},
+ {"objc_autoreleasePoolPush", llvm::Intrinsic::objc_autoreleasePoolPush},
+ {"objc_autoreleaseReturnValue",
+ llvm::Intrinsic::objc_autoreleaseReturnValue},
+ {"objc_copyWeak", llvm::Intrinsic::objc_copyWeak},
+ {"objc_destroyWeak", llvm::Intrinsic::objc_destroyWeak},
+ {"objc_initWeak", llvm::Intrinsic::objc_initWeak},
+ {"objc_loadWeak", llvm::Intrinsic::objc_loadWeak},
+ {"objc_loadWeakRetained", llvm::Intrinsic::objc_loadWeakRetained},
+ {"objc_moveWeak", llvm::Intrinsic::objc_moveWeak},
+ {"objc_release", llvm::Intrinsic::objc_release},
+ {"objc_retain", llvm::Intrinsic::objc_retain},
+ {"objc_retainAutorelease", llvm::Intrinsic::objc_retainAutorelease},
+ {"objc_retainAutoreleaseReturnValue",
+ llvm::Intrinsic::objc_retainAutoreleaseReturnValue},
+ {"objc_retainAutoreleasedReturnValue",
+ llvm::Intrinsic::objc_retainAutoreleasedReturnValue},
+ {"objc_retainBlock", llvm::Intrinsic::objc_retainBlock},
+ {"objc_storeStrong", llvm::Intrinsic::objc_storeStrong},
+ {"objc_storeWeak", llvm::Intrinsic::objc_storeWeak},
+ {"objc_unsafeClaimAutoreleasedReturnValue",
+ llvm::Intrinsic::objc_unsafeClaimAutoreleasedReturnValue},
+ {"objc_retainedObject", llvm::Intrinsic::objc_retainedObject},
+ {"objc_unretainedObject", llvm::Intrinsic::objc_unretainedObject},
+ {"objc_unretainedPointer", llvm::Intrinsic::objc_unretainedPointer},
+ {"objc_retain_autorelease", llvm::Intrinsic::objc_retain_autorelease},
+ {"objc_sync_enter", llvm::Intrinsic::objc_sync_enter},
+ {"objc_sync_exit", llvm::Intrinsic::objc_sync_exit},
+ {"objc_arc_annotation_topdown_bbstart",
+ llvm::Intrinsic::objc_arc_annotation_topdown_bbstart},
+ {"objc_arc_annotation_topdown_bbend",
+ llvm::Intrinsic::objc_arc_annotation_topdown_bbend},
+ {"objc_arc_annotation_bottomup_bbstart",
+ llvm::Intrinsic::objc_arc_annotation_bottomup_bbstart},
+ {"objc_arc_annotation_bottomup_bbend",
+ llvm::Intrinsic::objc_arc_annotation_bottomup_bbend}};
+
+ for (auto &I : RuntimeFuncs)
+ UpgradeToIntrinsic(I.first, I.second);
+}
+
bool llvm::UpgradeModuleFlags(Module &M) {
NamedMDNode *ModFlags = M.getModuleFlagsMetadata();
if (!ModFlags)
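
Note how the user-list walk in UpgradeToIntrinsic advances its iterator before the current call can be erased (`*I++`), since erasing the instruction would invalidate it. The same pattern in miniature on a plain std::list (illustrative only):

    #include <list>

    int main() {
      std::list<int> Users = {1, 2, 3, 4};
      for (auto I = Users.begin(), E = Users.end(); I != E;) {
        auto Cur = I++;       // advance first, as the lambda does with *I++
        if (*Cur % 2 == 0)
          Users.erase(Cur);   // safe: I already points past Cur
      }
      return Users.size() == 2 ? 0 : 1;
    }
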
@@ -4012,3 +4113,23 @@ MDNode *llvm::upgradeInstructionLoopAttachment(MDNode &N) {
return MDTuple::get(T->getContext(), Ops);
}
+
+std::string llvm::UpgradeDataLayoutString(StringRef DL, StringRef TT) {
+ std::string AddrSpaces = "-p270:32:32-p271:32:32-p272:64:64";
+
+ // If X86, and the datalayout matches the expected format, add pointer size
+ // address spaces to the datalayout.
+ Triple::ArchType Arch = Triple(TT).getArch();
+ if ((Arch != llvm::Triple::x86 && Arch != llvm::Triple::x86_64) ||
+ DL.contains(AddrSpaces))
+ return DL;
+
+ SmallVector<StringRef, 4> Groups;
+ Regex R("(e-m:[a-z](-p:32:32)?)(-[if]64:.*$)");
+ if (!R.match(DL, &Groups))
+ return DL;
+
+ SmallString<1024> Buf;
+ std::string Res = (Groups[1] + AddrSpaces + Groups[3]).toStringRef(Buf).str();
+ return Res;
+}
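
UpgradeDataLayoutString splices the new X86 pointer address-space entries in right after the mangling component. An illustrative re-implementation with std::regex (the commit itself uses llvm::Regex) applied to a typical x86_64 layout string:

    #include <iostream>
    #include <regex>
    #include <string>

    int main() {
      const std::string AddrSpaces = "-p270:32:32-p271:32:32-p272:64:64";
      std::string DL = "e-m:e-i64:64-f80:128-n8:16:32:64-S128";
      // Same pattern as the upgrade above: mangling component, optional
      // 32-bit pointer spec, then the i64/f64 tail.
      std::regex R("(e-m:[a-z](-p:32:32)?)(-[if]64:.*$)");
      std::smatch Groups;
      if (std::regex_search(DL, Groups, R))
        DL = Groups[1].str() + AddrSpaces + Groups[3].str();
      std::cout << DL << "\n";
      // e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128
      return 0;
    }
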
diff --git a/lib/IR/BasicBlock.cpp b/lib/IR/BasicBlock.cpp
index 34410712645d..bdee6990f932 100644
--- a/lib/IR/BasicBlock.cpp
+++ b/lib/IR/BasicBlock.cpp
@@ -107,6 +107,13 @@ BasicBlock::instructionsWithoutDebug() {
return make_filter_range(*this, Fn);
}
+filter_iterator<BasicBlock::const_iterator,
+ std::function<bool(const Instruction &)>>::difference_type
+BasicBlock::sizeWithoutDebug() const {
+ return std::distance(instructionsWithoutDebug().begin(),
+ instructionsWithoutDebug().end());
+}
+
void BasicBlock::removeFromParent() {
getParent()->getBasicBlockList().remove(getIterator());
}
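
The new BasicBlock::sizeWithoutDebug() measures the instruction range with debug intrinsics filtered out. The idea in plain STL terms (a sketch, not LLVM's filter_iterator machinery):

    #include <algorithm>
    #include <iostream>
    #include <vector>

    struct Inst { bool IsDebug; };

    int main() {
      // Five "instructions", two of which are debug-only.
      std::vector<Inst> BB = {{false}, {true}, {false}, {true}, {false}};
      auto N = std::count_if(BB.begin(), BB.end(),
                             [](const Inst &I) { return !I.IsDebug; });
      std::cout << "sizeWithoutDebug = " << N << "\n"; // prints 3
      return 0;
    }
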
diff --git a/lib/IR/ConstantFold.cpp b/lib/IR/ConstantFold.cpp
index 835fbb3443b8..71fa795ec294 100644
--- a/lib/IR/ConstantFold.cpp
+++ b/lib/IR/ConstantFold.cpp
@@ -746,7 +746,7 @@ Constant *llvm::ConstantFoldSelectInstruction(Constant *Cond,
ConstantInt::get(Ty, i));
Constant *V2Element = ConstantExpr::getExtractElement(V2,
ConstantInt::get(Ty, i));
- Constant *Cond = dyn_cast<Constant>(CondV->getOperand(i));
+ auto *Cond = cast<Constant>(CondV->getOperand(i));
if (V1Element == V2Element) {
V = V1Element;
} else if (isa<UndefValue>(Cond)) {
@@ -787,12 +787,9 @@ Constant *llvm::ConstantFoldSelectInstruction(Constant *Cond,
Constant *llvm::ConstantFoldExtractElementInstruction(Constant *Val,
Constant *Idx) {
- if (isa<UndefValue>(Val)) // ee(undef, x) -> undef
- return UndefValue::get(Val->getType()->getVectorElementType());
- if (Val->isNullValue()) // ee(zero, x) -> zero
- return Constant::getNullValue(Val->getType()->getVectorElementType());
- // ee({w,x,y,z}, undef) -> undef
- if (isa<UndefValue>(Idx))
+ // extractelt undef, C -> undef
+ // extractelt C, undef -> undef
+ if (isa<UndefValue>(Val) || isa<UndefValue>(Idx))
return UndefValue::get(Val->getType()->getVectorElementType());
if (ConstantInt *CIdx = dyn_cast<ConstantInt>(Idx)) {
@@ -1125,7 +1122,7 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode, Constant *C1,
isa<GlobalValue>(CE1->getOperand(0))) {
GlobalValue *GV = cast<GlobalValue>(CE1->getOperand(0));
- unsigned GVAlign;
+ MaybeAlign GVAlign;
if (Module *TheModule = GV->getParent()) {
GVAlign = GV->getPointerAlignment(TheModule->getDataLayout());
@@ -1139,19 +1136,19 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode, Constant *C1,
// increased code size (see https://reviews.llvm.org/D55115)
// FIXME: This code should be deleted once existing targets have
// appropriate defaults
- if (GVAlign == 0U && isa<Function>(GV))
- GVAlign = 4U;
+ if (!GVAlign && isa<Function>(GV))
+ GVAlign = Align(4);
} else if (isa<Function>(GV)) {
// Without a datalayout we have to assume the worst case: that the
// function pointer isn't aligned at all.
- GVAlign = 0U;
+ GVAlign = llvm::None;
} else {
- GVAlign = GV->getAlignment();
+ GVAlign = MaybeAlign(GV->getAlignment());
}
- if (GVAlign > 1) {
+ if (GVAlign && *GVAlign > 1) {
unsigned DstWidth = CI2->getType()->getBitWidth();
- unsigned SrcWidth = std::min(DstWidth, Log2_32(GVAlign));
+ unsigned SrcWidth = std::min(DstWidth, Log2(*GVAlign));
APInt BitsNotSet(APInt::getLowBitsSet(DstWidth, SrcWidth));
// If checking bits we know are clear, return zero.
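
The MaybeAlign-based fold above rests on the usual bit trick: a pointer aligned to 2^k has its k low bits clear, so masking those bits folds to zero. A hand-checked instance (my arithmetic, not from the patch):

    #include <cstdint>
    #include <iostream>

    int main() {
      const unsigned LowZeroBits = 2;                   // Log2(Align(4))
      const uint64_t Mask = (1ull << LowZeroBits) - 1;  // 0b11
      const uint64_t Addr = 0x1000;                     // 4-byte aligned address
      std::cout << (Addr & Mask) << "\n";               // prints 0
      return 0;
    }
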
diff --git a/lib/IR/ConstantRange.cpp b/lib/IR/ConstantRange.cpp
index 920fdc01a14f..642bf0f39342 100644
--- a/lib/IR/ConstantRange.cpp
+++ b/lib/IR/ConstantRange.cpp
@@ -269,6 +269,27 @@ ConstantRange::makeGuaranteedNoWrapRegion(Instruction::BinaryOps BinOp,
return makeExactMulNSWRegion(Other.getSignedMin())
.intersectWith(makeExactMulNSWRegion(Other.getSignedMax()));
+
+ case Instruction::Shl: {
+ // For a given range of shift amounts, if we ignore all illegal shift amounts
+ // (that always produce poison), what shift amount range is left?
+ ConstantRange ShAmt = Other.intersectWith(
+ ConstantRange(APInt(BitWidth, 0), APInt(BitWidth, (BitWidth - 1) + 1)));
+ if (ShAmt.isEmptySet()) {
+ // If the entire range of shift amounts is already poison-producing,
+ // then we can freely add more poison-producing flags on top of that.
+ return getFull(BitWidth);
+ }
+ // There are some legal shift amounts; we can compute a conservatively-correct
+ // range of no-wrap inputs. Note that by now we have clamped the ShAmtUMax
+ // to be at most bitwidth-1, which results in the most conservative range.
+ APInt ShAmtUMax = ShAmt.getUnsignedMax();
+ if (Unsigned)
+ return getNonEmpty(APInt::getNullValue(BitWidth),
+ APInt::getMaxValue(BitWidth).lshr(ShAmtUMax) + 1);
+ return getNonEmpty(APInt::getSignedMinValue(BitWidth).ashr(ShAmtUMax),
+ APInt::getSignedMaxValue(BitWidth).ashr(ShAmtUMax) + 1);
+ }
}
}
@@ -815,14 +836,55 @@ ConstantRange::add(const ConstantRange &Other) const {
return X;
}
-ConstantRange ConstantRange::addWithNoSignedWrap(const APInt &Other) const {
- // Calculate the subset of this range such that "X + Other" is
- // guaranteed not to wrap (overflow) for all X in this subset.
- auto NSWRange = ConstantRange::makeExactNoWrapRegion(
- BinaryOperator::Add, Other, OverflowingBinaryOperator::NoSignedWrap);
- auto NSWConstrainedRange = intersectWith(NSWRange);
+ConstantRange ConstantRange::addWithNoWrap(const ConstantRange &Other,
+ unsigned NoWrapKind,
+ PreferredRangeType RangeType) const {
+ // Calculate the range for "X + Y" which is guaranteed not to wrap(overflow).
+ // (X is from this, and Y is from Other)
+ if (isEmptySet() || Other.isEmptySet())
+ return getEmpty();
+ if (isFullSet() && Other.isFullSet())
+ return getFull();
+
+ using OBO = OverflowingBinaryOperator;
+ ConstantRange Result = add(Other);
+
+ auto addWithNoUnsignedWrap = [this](const ConstantRange &Other) {
+ APInt LMin = getUnsignedMin(), LMax = getUnsignedMax();
+ APInt RMin = Other.getUnsignedMin(), RMax = Other.getUnsignedMax();
+ bool Overflow;
+ APInt NewMin = LMin.uadd_ov(RMin, Overflow);
+ if (Overflow)
+ return getEmpty();
+ APInt NewMax = LMax.uadd_sat(RMax);
+ return getNonEmpty(std::move(NewMin), std::move(NewMax) + 1);
+ };
+
+ auto addWithNoSignedWrap = [this](const ConstantRange &Other) {
+ APInt LMin = getSignedMin(), LMax = getSignedMax();
+ APInt RMin = Other.getSignedMin(), RMax = Other.getSignedMax();
+ if (LMin.isNonNegative()) {
+ bool Overflow;
+ APInt Temp = LMin.sadd_ov(RMin, Overflow);
+ if (Overflow)
+ return getEmpty();
+ }
+ if (LMax.isNegative()) {
+ bool Overflow;
+ APInt Temp = LMax.sadd_ov(RMax, Overflow);
+ if (Overflow)
+ return getEmpty();
+ }
+ APInt NewMin = LMin.sadd_sat(RMin);
+ APInt NewMax = LMax.sadd_sat(RMax);
+ return getNonEmpty(std::move(NewMin), std::move(NewMax) + 1);
+ };
- return NSWConstrainedRange.add(ConstantRange(Other));
+ if (NoWrapKind & OBO::NoSignedWrap)
+ Result = Result.intersectWith(addWithNoSignedWrap(Other), RangeType);
+ if (NoWrapKind & OBO::NoUnsignedWrap)
+ Result = Result.intersectWith(addWithNoUnsignedWrap(Other), RangeType);
+ return Result;
}
ConstantRange
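
A hand-worked instance of the new Shl case (my numbers, not from the patch): over i8 with shift amounts in [1, 2], the clamped maximum shift is 2, so the unsigned no-wrap input region is [0, 0xFF >> 2] = [0, 63]:

    #include <cstdint>
    #include <iostream>

    int main() {
      const unsigned BitWidth = 8;
      const uint8_t ShAmtUMax = 2;                               // clamped max shift
      const uint8_t UpperInclusive = uint8_t(0xFF) >> ShAmtUMax; // 63
      std::cout << "NUW shl inputs for i" << BitWidth
                << ", amounts [1, 2]: [0, " << unsigned(UpperInclusive)
                << "]\n";
      return 0;
    }
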
diff --git a/lib/IR/Constants.cpp b/lib/IR/Constants.cpp
index ff551da29ae6..f792f01efc1a 100644
--- a/lib/IR/Constants.cpp
+++ b/lib/IR/Constants.cpp
@@ -22,6 +22,7 @@
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
+#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
@@ -250,6 +251,20 @@ bool Constant::isNaN() const {
return true;
}
+bool Constant::isElementWiseEqual(Value *Y) const {
+ // Are they fully identical?
+ if (this == Y)
+ return true;
+ // They may still be identical element-wise (if they have `undef`s).
+ auto *Cy = dyn_cast<Constant>(Y);
+ if (!Cy)
+ return false;
+ return PatternMatch::match(ConstantExpr::getICmp(ICmpInst::Predicate::ICMP_EQ,
+ const_cast<Constant *>(this),
+ Cy),
+ PatternMatch::m_One());
+}
+
bool Constant::containsUndefElement() const {
if (!getType()->isVectorTy())
return false;
@@ -502,22 +517,32 @@ bool Constant::needsRelocation() const {
if (const BlockAddress *BA = dyn_cast<BlockAddress>(this))
return BA->getFunction()->needsRelocation();
- // While raw uses of blockaddress need to be relocated, differences between
- // two of them don't when they are for labels in the same function. This is a
- // common idiom when creating a table for the indirect goto extension, so we
- // handle it efficiently here.
- if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(this))
+ if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(this)) {
if (CE->getOpcode() == Instruction::Sub) {
ConstantExpr *LHS = dyn_cast<ConstantExpr>(CE->getOperand(0));
ConstantExpr *RHS = dyn_cast<ConstantExpr>(CE->getOperand(1));
if (LHS && RHS && LHS->getOpcode() == Instruction::PtrToInt &&
- RHS->getOpcode() == Instruction::PtrToInt &&
- isa<BlockAddress>(LHS->getOperand(0)) &&
- isa<BlockAddress>(RHS->getOperand(0)) &&
- cast<BlockAddress>(LHS->getOperand(0))->getFunction() ==
- cast<BlockAddress>(RHS->getOperand(0))->getFunction())
- return false;
+ RHS->getOpcode() == Instruction::PtrToInt) {
+ Constant *LHSOp0 = LHS->getOperand(0);
+ Constant *RHSOp0 = RHS->getOperand(0);
+
+ // While raw uses of blockaddress need to be relocated, differences
+ // between two of them don't when they are for labels in the same
+ // function. This is a common idiom when creating a table for the
+ // indirect goto extension, so we handle it efficiently here.
+ if (isa<BlockAddress>(LHSOp0) && isa<BlockAddress>(RHSOp0) &&
+ cast<BlockAddress>(LHSOp0)->getFunction() ==
+ cast<BlockAddress>(RHSOp0)->getFunction())
+ return false;
+
+ // Relative pointers do not need to be dynamically relocated.
+ if (auto *LHSGV = dyn_cast<GlobalValue>(LHSOp0->stripPointerCasts()))
+ if (auto *RHSGV = dyn_cast<GlobalValue>(RHSOp0->stripPointerCasts()))
+ if (LHSGV->isDSOLocal() && RHSGV->isDSOLocal())
+ return false;
+ }
}
+ }
bool Result = false;
for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
@@ -563,13 +588,10 @@ void Constant::removeDeadConstantUsers() const {
}
// If the constant was dead, then the iterator is invalidated.
- if (LastNonDeadUser == E) {
+ if (LastNonDeadUser == E)
I = user_begin();
- if (I == E) break;
- } else {
- I = LastNonDeadUser;
- ++I;
- }
+ else
+ I = std::next(LastNonDeadUser);
}
}
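
Constant::isElementWiseEqual asks whether two constants agree in every lane once undef lanes are allowed to match anything, by folding an ICMP_EQ and matching an all-ones result. The semantics in miniature, with -1 standing in for undef (a conceptual sketch only, not the fold itself):

    #include <array>
    #include <cstddef>
    #include <iostream>

    constexpr int Undef = -1; // stand-in for an undef lane

    bool elementWiseEqual(const std::array<int, 4> &A,
                          const std::array<int, 4> &B) {
      for (std::size_t I = 0; I < A.size(); ++I)
        if (A[I] != Undef && B[I] != Undef && A[I] != B[I])
          return false; // a definite mismatch in some lane
      return true;      // every lane is equal or involves undef
    }

    int main() {
      std::cout << elementWiseEqual({1, 2, 3, 4}, {1, Undef, 3, 4}) << "\n"; // 1
      std::cout << elementWiseEqual({1, 2, 3, 4}, {1, 9, 3, 4}) << "\n";     // 0
      return 0;
    }
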
diff --git a/lib/IR/ConstantsContext.h b/lib/IR/ConstantsContext.h
index 7614dab9f15d..1ec9087551f8 100644
--- a/lib/IR/ConstantsContext.h
+++ b/lib/IR/ConstantsContext.h
@@ -480,14 +480,16 @@ struct ConstantExprKeyType {
: Opcode(CE->getOpcode()),
SubclassOptionalData(CE->getRawSubclassOptionalData()),
SubclassData(CE->isCompare() ? CE->getPredicate() : 0), Ops(Operands),
- Indexes(CE->hasIndices() ? CE->getIndices() : ArrayRef<unsigned>()) {}
+ Indexes(CE->hasIndices() ? CE->getIndices() : ArrayRef<unsigned>()),
+ ExplicitTy(nullptr) {}
ConstantExprKeyType(const ConstantExpr *CE,
SmallVectorImpl<Constant *> &Storage)
: Opcode(CE->getOpcode()),
SubclassOptionalData(CE->getRawSubclassOptionalData()),
SubclassData(CE->isCompare() ? CE->getPredicate() : 0),
- Indexes(CE->hasIndices() ? CE->getIndices() : ArrayRef<unsigned>()) {
+ Indexes(CE->hasIndices() ? CE->getIndices() : ArrayRef<unsigned>()),
+ ExplicitTy(nullptr) {
assert(Storage.empty() && "Expected empty storage");
for (unsigned I = 0, E = CE->getNumOperands(); I != E; ++I)
Storage.push_back(CE->getOperand(I));
@@ -676,9 +678,9 @@ public:
/// Hash once, and reuse it for the lookup and the insertion if needed.
LookupKeyHashed Lookup(MapInfo::getHashValue(Key), Key);
- auto I = Map.find_as(Lookup);
- if (I != Map.end())
- return *I;
+ auto ItMap = Map.find_as(Lookup);
+ if (ItMap != Map.end())
+ return *ItMap;
// Update to the new value. Optimize for the case when we have a single
// operand that we're changing, but handle bulk updates efficiently.
diff --git a/lib/IR/Core.cpp b/lib/IR/Core.cpp
index 310935b5213a..a5f46b16e600 100644
--- a/lib/IR/Core.cpp
+++ b/lib/IR/Core.cpp
@@ -140,7 +140,16 @@ unsigned LLVMGetLastEnumAttributeKind(void) {
LLVMAttributeRef LLVMCreateEnumAttribute(LLVMContextRef C, unsigned KindID,
uint64_t Val) {
- return wrap(Attribute::get(*unwrap(C), (Attribute::AttrKind)KindID, Val));
+ auto &Ctx = *unwrap(C);
+ auto AttrKind = (Attribute::AttrKind)KindID;
+
+ if (AttrKind == Attribute::AttrKind::ByVal) {
+ // After r362128, byval attributes need to have a type attribute. Provide a
+ // NULL one until a proper API is added for this.
+ return wrap(Attribute::getWithByValType(Ctx, NULL));
+ } else {
+ return wrap(Attribute::get(Ctx, AttrKind, Val));
+ }
}
unsigned LLVMGetEnumAttributeKind(LLVMAttributeRef A) {
@@ -386,7 +395,7 @@ void LLVMDumpModule(LLVMModuleRef M) {
LLVMBool LLVMPrintModuleToFile(LLVMModuleRef M, const char *Filename,
char **ErrorMessage) {
std::error_code EC;
- raw_fd_ostream dest(Filename, EC, sys::fs::F_Text);
+ raw_fd_ostream dest(Filename, EC, sys::fs::OF_Text);
if (EC) {
*ErrorMessage = strdup(EC.message().c_str());
return true;
@@ -1999,13 +2008,13 @@ unsigned LLVMGetAlignment(LLVMValueRef V) {
void LLVMSetAlignment(LLVMValueRef V, unsigned Bytes) {
Value *P = unwrap<Value>(V);
if (GlobalObject *GV = dyn_cast<GlobalObject>(P))
- GV->setAlignment(Bytes);
+ GV->setAlignment(MaybeAlign(Bytes));
else if (AllocaInst *AI = dyn_cast<AllocaInst>(P))
- AI->setAlignment(Bytes);
+ AI->setAlignment(MaybeAlign(Bytes));
else if (LoadInst *LI = dyn_cast<LoadInst>(P))
- LI->setAlignment(Bytes);
+ LI->setAlignment(MaybeAlign(Bytes));
else if (StoreInst *SI = dyn_cast<StoreInst>(P))
- SI->setAlignment(Bytes);
+ SI->setAlignment(MaybeAlign(Bytes));
else
llvm_unreachable(
"only GlobalValue, AllocaInst, LoadInst and StoreInst have alignment");
@@ -2480,7 +2489,7 @@ LLVMValueRef LLVMGetPreviousParam(LLVMValueRef Arg) {
void LLVMSetParamAlignment(LLVMValueRef Arg, unsigned align) {
Argument *A = unwrap<Argument>(Arg);
- A->addAttr(Attribute::getWithAlignment(A->getContext(), align));
+ A->addAttr(Attribute::getWithAlignment(A->getContext(), Align(align)));
}
/*--.. Operations on ifuncs ................................................--*/
@@ -2779,7 +2788,8 @@ void LLVMSetInstructionCallConv(LLVMValueRef Instr, unsigned CC) {
void LLVMSetInstrParamAlignment(LLVMValueRef Instr, unsigned index,
unsigned align) {
auto *Call = unwrap<CallBase>(Instr);
- Attribute AlignAttr = Attribute::getWithAlignment(Call->getContext(), align);
+ Attribute AlignAttr =
+ Attribute::getWithAlignment(Call->getContext(), Align(align));
Call->addAttribute(index, AlignAttr);
}
@@ -3518,6 +3528,47 @@ static LLVMAtomicOrdering mapToLLVMOrdering(AtomicOrdering Ordering) {
llvm_unreachable("Invalid AtomicOrdering value!");
}
+static AtomicRMWInst::BinOp mapFromLLVMRMWBinOp(LLVMAtomicRMWBinOp BinOp) {
+ switch (BinOp) {
+ case LLVMAtomicRMWBinOpXchg: return AtomicRMWInst::Xchg;
+ case LLVMAtomicRMWBinOpAdd: return AtomicRMWInst::Add;
+ case LLVMAtomicRMWBinOpSub: return AtomicRMWInst::Sub;
+ case LLVMAtomicRMWBinOpAnd: return AtomicRMWInst::And;
+ case LLVMAtomicRMWBinOpNand: return AtomicRMWInst::Nand;
+ case LLVMAtomicRMWBinOpOr: return AtomicRMWInst::Or;
+ case LLVMAtomicRMWBinOpXor: return AtomicRMWInst::Xor;
+ case LLVMAtomicRMWBinOpMax: return AtomicRMWInst::Max;
+ case LLVMAtomicRMWBinOpMin: return AtomicRMWInst::Min;
+ case LLVMAtomicRMWBinOpUMax: return AtomicRMWInst::UMax;
+ case LLVMAtomicRMWBinOpUMin: return AtomicRMWInst::UMin;
+ case LLVMAtomicRMWBinOpFAdd: return AtomicRMWInst::FAdd;
+ case LLVMAtomicRMWBinOpFSub: return AtomicRMWInst::FSub;
+ }
+
+ llvm_unreachable("Invalid LLVMAtomicRMWBinOp value!");
+}
+
+static LLVMAtomicRMWBinOp mapToLLVMRMWBinOp(AtomicRMWInst::BinOp BinOp) {
+ switch (BinOp) {
+ case AtomicRMWInst::Xchg: return LLVMAtomicRMWBinOpXchg;
+ case AtomicRMWInst::Add: return LLVMAtomicRMWBinOpAdd;
+ case AtomicRMWInst::Sub: return LLVMAtomicRMWBinOpSub;
+ case AtomicRMWInst::And: return LLVMAtomicRMWBinOpAnd;
+ case AtomicRMWInst::Nand: return LLVMAtomicRMWBinOpNand;
+ case AtomicRMWInst::Or: return LLVMAtomicRMWBinOpOr;
+ case AtomicRMWInst::Xor: return LLVMAtomicRMWBinOpXor;
+ case AtomicRMWInst::Max: return LLVMAtomicRMWBinOpMax;
+ case AtomicRMWInst::Min: return LLVMAtomicRMWBinOpMin;
+ case AtomicRMWInst::UMax: return LLVMAtomicRMWBinOpUMax;
+ case AtomicRMWInst::UMin: return LLVMAtomicRMWBinOpUMin;
+ case AtomicRMWInst::FAdd: return LLVMAtomicRMWBinOpFAdd;
+ case AtomicRMWInst::FSub: return LLVMAtomicRMWBinOpFSub;
+ default: break;
+ }
+
+ llvm_unreachable("Invalid AtomicRMWBinOp value!");
+}
+
// TODO: Should this and other atomic instructions support building with
// "syncscope"?
LLVMValueRef LLVMBuildFence(LLVMBuilderRef B, LLVMAtomicOrdering Ordering,
@@ -3593,14 +3644,30 @@ LLVMBool LLVMGetVolatile(LLVMValueRef MemAccessInst) {
Value *P = unwrap<Value>(MemAccessInst);
if (LoadInst *LI = dyn_cast<LoadInst>(P))
return LI->isVolatile();
- return cast<StoreInst>(P)->isVolatile();
+ if (StoreInst *SI = dyn_cast<StoreInst>(P))
+ return SI->isVolatile();
+ if (AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(P))
+ return AI->isVolatile();
+ return cast<AtomicCmpXchgInst>(P)->isVolatile();
}
void LLVMSetVolatile(LLVMValueRef MemAccessInst, LLVMBool isVolatile) {
Value *P = unwrap<Value>(MemAccessInst);
if (LoadInst *LI = dyn_cast<LoadInst>(P))
return LI->setVolatile(isVolatile);
- return cast<StoreInst>(P)->setVolatile(isVolatile);
+ if (StoreInst *SI = dyn_cast<StoreInst>(P))
+ return SI->setVolatile(isVolatile);
+ if (AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(P))
+ return AI->setVolatile(isVolatile);
+ return cast<AtomicCmpXchgInst>(P)->setVolatile(isVolatile);
+}
+
+LLVMBool LLVMGetWeak(LLVMValueRef CmpXchgInst) {
+ return unwrap<AtomicCmpXchgInst>(CmpXchgInst)->isWeak();
+}
+
+void LLVMSetWeak(LLVMValueRef CmpXchgInst, LLVMBool isWeak) {
+ return unwrap<AtomicCmpXchgInst>(CmpXchgInst)->setWeak(isWeak);
}
LLVMAtomicOrdering LLVMGetOrdering(LLVMValueRef MemAccessInst) {
@@ -3608,8 +3675,10 @@ LLVMAtomicOrdering LLVMGetOrdering(LLVMValueRef MemAccessInst) {
AtomicOrdering O;
if (LoadInst *LI = dyn_cast<LoadInst>(P))
O = LI->getOrdering();
+ else if (StoreInst *SI = dyn_cast<StoreInst>(P))
+ O = SI->getOrdering();
else
- O = cast<StoreInst>(P)->getOrdering();
+ O = cast<AtomicRMWInst>(P)->getOrdering();
return mapToLLVMOrdering(O);
}
@@ -3622,6 +3691,14 @@ void LLVMSetOrdering(LLVMValueRef MemAccessInst, LLVMAtomicOrdering Ordering) {
return cast<StoreInst>(P)->setOrdering(O);
}
+LLVMAtomicRMWBinOp LLVMGetAtomicRMWBinOp(LLVMValueRef Inst) {
+ return mapToLLVMRMWBinOp(unwrap<AtomicRMWInst>(Inst)->getOperation());
+}
+
+void LLVMSetAtomicRMWBinOp(LLVMValueRef Inst, LLVMAtomicRMWBinOp BinOp) {
+ unwrap<AtomicRMWInst>(Inst)->setOperation(mapFromLLVMRMWBinOp(BinOp));
+}
+
/*--.. Casts ...............................................................--*/
LLVMValueRef LLVMBuildTrunc(LLVMBuilderRef B, LLVMValueRef Val,
@@ -3840,20 +3917,7 @@ LLVMValueRef LLVMBuildAtomicRMW(LLVMBuilderRef B,LLVMAtomicRMWBinOp op,
LLVMValueRef PTR, LLVMValueRef Val,
LLVMAtomicOrdering ordering,
LLVMBool singleThread) {
- AtomicRMWInst::BinOp intop;
- switch (op) {
- case LLVMAtomicRMWBinOpXchg: intop = AtomicRMWInst::Xchg; break;
- case LLVMAtomicRMWBinOpAdd: intop = AtomicRMWInst::Add; break;
- case LLVMAtomicRMWBinOpSub: intop = AtomicRMWInst::Sub; break;
- case LLVMAtomicRMWBinOpAnd: intop = AtomicRMWInst::And; break;
- case LLVMAtomicRMWBinOpNand: intop = AtomicRMWInst::Nand; break;
- case LLVMAtomicRMWBinOpOr: intop = AtomicRMWInst::Or; break;
- case LLVMAtomicRMWBinOpXor: intop = AtomicRMWInst::Xor; break;
- case LLVMAtomicRMWBinOpMax: intop = AtomicRMWInst::Max; break;
- case LLVMAtomicRMWBinOpMin: intop = AtomicRMWInst::Min; break;
- case LLVMAtomicRMWBinOpUMax: intop = AtomicRMWInst::UMax; break;
- case LLVMAtomicRMWBinOpUMin: intop = AtomicRMWInst::UMin; break;
- }
+ AtomicRMWInst::BinOp intop = mapFromLLVMRMWBinOp(op);
return wrap(unwrap(B)->CreateAtomicRMW(intop, unwrap(PTR), unwrap(Val),
mapFromLLVMOrdering(ordering), singleThread ? SyncScope::SingleThread
: SyncScope::System));
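
Taken together, the Core.cpp additions round out C API coverage of atomic instructions. A short usage sketch against the new entry points (it assumes RMW and CmpXchg are existing atomicrmw and cmpxchg instructions obtained elsewhere):

    #include <llvm-c/Core.h>

    void demo(LLVMValueRef RMW, LLVMValueRef CmpXchg) {
      // Read and rewrite the operation of an existing atomicrmw.
      if (LLVMGetAtomicRMWBinOp(RMW) == LLVMAtomicRMWBinOpAdd)
        LLVMSetAtomicRMWBinOp(RMW, LLVMAtomicRMWBinOpSub);

      // Volatility now also covers atomicrmw/cmpxchg, and cmpxchg
      // weakness is exposed for the first time.
      LLVMSetVolatile(RMW, 1);
      if (!LLVMGetWeak(CmpXchg))
        LLVMSetWeak(CmpXchg, 1);
    }
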
diff --git a/lib/IR/DIBuilder.cpp b/lib/IR/DIBuilder.cpp
index 2493c6cbe532..5d5671227430 100644
--- a/lib/IR/DIBuilder.cpp
+++ b/lib/IR/DIBuilder.cpp
@@ -25,7 +25,7 @@
using namespace llvm;
using namespace llvm::dwarf;
-cl::opt<bool>
+static cl::opt<bool>
UseDbgAddr("use-dbg-addr",
llvm::cl::desc("Use llvm.dbg.addr for all local variables"),
cl::init(false), cl::Hidden);
diff --git a/lib/IR/DataLayout.cpp b/lib/IR/DataLayout.cpp
index 6e0ebbd4a730..5fe7a2e94b6a 100644
--- a/lib/IR/DataLayout.cpp
+++ b/lib/IR/DataLayout.cpp
@@ -29,6 +29,7 @@
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/TypeSize.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
@@ -44,7 +45,6 @@ using namespace llvm;
StructLayout::StructLayout(StructType *ST, const DataLayout &DL) {
assert(!ST->isOpaque() && "Cannot get layout of opaque structs");
- StructAlignment = 0;
StructSize = 0;
IsPadded = false;
NumElements = ST->getNumElements();
@@ -52,10 +52,10 @@ StructLayout::StructLayout(StructType *ST, const DataLayout &DL) {
// Loop over each of the elements, placing them in memory.
for (unsigned i = 0, e = NumElements; i != e; ++i) {
Type *Ty = ST->getElementType(i);
- unsigned TyAlign = ST->isPacked() ? 1 : DL.getABITypeAlignment(Ty);
+ const Align TyAlign(ST->isPacked() ? 1 : DL.getABITypeAlignment(Ty));
// Add padding if necessary to align the data element properly.
- if ((StructSize & (TyAlign-1)) != 0) {
+ if (!isAligned(TyAlign, StructSize)) {
IsPadded = true;
StructSize = alignTo(StructSize, TyAlign);
}
@@ -67,12 +67,9 @@ StructLayout::StructLayout(StructType *ST, const DataLayout &DL) {
StructSize += DL.getTypeAllocSize(Ty); // Consume space for this data item
}
- // Empty structures have alignment of 1 byte.
- if (StructAlignment == 0) StructAlignment = 1;
-
// Add padding to the end of the struct so that it could be put in an array
// and all array elements would be aligned correctly.
- if ((StructSize & (StructAlignment-1)) != 0) {
+ if (!isAligned(StructAlignment, StructSize)) {
IsPadded = true;
StructSize = alignTo(StructSize, StructAlignment);
}
@@ -102,9 +99,8 @@ unsigned StructLayout::getElementContainingOffset(uint64_t Offset) const {
// LayoutAlignElem, LayoutAlign support
//===----------------------------------------------------------------------===//
-LayoutAlignElem
-LayoutAlignElem::get(AlignTypeEnum align_type, unsigned abi_align,
- unsigned pref_align, uint32_t bit_width) {
+LayoutAlignElem LayoutAlignElem::get(AlignTypeEnum align_type, Align abi_align,
+ Align pref_align, uint32_t bit_width) {
assert(abi_align <= pref_align && "Preferred alignment worse than ABI!");
LayoutAlignElem retval;
retval.AlignType = align_type;
@@ -126,10 +122,9 @@ LayoutAlignElem::operator==(const LayoutAlignElem &rhs) const {
// PointerAlignElem, PointerAlign support
//===----------------------------------------------------------------------===//
-PointerAlignElem
-PointerAlignElem::get(uint32_t AddressSpace, unsigned ABIAlign,
- unsigned PrefAlign, uint32_t TypeByteWidth,
- uint32_t IndexWidth) {
+PointerAlignElem PointerAlignElem::get(uint32_t AddressSpace, Align ABIAlign,
+ Align PrefAlign, uint32_t TypeByteWidth,
+ uint32_t IndexWidth) {
assert(ABIAlign <= PrefAlign && "Preferred alignment worse than ABI!");
PointerAlignElem retval;
retval.AddressSpace = AddressSpace;
@@ -162,18 +157,18 @@ const char *DataLayout::getManglingComponent(const Triple &T) {
}
static const LayoutAlignElem DefaultAlignments[] = {
- { INTEGER_ALIGN, 1, 1, 1 }, // i1
- { INTEGER_ALIGN, 8, 1, 1 }, // i8
- { INTEGER_ALIGN, 16, 2, 2 }, // i16
- { INTEGER_ALIGN, 32, 4, 4 }, // i32
- { INTEGER_ALIGN, 64, 4, 8 }, // i64
- { FLOAT_ALIGN, 16, 2, 2 }, // half
- { FLOAT_ALIGN, 32, 4, 4 }, // float
- { FLOAT_ALIGN, 64, 8, 8 }, // double
- { FLOAT_ALIGN, 128, 16, 16 }, // ppcf128, quad, ...
- { VECTOR_ALIGN, 64, 8, 8 }, // v2i32, v1i64, ...
- { VECTOR_ALIGN, 128, 16, 16 }, // v16i8, v8i16, v4i32, ...
- { AGGREGATE_ALIGN, 0, 0, 8 } // struct
+ {INTEGER_ALIGN, 1, Align(1), Align(1)}, // i1
+ {INTEGER_ALIGN, 8, Align(1), Align(1)}, // i8
+ {INTEGER_ALIGN, 16, Align(2), Align(2)}, // i16
+ {INTEGER_ALIGN, 32, Align(4), Align(4)}, // i32
+ {INTEGER_ALIGN, 64, Align(4), Align(8)}, // i64
+ {FLOAT_ALIGN, 16, Align(2), Align(2)}, // half
+ {FLOAT_ALIGN, 32, Align(4), Align(4)}, // float
+ {FLOAT_ALIGN, 64, Align(8), Align(8)}, // double
+ {FLOAT_ALIGN, 128, Align(16), Align(16)}, // ppcf128, quad, ...
+ {VECTOR_ALIGN, 64, Align(8), Align(8)}, // v2i32, v1i64, ...
+ {VECTOR_ALIGN, 128, Align(16), Align(16)}, // v16i8, v8i16, v4i32, ...
+ {AGGREGATE_ALIGN, 0, Align(1), Align(8)} // struct
};
void DataLayout::reset(StringRef Desc) {
@@ -182,9 +177,9 @@ void DataLayout::reset(StringRef Desc) {
LayoutMap = nullptr;
BigEndian = false;
AllocaAddrSpace = 0;
- StackNaturalAlign = 0;
+ StackNaturalAlign.reset();
ProgramAddrSpace = 0;
- FunctionPtrAlign = 0;
+ FunctionPtrAlign.reset();
TheFunctionPtrAlignType = FunctionPtrAlignType::Independent;
ManglingMode = MM_None;
NonIntegralAddressSpaces.clear();
@@ -194,7 +189,7 @@ void DataLayout::reset(StringRef Desc) {
setAlignment((AlignTypeEnum)E.AlignType, E.ABIAlign, E.PrefAlign,
E.TypeBitWidth);
}
- setPointerAlignment(0, 8, 8, 8, 8);
+ setPointerAlignment(0, Align(8), Align(8), 8, 8);
parseSpecifier(Desc);
}
@@ -320,8 +315,9 @@ void DataLayout::parseSpecifier(StringRef Desc) {
report_fatal_error("Invalid index size of 0 bytes");
}
}
- setPointerAlignment(AddrSpace, PointerABIAlign, PointerPrefAlign,
- PointerMemSize, IndexSize);
+ setPointerAlignment(AddrSpace, assumeAligned(PointerABIAlign),
+ assumeAligned(PointerPrefAlign), PointerMemSize,
+ IndexSize);
break;
}
case 'i':
@@ -349,11 +345,16 @@ void DataLayout::parseSpecifier(StringRef Desc) {
report_fatal_error(
"Missing alignment specification in datalayout string");
Split = split(Rest, ':');
- unsigned ABIAlign = inBytes(getInt(Tok));
+ const unsigned ABIAlign = inBytes(getInt(Tok));
if (AlignType != AGGREGATE_ALIGN && !ABIAlign)
report_fatal_error(
"ABI alignment specification must be >0 for non-aggregate types");
+ if (!isUInt<16>(ABIAlign))
+ report_fatal_error("Invalid ABI alignment, must be a 16bit integer");
+ if (ABIAlign != 0 && !isPowerOf2_64(ABIAlign))
+ report_fatal_error("Invalid ABI alignment, must be a power of 2");
+
// Preferred alignment.
unsigned PrefAlign = ABIAlign;
if (!Rest.empty()) {
@@ -361,7 +362,14 @@ void DataLayout::parseSpecifier(StringRef Desc) {
PrefAlign = inBytes(getInt(Tok));
}
- setAlignment(AlignType, ABIAlign, PrefAlign, Size);
+ if (!isUInt<16>(PrefAlign))
+ report_fatal_error(
+ "Invalid preferred alignment, must be a 16bit integer");
+ if (PrefAlign != 0 && !isPowerOf2_64(PrefAlign))
+ report_fatal_error("Invalid preferred alignment, must be a power of 2");
+
+ setAlignment(AlignType, assumeAligned(ABIAlign), assumeAligned(PrefAlign),
+ Size);
break;
}
@@ -378,7 +386,10 @@ void DataLayout::parseSpecifier(StringRef Desc) {
}
break;
case 'S': { // Stack natural alignment.
- StackNaturalAlign = inBytes(getInt(Tok));
+ uint64_t Alignment = inBytes(getInt(Tok));
+ if (Alignment != 0 && !llvm::isPowerOf2_64(Alignment))
+ report_fatal_error("Alignment is neither 0 nor a power of 2");
+ StackNaturalAlign = MaybeAlign(Alignment);
break;
}
case 'F': {
@@ -394,7 +405,10 @@ void DataLayout::parseSpecifier(StringRef Desc) {
"datalayout string");
}
Tok = Tok.substr(1);
- FunctionPtrAlign = inBytes(getInt(Tok));
+ uint64_t Alignment = inBytes(getInt(Tok));
+ if (Alignment != 0 && !llvm::isPowerOf2_64(Alignment))
+ report_fatal_error("Alignment is neither 0 nor a power of 2");
+ FunctionPtrAlign = MaybeAlign(Alignment);
break;
}
case 'P': { // Function address space.
@@ -468,20 +482,15 @@ DataLayout::findAlignmentLowerBound(AlignTypeEnum AlignType,
});
}
-void
-DataLayout::setAlignment(AlignTypeEnum align_type, unsigned abi_align,
- unsigned pref_align, uint32_t bit_width) {
+void DataLayout::setAlignment(AlignTypeEnum align_type, Align abi_align,
+ Align pref_align, uint32_t bit_width) {
+ // AlignmentsTy::ABIAlign and AlignmentsTy::PrefAlign were once stored as
+ // uint16_t; it is unclear if there are requirements for alignment to be less
+ // than 2^16 other than storage. In the meantime we leave the restriction as
+ // an assert. See D67400 for context.
+ assert(Log2(abi_align) < 16 && Log2(pref_align) < 16 && "Alignment too big");
if (!isUInt<24>(bit_width))
report_fatal_error("Invalid bit width, must be a 24bit integer");
- if (!isUInt<16>(abi_align))
- report_fatal_error("Invalid ABI alignment, must be a 16bit integer");
- if (!isUInt<16>(pref_align))
- report_fatal_error("Invalid preferred alignment, must be a 16bit integer");
- if (abi_align != 0 && !isPowerOf2_64(abi_align))
- report_fatal_error("Invalid ABI alignment, must be a power of 2");
- if (pref_align != 0 && !isPowerOf2_64(pref_align))
- report_fatal_error("Invalid preferred alignment, must be a power of 2");
-
if (pref_align < abi_align)
report_fatal_error(
"Preferred alignment cannot be less than the ABI alignment");
@@ -507,8 +516,8 @@ DataLayout::findPointerLowerBound(uint32_t AddressSpace) {
});
}
-void DataLayout::setPointerAlignment(uint32_t AddrSpace, unsigned ABIAlign,
- unsigned PrefAlign, uint32_t TypeByteWidth,
+void DataLayout::setPointerAlignment(uint32_t AddrSpace, Align ABIAlign,
+ Align PrefAlign, uint32_t TypeByteWidth,
uint32_t IndexWidth) {
if (PrefAlign < ABIAlign)
report_fatal_error(
@@ -528,9 +537,8 @@ void DataLayout::setPointerAlignment(uint32_t AddrSpace, unsigned ABIAlign,
/// getAlignmentInfo - Return the alignment (either ABI if ABIInfo = true or
/// preferred if ABIInfo = false) the layout wants for the specified datatype.
-unsigned DataLayout::getAlignmentInfo(AlignTypeEnum AlignType,
- uint32_t BitWidth, bool ABIInfo,
- Type *Ty) const {
+Align DataLayout::getAlignmentInfo(AlignTypeEnum AlignType, uint32_t BitWidth,
+ bool ABIInfo, Type *Ty) const {
AlignmentsTy::const_iterator I = findAlignmentLowerBound(AlignType, BitWidth);
// See if we found an exact match. Of if we are looking for an integer type,
// but don't have an exact match take the next largest integer. This is where
@@ -549,10 +557,11 @@ unsigned DataLayout::getAlignmentInfo(AlignTypeEnum AlignType,
} else if (AlignType == VECTOR_ALIGN) {
// By default, use natural alignment for vector types. This is consistent
// with what clang and llvm-gcc do.
- unsigned Align = getTypeAllocSize(cast<VectorType>(Ty)->getElementType());
- Align *= cast<VectorType>(Ty)->getNumElements();
- Align = PowerOf2Ceil(Align);
- return Align;
+ unsigned Alignment =
+ getTypeAllocSize(cast<VectorType>(Ty)->getElementType());
+ Alignment *= cast<VectorType>(Ty)->getNumElements();
+ Alignment = PowerOf2Ceil(Alignment);
+ return Align(Alignment);
}
// If we still couldn't find a reasonable default alignment, fall back
@@ -561,9 +570,9 @@ unsigned DataLayout::getAlignmentInfo(AlignTypeEnum AlignType,
// approximation of reality, and if the user wanted something less
// conservative, they should have specified it explicitly in the data
// layout.
- unsigned Align = getTypeStoreSize(Ty);
- Align = PowerOf2Ceil(Align);
- return Align;
+ unsigned Alignment = getTypeStoreSize(Ty);
+ Alignment = PowerOf2Ceil(Alignment);
+ return Align(Alignment);
}
namespace {
@@ -624,7 +633,7 @@ const StructLayout *DataLayout::getStructLayout(StructType *Ty) const {
return L;
}
-unsigned DataLayout::getPointerABIAlignment(unsigned AS) const {
+Align DataLayout::getPointerABIAlignment(unsigned AS) const {
PointersTy::const_iterator I = findPointerLowerBound(AS);
if (I == Pointers.end() || I->AddressSpace != AS) {
I = findPointerLowerBound(0);
@@ -633,7 +642,7 @@ unsigned DataLayout::getPointerABIAlignment(unsigned AS) const {
return I->ABIAlign;
}
-unsigned DataLayout::getPointerPrefAlignment(unsigned AS) const {
+Align DataLayout::getPointerPrefAlignment(unsigned AS) const {
PointersTy::const_iterator I = findPointerLowerBound(AS);
if (I == Pointers.end() || I->AddressSpace != AS) {
I = findPointerLowerBound(0);
@@ -690,21 +699,18 @@ unsigned DataLayout::getIndexTypeSizeInBits(Type *Ty) const {
Get the ABI (\a abi_or_pref == true) or preferred alignment (\a abi_or_pref
== false) for the requested type \a Ty.
*/
-unsigned DataLayout::getAlignment(Type *Ty, bool abi_or_pref) const {
+Align DataLayout::getAlignment(Type *Ty, bool abi_or_pref) const {
AlignTypeEnum AlignType;
assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!");
switch (Ty->getTypeID()) {
// Early escape for the non-numeric types.
case Type::LabelTyID:
- return (abi_or_pref
- ? getPointerABIAlignment(0)
- : getPointerPrefAlignment(0));
+ return abi_or_pref ? getPointerABIAlignment(0) : getPointerPrefAlignment(0);
case Type::PointerTyID: {
unsigned AS = cast<PointerType>(Ty)->getAddressSpace();
- return (abi_or_pref
- ? getPointerABIAlignment(AS)
- : getPointerPrefAlignment(AS));
+ return abi_or_pref ? getPointerABIAlignment(AS)
+ : getPointerPrefAlignment(AS);
}
case Type::ArrayTyID:
return getAlignment(cast<ArrayType>(Ty)->getElementType(), abi_or_pref);
@@ -712,11 +718,11 @@ unsigned DataLayout::getAlignment(Type *Ty, bool abi_or_pref) const {
case Type::StructTyID: {
// Packed structure types always have an ABI alignment of one.
if (cast<StructType>(Ty)->isPacked() && abi_or_pref)
- return 1;
+ return Align::None();
// Get the layout annotation... which is lazily created on demand.
const StructLayout *Layout = getStructLayout(cast<StructType>(Ty));
- unsigned Align = getAlignmentInfo(AGGREGATE_ALIGN, 0, abi_or_pref, Ty);
+ const Align Align = getAlignmentInfo(AGGREGATE_ALIGN, 0, abi_or_pref, Ty);
return std::max(Align, Layout->getAlignment());
}
case Type::IntegerTyID:
@@ -740,27 +746,24 @@ unsigned DataLayout::getAlignment(Type *Ty, bool abi_or_pref) const {
llvm_unreachable("Bad type for getAlignment!!!");
}
- return getAlignmentInfo(AlignType, getTypeSizeInBits(Ty), abi_or_pref, Ty);
+ // If we're dealing with a scalable vector, we just need the known minimum
+ // size for determining alignment. If not, we'll get the exact size.
+ return getAlignmentInfo(AlignType, getTypeSizeInBits(Ty).getKnownMinSize(),
+ abi_or_pref, Ty);
}
unsigned DataLayout::getABITypeAlignment(Type *Ty) const {
- return getAlignment(Ty, true);
+ return getAlignment(Ty, true).value();
}
/// getABIIntegerTypeAlignment - Return the minimum ABI-required alignment for
/// an integer type of the specified bitwidth.
-unsigned DataLayout::getABIIntegerTypeAlignment(unsigned BitWidth) const {
+Align DataLayout::getABIIntegerTypeAlignment(unsigned BitWidth) const {
return getAlignmentInfo(INTEGER_ALIGN, BitWidth, true, nullptr);
}
unsigned DataLayout::getPrefTypeAlignment(Type *Ty) const {
- return getAlignment(Ty, false);
-}
-
-unsigned DataLayout::getPreferredTypeAlignmentShift(Type *Ty) const {
- unsigned Align = getPrefTypeAlignment(Ty);
- assert(!(Align & (Align-1)) && "Alignment is not a power of two!");
- return Log2_32(Align);
+ return getAlignment(Ty, false).value();
}
IntegerType *DataLayout::getIntPtrType(LLVMContext &C,
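
The StructLayout constructor above is the standard offset/padding computation, now phrased with isAligned/alignTo over Align. Worked through by hand for a struct of { i8, i32 } with a 4-byte ABI alignment for i32 (an independent sketch):

    #include <cstdint>
    #include <iostream>

    uint64_t alignTo(uint64_t Value, uint64_t Alignment) {
      return (Value + Alignment - 1) / Alignment * Alignment;
    }

    int main() {
      uint64_t Size = 0;
      Size = alignTo(Size, 1) + 1;    // i8 at offset 0 -> size 1
      bool Padded = Size % 4 != 0;    // offset 1 is misaligned for i32
      Size = alignTo(Size, 4) + 4;    // i32 at offset 4 -> size 8
      std::cout << "size=" << Size << " padded=" << Padded << "\n";
      return 0;
    }
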
diff --git a/lib/IR/DebugInfo.cpp b/lib/IR/DebugInfo.cpp
index ce47ef207434..1bbe6b85d260 100644
--- a/lib/IR/DebugInfo.cpp
+++ b/lib/IR/DebugInfo.cpp
@@ -279,7 +279,7 @@ bool DebugInfoFinder::addScope(DIScope *Scope) {
}
static MDNode *stripDebugLocFromLoopID(MDNode *N) {
- assert(!empty(N->operands()) && "Missing self reference?");
+ assert(!N->operands().empty() && "Missing self reference?");
// if there is no debug location, we do not have to rewrite this MDNode.
if (std::none_of(N->op_begin() + 1, N->op_end(), [](const MDOperand &Op) {
@@ -929,6 +929,26 @@ const char *LLVMDIFileGetSource(LLVMMetadataRef File, unsigned *Len) {
return "";
}
+LLVMMetadataRef LLVMDIBuilderCreateMacro(LLVMDIBuilderRef Builder,
+ LLVMMetadataRef ParentMacroFile,
+ unsigned Line,
+ LLVMDWARFMacinfoRecordType RecordType,
+ const char *Name, size_t NameLen,
+ const char *Value, size_t ValueLen) {
+ return wrap(
+ unwrap(Builder)->createMacro(unwrapDI<DIMacroFile>(ParentMacroFile), Line,
+ static_cast<MacinfoRecordType>(RecordType),
+ {Name, NameLen}, {Value, ValueLen}));
+}
+
+LLVMMetadataRef
+LLVMDIBuilderCreateTempMacroFile(LLVMDIBuilderRef Builder,
+ LLVMMetadataRef ParentMacroFile, unsigned Line,
+ LLVMMetadataRef File) {
+ return wrap(unwrap(Builder)->createTempMacroFile(
+ unwrapDI<DIMacroFile>(ParentMacroFile), Line, unwrapDI<DIFile>(File)));
+}
+
LLVMMetadataRef LLVMDIBuilderCreateEnumerator(LLVMDIBuilderRef Builder,
const char *Name, size_t NameLen,
int64_t Value,
diff --git a/lib/IR/DebugInfoMetadata.cpp b/lib/IR/DebugInfoMetadata.cpp
index 900df27d1d33..94ec3abfa7a2 100644
--- a/lib/IR/DebugInfoMetadata.cpp
+++ b/lib/IR/DebugInfoMetadata.cpp
@@ -828,15 +828,23 @@ DIExpression *DIExpression::getImpl(LLVMContext &Context,
}
unsigned DIExpression::ExprOperand::getSize() const {
- switch (getOp()) {
+ uint64_t Op = getOp();
+
+ if (Op >= dwarf::DW_OP_breg0 && Op <= dwarf::DW_OP_breg31)
+ return 2;
+
+ switch (Op) {
case dwarf::DW_OP_LLVM_convert:
case dwarf::DW_OP_LLVM_fragment:
+ case dwarf::DW_OP_bregx:
return 3;
case dwarf::DW_OP_constu:
+ case dwarf::DW_OP_consts:
case dwarf::DW_OP_deref_size:
case dwarf::DW_OP_plus_uconst:
case dwarf::DW_OP_LLVM_tag_offset:
- case dwarf::DW_OP_entry_value:
+ case dwarf::DW_OP_LLVM_entry_value:
+ case dwarf::DW_OP_regx:
return 2;
default:
return 1;
@@ -849,8 +857,13 @@ bool DIExpression::isValid() const {
if (I->get() + I->getSize() > E->get())
return false;
+ uint64_t Op = I->getOp();
+ if ((Op >= dwarf::DW_OP_reg0 && Op <= dwarf::DW_OP_reg31) ||
+ (Op >= dwarf::DW_OP_breg0 && Op <= dwarf::DW_OP_breg31))
+ return true;
+
// Check that the operand is valid.
- switch (I->getOp()) {
+ switch (Op) {
default:
return false;
case dwarf::DW_OP_LLVM_fragment:
@@ -877,10 +890,12 @@ bool DIExpression::isValid() const {
return false;
break;
}
- case dwarf::DW_OP_entry_value: {
- // An entry value operator must appear at the begin and the size
- // of following expression should be 1, because we support only
- // entry values of a simple register location.
+ case dwarf::DW_OP_LLVM_entry_value: {
+ // An entry value operator must appear at the beginning and the number of
+ // operations it covers can currently only be 1, because we support only
+ // entry values of a simple register location. One reason for this is that
+ // we currently can't calculate the size of the resulting DWARF block for
+ // other expressions.
return I->get() == expr_op_begin()->get() && I->getArg(0) == 1 &&
getNumElements() == 2;
}
@@ -905,6 +920,8 @@ bool DIExpression::isValid() const {
case dwarf::DW_OP_lit0:
case dwarf::DW_OP_not:
case dwarf::DW_OP_dup:
+ case dwarf::DW_OP_regx:
+ case dwarf::DW_OP_bregx:
break;
}
}
@@ -1035,7 +1052,7 @@ DIExpression *DIExpression::prependOpcodes(const DIExpression *Expr,
assert(Expr && "Can't prepend ops to this expression");
if (EntryValue) {
- Ops.push_back(dwarf::DW_OP_entry_value);
+ Ops.push_back(dwarf::DW_OP_LLVM_entry_value);
// Add size info needed for entry value expression.
// Add plus one for target register operand.
Ops.push_back(Expr->getNumElements() + 1);
@@ -1146,6 +1163,7 @@ Optional<DIExpression *> DIExpression::createFragmentExpression(
Op.appendToVector(Ops);
}
}
+ assert(Expr && "Unknown DIExpression");
Ops.push_back(dwarf::DW_OP_LLVM_fragment);
Ops.push_back(OffsetInBits);
Ops.push_back(SizeInBits);
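Illustration (not part of the patch): the only entry-value shape isValid() accepts after this hunk is the renamed operator covering exactly one operation, so a minimal valid expression (Ctx assumed to be an LLVMContext) is:

    // {DW_OP_LLVM_entry_value, 1}: entry value of a simple register location.
    DIExpression *EV =
        DIExpression::get(Ctx, {dwarf::DW_OP_LLVM_entry_value, 1});
    assert(EV->isValid() && EV->getNumElements() == 2);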
diff --git a/lib/IR/DiagnosticInfo.cpp b/lib/IR/DiagnosticInfo.cpp
index 4a8e3cca3493..99d5aec3f043 100644
--- a/lib/IR/DiagnosticInfo.cpp
+++ b/lib/IR/DiagnosticInfo.cpp
@@ -370,5 +370,16 @@ std::string DiagnosticInfoOptimizationBase::getMsg() const {
return OS.str();
}
+DiagnosticInfoMisExpect::DiagnosticInfoMisExpect(const Instruction *Inst,
+ Twine &Msg)
+ : DiagnosticInfoWithLocationBase(DK_MisExpect, DS_Warning,
+ *Inst->getParent()->getParent(),
+ Inst->getDebugLoc()),
+ Msg(Msg) {}
+
+void DiagnosticInfoMisExpect::print(DiagnosticPrinter &DP) const {
+ DP << getLocationStr() << ": " << getMsg();
+}
+
void OptimizationRemarkAnalysisFPCommute::anchor() {}
void OptimizationRemarkAnalysisAliasing::anchor() {}
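Illustration (not part of the patch): emitting the new warning through the context's diagnostic handler; Inst must already live in a function, since the constructor reads its parent chain for the location.

    Twine Msg("Potential performance regression from use of __builtin_expect()");
    Inst->getContext().diagnose(DiagnosticInfoMisExpect(Inst, Msg));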
diff --git a/lib/IR/Function.cpp b/lib/IR/Function.cpp
index dc28d22548dd..3f70d2c904e5 100644
--- a/lib/IR/Function.cpp
+++ b/lib/IR/Function.cpp
@@ -251,7 +251,7 @@ Function::Function(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace,
// We only need a symbol table for a function if the context keeps value names
if (!getContext().shouldDiscardValueNames())
- SymTab = make_unique<ValueSymbolTable>();
+ SymTab = std::make_unique<ValueSymbolTable>();
// If the function has arguments, mark them as lazily built.
if (Ty->getNumParams())
@@ -293,7 +293,8 @@ void Function::BuildLazyArguments() const {
// Clear the lazy arguments bit.
unsigned SDC = getSubclassDataFromValue();
- const_cast<Function*>(this)->setValueSubclassData(SDC &= ~(1<<0));
+ SDC &= ~(1 << 0);
+ const_cast<Function*>(this)->setValueSubclassData(SDC);
assert(!hasLazyArguments());
}
@@ -611,9 +612,11 @@ static std::string getMangledTypeStr(Type* Ty) {
Result += "vararg";
// Ensure nested function types are distinguishable.
Result += "f";
- } else if (isa<VectorType>(Ty)) {
- Result += "v" + utostr(Ty->getVectorNumElements()) +
- getMangledTypeStr(Ty->getVectorElementType());
+ } else if (VectorType* VTy = dyn_cast<VectorType>(Ty)) {
+ if (VTy->isScalable())
+ Result += "nx";
+ Result += "v" + utostr(VTy->getVectorNumElements()) +
+ getMangledTypeStr(VTy->getVectorElementType());
} else if (Ty) {
switch (Ty->getTypeID()) {
default: llvm_unreachable("Unhandled type");
@@ -700,7 +703,11 @@ enum IIT_Info {
IIT_STRUCT7 = 39,
IIT_STRUCT8 = 40,
IIT_F128 = 41,
- IIT_VEC_ELEMENT = 42
+ IIT_VEC_ELEMENT = 42,
+ IIT_SCALABLE_VEC = 43,
+ IIT_SUBDIVIDE2_ARG = 44,
+ IIT_SUBDIVIDE4_ARG = 45,
+ IIT_VEC_OF_BITCASTS_TO_INT = 46
};
static void DecodeIITType(unsigned &NextElt, ArrayRef<unsigned char> Infos,
@@ -865,12 +872,36 @@ static void DecodeIITType(unsigned &NextElt, ArrayRef<unsigned char> Infos,
DecodeIITType(NextElt, Infos, OutputTable);
return;
}
+ case IIT_SUBDIVIDE2_ARG: {
+ unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Subdivide2Argument,
+ ArgInfo));
+ return;
+ }
+ case IIT_SUBDIVIDE4_ARG: {
+ unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::Subdivide4Argument,
+ ArgInfo));
+ return;
+ }
case IIT_VEC_ELEMENT: {
unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
OutputTable.push_back(IITDescriptor::get(IITDescriptor::VecElementArgument,
ArgInfo));
return;
}
+ case IIT_SCALABLE_VEC: {
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::ScalableVecArgument,
+ 0));
+ DecodeIITType(NextElt, Infos, OutputTable);
+ return;
+ }
+ case IIT_VEC_OF_BITCASTS_TO_INT: {
+ unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
+ OutputTable.push_back(IITDescriptor::get(IITDescriptor::VecOfBitcastsToInt,
+ ArgInfo));
+ return;
+ }
}
llvm_unreachable("unhandled");
}
@@ -961,6 +992,14 @@ static Type *DecodeFixedType(ArrayRef<Intrinsic::IITDescriptor> &Infos,
assert(ITy->getBitWidth() % 2 == 0);
return IntegerType::get(Context, ITy->getBitWidth() / 2);
}
+ case IITDescriptor::Subdivide2Argument:
+ case IITDescriptor::Subdivide4Argument: {
+ Type *Ty = Tys[D.getArgumentNumber()];
+ VectorType *VTy = dyn_cast<VectorType>(Ty);
+ assert(VTy && "Expected an argument of Vector Type");
+ int SubDivs = D.Kind == IITDescriptor::Subdivide2Argument ? 1 : 2;
+ return VectorType::getSubdividedVectorType(VTy, SubDivs);
+ }
case IITDescriptor::HalfVecArgument:
return VectorType::getHalfElementsVectorType(cast<VectorType>(
Tys[D.getArgumentNumber()]));
@@ -968,7 +1007,7 @@ static Type *DecodeFixedType(ArrayRef<Intrinsic::IITDescriptor> &Infos,
Type *EltTy = DecodeFixedType(Infos, Tys, Context);
Type *Ty = Tys[D.getArgumentNumber()];
if (auto *VTy = dyn_cast<VectorType>(Ty))
- return VectorType::get(EltTy, VTy->getNumElements());
+ return VectorType::get(EltTy, VTy->getElementCount());
return EltTy;
}
case IITDescriptor::PtrToArgument: {
@@ -989,9 +1028,20 @@ static Type *DecodeFixedType(ArrayRef<Intrinsic::IITDescriptor> &Infos,
return VTy->getElementType();
llvm_unreachable("Expected an argument of Vector Type");
}
+ case IITDescriptor::VecOfBitcastsToInt: {
+ Type *Ty = Tys[D.getArgumentNumber()];
+ VectorType *VTy = dyn_cast<VectorType>(Ty);
+ assert(VTy && "Expected an argument of Vector Type");
+ return VectorType::getInteger(VTy);
+ }
case IITDescriptor::VecOfAnyPtrsToElt:
// Return the overloaded type (which determines the pointers address space)
return Tys[D.getOverloadArgNumber()];
+ case IITDescriptor::ScalableVecArgument: {
+ Type *Ty = DecodeFixedType(Infos, Tys, Context);
+ return VectorType::get(Ty->getVectorElementType(),
+ { Ty->getVectorNumElements(), true });
+ }
}
llvm_unreachable("unhandled");
}
@@ -1174,8 +1224,9 @@ static bool matchIntrinsicType(
}
case IITDescriptor::HalfVecArgument:
// If this is a forward reference, defer the check for later.
- return D.getArgumentNumber() >= ArgTys.size() ||
- !isa<VectorType>(ArgTys[D.getArgumentNumber()]) ||
+ if (D.getArgumentNumber() >= ArgTys.size())
+ return IsDeferredCheck || DeferCheck(Ty);
+ return !isa<VectorType>(ArgTys[D.getArgumentNumber()]) ||
VectorType::getHalfElementsVectorType(
cast<VectorType>(ArgTys[D.getArgumentNumber()])) != Ty;
case IITDescriptor::SameVecWidthArgument: {
@@ -1191,8 +1242,8 @@ static bool matchIntrinsicType(
return true;
Type *EltTy = Ty;
if (ThisArgType) {
- if (ReferenceType->getVectorNumElements() !=
- ThisArgType->getVectorNumElements())
+ if (ReferenceType->getElementCount() !=
+ ThisArgType->getElementCount())
return true;
EltTy = ThisArgType->getVectorElementType();
}
@@ -1255,6 +1306,36 @@ static bool matchIntrinsicType(
auto *ReferenceType = dyn_cast<VectorType>(ArgTys[D.getArgumentNumber()]);
return !ReferenceType || Ty != ReferenceType->getElementType();
}
+ case IITDescriptor::Subdivide2Argument:
+ case IITDescriptor::Subdivide4Argument: {
+ // If this is a forward reference, defer the check for later.
+ if (D.getArgumentNumber() >= ArgTys.size())
+ return IsDeferredCheck || DeferCheck(Ty);
+
+ Type *NewTy = ArgTys[D.getArgumentNumber()];
+ if (auto *VTy = dyn_cast<VectorType>(NewTy)) {
+ int SubDivs = D.Kind == IITDescriptor::Subdivide2Argument ? 1 : 2;
+ NewTy = VectorType::getSubdividedVectorType(VTy, SubDivs);
+ return Ty != NewTy;
+ }
+ return true;
+ }
+ case IITDescriptor::ScalableVecArgument: {
+ VectorType *VTy = dyn_cast<VectorType>(Ty);
+ if (!VTy || !VTy->isScalable())
+ return true;
+ return matchIntrinsicType(VTy, Infos, ArgTys, DeferredChecks,
+ IsDeferredCheck);
+ }
+ case IITDescriptor::VecOfBitcastsToInt: {
+ if (D.getArgumentNumber() >= ArgTys.size())
+ return IsDeferredCheck || DeferCheck(Ty);
+ auto *ReferenceType = dyn_cast<VectorType>(ArgTys[D.getArgumentNumber()]);
+ auto *ThisArgVecTy = dyn_cast<VectorType>(Ty);
+ if (!ThisArgVecTy || !ReferenceType)
+ return true;
+ return ThisArgVecTy != VectorType::getInteger(ReferenceType);
+ }
}
llvm_unreachable("unhandled");
}
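Illustration (not part of the patch): the effect of the "nx" prefix on intrinsic name mangling, assuming headers of this vintage.

    LLVMContext Ctx;
    auto *I32 = Type::getInt32Ty(Ctx);
    // Fixed <4 x i32> still mangles to the suffix "v4i32" ...
    VectorType *Fixed = VectorType::get(I32, 4);
    // ... while <vscale x 4 x i32> now yields "nxv4i32", keeping scalable
    // and fixed overloads of the same intrinsic distinct.
    VectorType *Scalable = VectorType::get(I32, 4, /*Scalable=*/true);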
diff --git a/lib/IR/Globals.cpp b/lib/IR/Globals.cpp
index e2bfc0420bc5..46a9696b2944 100644
--- a/lib/IR/Globals.cpp
+++ b/lib/IR/Globals.cpp
@@ -114,18 +114,22 @@ unsigned GlobalValue::getAddressSpace() const {
}
void GlobalObject::setAlignment(unsigned Align) {
- assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
- assert(Align <= MaximumAlignment &&
+ setAlignment(MaybeAlign(Align));
+}
+
+void GlobalObject::setAlignment(MaybeAlign Align) {
+ assert((!Align || Align <= MaximumAlignment) &&
"Alignment is greater than MaximumAlignment!");
- unsigned AlignmentData = Log2_32(Align) + 1;
+ unsigned AlignmentData = encode(Align);
unsigned OldData = getGlobalValueSubClassData();
setGlobalValueSubClassData((OldData & ~AlignmentMask) | AlignmentData);
- assert(getAlignment() == Align && "Alignment representation error!");
+ assert(MaybeAlign(getAlignment()) == Align &&
+ "Alignment representation error!");
}
void GlobalObject::copyAttributesFrom(const GlobalObject *Src) {
GlobalValue::copyAttributesFrom(Src);
- setAlignment(Src->getAlignment());
+ setAlignment(MaybeAlign(Src->getAlignment()));
setSection(Src->getSection());
}
@@ -427,6 +431,43 @@ GlobalIndirectSymbol::GlobalIndirectSymbol(Type *Ty, ValueTy VTy,
Op<0>() = Symbol;
}
+static const GlobalObject *
+findBaseObject(const Constant *C, DenseSet<const GlobalAlias *> &Aliases) {
+ if (auto *GO = dyn_cast<GlobalObject>(C))
+ return GO;
+ if (auto *GA = dyn_cast<GlobalAlias>(C))
+ if (Aliases.insert(GA).second)
+ return findBaseObject(GA->getOperand(0), Aliases);
+ if (auto *CE = dyn_cast<ConstantExpr>(C)) {
+ switch (CE->getOpcode()) {
+ case Instruction::Add: {
+ auto *LHS = findBaseObject(CE->getOperand(0), Aliases);
+ auto *RHS = findBaseObject(CE->getOperand(1), Aliases);
+ if (LHS && RHS)
+ return nullptr;
+ return LHS ? LHS : RHS;
+ }
+ case Instruction::Sub: {
+ if (findBaseObject(CE->getOperand(1), Aliases))
+ return nullptr;
+ return findBaseObject(CE->getOperand(0), Aliases);
+ }
+ case Instruction::IntToPtr:
+ case Instruction::PtrToInt:
+ case Instruction::BitCast:
+ case Instruction::GetElementPtr:
+ return findBaseObject(CE->getOperand(0), Aliases);
+ default:
+ break;
+ }
+ }
+ return nullptr;
+}
+
+const GlobalObject *GlobalIndirectSymbol::getBaseObject() const {
+ DenseSet<const GlobalAlias *> Aliases;
+ return findBaseObject(getOperand(0), Aliases);
+}
//===----------------------------------------------------------------------===//
// GlobalAlias Implementation
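Illustration (not part of the patch): the new helper chases aliases and simple constant expressions down to the underlying definition, giving up (nullptr) when the base is ambiguous, e.g. the sum of two globals.

    // Returns the GlobalObject behind e.g.
    //   @a = alias i8, getelementptr (i8, i8* @g, i64 4)
    const GlobalObject *baseOf(const GlobalAlias &GA) {
      return GA.getBaseObject();
    }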
diff --git a/lib/IR/IRBuilder.cpp b/lib/IR/IRBuilder.cpp
index 0c6461c9078f..b782012e9731 100644
--- a/lib/IR/IRBuilder.cpp
+++ b/lib/IR/IRBuilder.cpp
@@ -49,7 +49,7 @@ GlobalVariable *IRBuilderBase::CreateGlobalString(StringRef Str,
nullptr, GlobalVariable::NotThreadLocal,
AddressSpace);
GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
- GV->setAlignment(1);
+ GV->setAlignment(Align::None());
return GV;
}
@@ -289,8 +289,10 @@ CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemMove(
CallInst *CI = createCallHelper(TheFn, Ops, this);
// Set the alignment of the pointer args.
- CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), DstAlign));
- CI->addParamAttr(1, Attribute::getWithAlignment(CI->getContext(), SrcAlign));
+ CI->addParamAttr(
+ 0, Attribute::getWithAlignment(CI->getContext(), Align(DstAlign)));
+ CI->addParamAttr(
+ 1, Attribute::getWithAlignment(CI->getContext(), Align(SrcAlign)));
// Set the TBAA info if present.
if (TBAATag)
diff --git a/lib/IR/IRPrintingPasses.cpp b/lib/IR/IRPrintingPasses.cpp
index 35b06135a828..953cf9410162 100644
--- a/lib/IR/IRPrintingPasses.cpp
+++ b/lib/IR/IRPrintingPasses.cpp
@@ -26,14 +26,22 @@ PrintModulePass::PrintModulePass(raw_ostream &OS, const std::string &Banner,
ShouldPreserveUseListOrder(ShouldPreserveUseListOrder) {}
PreservedAnalyses PrintModulePass::run(Module &M, ModuleAnalysisManager &) {
- if (!Banner.empty())
- OS << Banner << "\n";
- if (llvm::isFunctionInPrintList("*"))
+ if (llvm::isFunctionInPrintList("*")) {
+ if (!Banner.empty())
+ OS << Banner << "\n";
M.print(OS, nullptr, ShouldPreserveUseListOrder);
+ }
else {
- for(const auto &F : M.functions())
- if (llvm::isFunctionInPrintList(F.getName()))
+ bool BannerPrinted = false;
+ for(const auto &F : M.functions()) {
+ if (llvm::isFunctionInPrintList(F.getName())) {
+ if (!BannerPrinted && !Banner.empty()) {
+ OS << Banner << "\n";
+ BannerPrinted = true;
+ }
F.print(OS);
+ }
+ }
}
return PreservedAnalyses::all();
}
diff --git a/lib/IR/InlineAsm.cpp b/lib/IR/InlineAsm.cpp
index 99da7caaccf0..fd732f9eda8b 100644
--- a/lib/IR/InlineAsm.cpp
+++ b/lib/IR/InlineAsm.cpp
@@ -181,6 +181,16 @@ bool InlineAsm::ConstraintInfo::Parse(StringRef Str,
// FIXME: For now assuming these are 2-character constraints.
pCodes->push_back(StringRef(I+1, 2));
I += 3;
+ } else if (*I == '@') {
+ // Multi-letter constraint
+ ++I;
+ unsigned char C = static_cast<unsigned char>(*I);
+ assert(isdigit(C) && "Expected a digit!");
+ int N = C - '0';
+ assert(N > 0 && "Found a zero letter constraint!");
+ ++I;
+ pCodes->push_back(StringRef(I, N));
+ I += N;
} else {
// Single letter constraint.
pCodes->push_back(StringRef(I, 1));
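Illustration (not part of the patch): the '@' syntax encodes the code's length, so a three-letter code — "abc" is made up here — is written "@3abc" in the constraint string.

    InlineAsm::ConstraintInfoVector CV =
        InlineAsm::ParseConstraints("=r,@3abc");
    assert(CV.size() == 2 && CV[1].Codes[0] == "abc"); // hypothetical code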
diff --git a/lib/IR/Instruction.cpp b/lib/IR/Instruction.cpp
index ba5629d1662b..b157c7bb34bf 100644
--- a/lib/IR/Instruction.cpp
+++ b/lib/IR/Instruction.cpp
@@ -524,7 +524,7 @@ bool Instruction::mayReadFromMemory() const {
case Instruction::Call:
case Instruction::Invoke:
case Instruction::CallBr:
- return !cast<CallBase>(this)->doesNotAccessMemory();
+ return !cast<CallBase>(this)->doesNotReadMemory();
case Instruction::Store:
return !cast<StoreInst>(this)->isUnordered();
}
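Illustration (not part of the patch): why the switch to doesNotReadMemory() matters; a call that only writes (writeonly) previously still reported mayReadFromMemory() == true.

    void markWriteOnly(CallInst &CI) {
      CI.addAttribute(AttributeList::FunctionIndex, Attribute::WriteOnly);
      assert(CI.doesNotReadMemory());  // writeonly or readnone
      assert(!CI.mayReadFromMemory()); // now consistent with the above
      assert(CI.mayWriteToMemory());
    }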
diff --git a/lib/IR/Instructions.cpp b/lib/IR/Instructions.cpp
index 2e7cad103c12..245c7628b08e 100644
--- a/lib/IR/Instructions.cpp
+++ b/lib/IR/Instructions.cpp
@@ -38,6 +38,7 @@
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/TypeSize.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
@@ -45,12 +46,6 @@
using namespace llvm;
-static cl::opt<bool> SwitchInstProfUpdateWrapperStrict(
- "switch-inst-prof-update-wrapper-strict", cl::Hidden,
- cl::desc("Assert that prof branch_weights metadata is valid when creating "
- "an instance of SwitchInstProfUpdateWrapper"),
- cl::init(false));
-
//===----------------------------------------------------------------------===//
// AllocaInst Class
//===----------------------------------------------------------------------===//
@@ -822,6 +817,17 @@ void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
setName(NameStr);
}
+void CallBrInst::updateArgBlockAddresses(unsigned i, BasicBlock *B) {
+ assert(getNumIndirectDests() > i && "IndirectDest # out of range for callbr");
+ if (BasicBlock *OldBB = getIndirectDest(i)) {
+ BlockAddress *Old = BlockAddress::get(OldBB);
+ BlockAddress *New = BlockAddress::get(B);
+ for (unsigned ArgNo = 0, e = getNumArgOperands(); ArgNo != e; ++ArgNo)
+ if (dyn_cast<BlockAddress>(getArgOperand(ArgNo)) == Old)
+ setArgOperand(ArgNo, New);
+ }
+}
+
CallBrInst::CallBrInst(const CallBrInst &CBI)
: CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
OperandTraits<CallBase>::op_end(this) - CBI.getNumOperands(),
@@ -1223,7 +1229,7 @@ AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
: UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
getAISize(Ty->getContext(), ArraySize), InsertBefore),
AllocatedType(Ty) {
- setAlignment(Align);
+ setAlignment(MaybeAlign(Align));
assert(!Ty->isVoidTy() && "Cannot allocate void!");
setName(Name);
}
@@ -1234,18 +1240,21 @@ AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
: UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
getAISize(Ty->getContext(), ArraySize), InsertAtEnd),
AllocatedType(Ty) {
- setAlignment(Align);
+ setAlignment(MaybeAlign(Align));
assert(!Ty->isVoidTy() && "Cannot allocate void!");
setName(Name);
}
-void AllocaInst::setAlignment(unsigned Align) {
- assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
- assert(Align <= MaximumAlignment &&
+void AllocaInst::setAlignment(MaybeAlign Align) {
+ assert((!Align || *Align <= MaximumAlignment) &&
"Alignment is greater than MaximumAlignment!");
setInstructionSubclassData((getSubclassDataFromInstruction() & ~31) |
- (Log2_32(Align) + 1));
- assert(getAlignment() == Align && "Alignment representation error!");
+ encode(Align));
+ if (Align)
+ assert(getAlignment() == Align->value() &&
+ "Alignment representation error!");
+ else
+ assert(getAlignment() == 0 && "Alignment representation error!");
}
bool AllocaInst::isArrayAllocation() const {
@@ -1287,36 +1296,36 @@ LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
Instruction *InsertBef)
- : LoadInst(Ty, Ptr, Name, isVolatile, /*Align=*/0, InsertBef) {}
+ : LoadInst(Ty, Ptr, Name, isVolatile, /*Align=*/None, InsertBef) {}
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
BasicBlock *InsertAE)
- : LoadInst(Ty, Ptr, Name, isVolatile, /*Align=*/0, InsertAE) {}
+ : LoadInst(Ty, Ptr, Name, isVolatile, /*Align=*/None, InsertAE) {}
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
- unsigned Align, Instruction *InsertBef)
+ MaybeAlign Align, Instruction *InsertBef)
: LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
SyncScope::System, InsertBef) {}
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
- unsigned Align, BasicBlock *InsertAE)
+ MaybeAlign Align, BasicBlock *InsertAE)
: LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
SyncScope::System, InsertAE) {}
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
- unsigned Align, AtomicOrdering Order,
- SyncScope::ID SSID, Instruction *InsertBef)
+ MaybeAlign Align, AtomicOrdering Order, SyncScope::ID SSID,
+ Instruction *InsertBef)
: UnaryInstruction(Ty, Load, Ptr, InsertBef) {
assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
setVolatile(isVolatile);
- setAlignment(Align);
+ setAlignment(MaybeAlign(Align));
setAtomic(Order, SSID);
AssertOK();
setName(Name);
}
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
- unsigned Align, AtomicOrdering Order, SyncScope::ID SSID,
+ MaybeAlign Align, AtomicOrdering Order, SyncScope::ID SSID,
BasicBlock *InsertAE)
: UnaryInstruction(Ty, Load, Ptr, InsertAE) {
assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
@@ -1327,13 +1336,16 @@ LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
setName(Name);
}
-void LoadInst::setAlignment(unsigned Align) {
- assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
- assert(Align <= MaximumAlignment &&
+void LoadInst::setAlignment(MaybeAlign Align) {
+ assert((!Align || *Align <= MaximumAlignment) &&
"Alignment is greater than MaximumAlignment!");
setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
- ((Log2_32(Align)+1)<<1));
- assert(getAlignment() == Align && "Alignment representation error!");
+ (encode(Align) << 1));
+ if (Align)
+ assert(getAlignment() == Align->value() &&
+ "Alignment representation error!");
+ else
+ assert(getAlignment() == 0 && "Alignment representation error!");
}
//===----------------------------------------------------------------------===//
@@ -1359,30 +1371,28 @@ StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd)
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
Instruction *InsertBefore)
- : StoreInst(val, addr, isVolatile, /*Align=*/0, InsertBefore) {}
+ : StoreInst(val, addr, isVolatile, /*Align=*/None, InsertBefore) {}
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
BasicBlock *InsertAtEnd)
- : StoreInst(val, addr, isVolatile, /*Align=*/0, InsertAtEnd) {}
+ : StoreInst(val, addr, isVolatile, /*Align=*/None, InsertAtEnd) {}
-StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
+StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, MaybeAlign Align,
Instruction *InsertBefore)
: StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
SyncScope::System, InsertBefore) {}
-StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
+StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, MaybeAlign Align,
BasicBlock *InsertAtEnd)
: StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
SyncScope::System, InsertAtEnd) {}
-StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
- unsigned Align, AtomicOrdering Order,
- SyncScope::ID SSID,
+StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, MaybeAlign Align,
+ AtomicOrdering Order, SyncScope::ID SSID,
Instruction *InsertBefore)
- : Instruction(Type::getVoidTy(val->getContext()), Store,
- OperandTraits<StoreInst>::op_begin(this),
- OperandTraits<StoreInst>::operands(this),
- InsertBefore) {
+ : Instruction(Type::getVoidTy(val->getContext()), Store,
+ OperandTraits<StoreInst>::op_begin(this),
+ OperandTraits<StoreInst>::operands(this), InsertBefore) {
Op<0>() = val;
Op<1>() = addr;
setVolatile(isVolatile);
@@ -1391,14 +1401,12 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
AssertOK();
}
-StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
- unsigned Align, AtomicOrdering Order,
- SyncScope::ID SSID,
+StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, MaybeAlign Align,
+ AtomicOrdering Order, SyncScope::ID SSID,
BasicBlock *InsertAtEnd)
- : Instruction(Type::getVoidTy(val->getContext()), Store,
- OperandTraits<StoreInst>::op_begin(this),
- OperandTraits<StoreInst>::operands(this),
- InsertAtEnd) {
+ : Instruction(Type::getVoidTy(val->getContext()), Store,
+ OperandTraits<StoreInst>::op_begin(this),
+ OperandTraits<StoreInst>::operands(this), InsertAtEnd) {
Op<0>() = val;
Op<1>() = addr;
setVolatile(isVolatile);
@@ -1407,13 +1415,16 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
AssertOK();
}
-void StoreInst::setAlignment(unsigned Align) {
- assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
- assert(Align <= MaximumAlignment &&
+void StoreInst::setAlignment(MaybeAlign Align) {
+ assert((!Align || *Align <= MaximumAlignment) &&
"Alignment is greater than MaximumAlignment!");
setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
- ((Log2_32(Align)+1) << 1));
- assert(getAlignment() == Align && "Alignment representation error!");
+ (encode(Align) << 1));
+ if (Align)
+ assert(getAlignment() == Align->value() &&
+ "Alignment representation error!");
+ else
+ assert(getAlignment() == 0 && "Alignment representation error!");
}
//===----------------------------------------------------------------------===//
@@ -1778,7 +1789,7 @@ ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
const Twine &Name,
Instruction *InsertBefore)
: Instruction(VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
- cast<VectorType>(Mask->getType())->getNumElements()),
+ cast<VectorType>(Mask->getType())->getElementCount()),
ShuffleVector,
OperandTraits<ShuffleVectorInst>::op_begin(this),
OperandTraits<ShuffleVectorInst>::operands(this),
@@ -1795,7 +1806,7 @@ ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
const Twine &Name,
BasicBlock *InsertAtEnd)
: Instruction(VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
- cast<VectorType>(Mask->getType())->getNumElements()),
+ cast<VectorType>(Mask->getType())->getElementCount()),
ShuffleVector,
OperandTraits<ShuffleVectorInst>::op_begin(this),
OperandTraits<ShuffleVectorInst>::operands(this),
@@ -2968,8 +2979,8 @@ bool CastInst::isCastable(Type *SrcTy, Type *DestTy) {
}
// Get the bit sizes, we'll need these
- unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
- unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
+ TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
+ TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
// Run through the possibilities ...
if (DestTy->isIntegerTy()) { // Casting to integral
@@ -3016,7 +3027,7 @@ bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
- if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) {
+ if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
// An element by element cast. Valid if casting the elements is valid.
SrcTy = SrcVecTy->getElementType();
DestTy = DestVecTy->getElementType();
@@ -3030,12 +3041,12 @@ bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
}
}
- unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
- unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
+ TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
+ TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
// Could still have vectors of pointers if the number of elements doesn't
// match
- if (SrcBits == 0 || DestBits == 0)
+ if (SrcBits.getKnownMinSize() == 0 || DestBits.getKnownMinSize() == 0)
return false;
if (SrcBits != DestBits)
@@ -3886,7 +3897,7 @@ SwitchInstProfUpdateWrapper::getProfBranchWeightsMD(const SwitchInst &SI) {
}
MDNode *SwitchInstProfUpdateWrapper::buildProfBranchWeightsMD() {
- assert(State == Changed && "called only if metadata has changed");
+ assert(Changed && "called only if metadata has changed");
if (!Weights)
return nullptr;
@@ -3905,17 +3916,12 @@ MDNode *SwitchInstProfUpdateWrapper::buildProfBranchWeightsMD() {
void SwitchInstProfUpdateWrapper::init() {
MDNode *ProfileData = getProfBranchWeightsMD(SI);
- if (!ProfileData) {
- State = Initialized;
+ if (!ProfileData)
return;
- }
if (ProfileData->getNumOperands() != SI.getNumSuccessors() + 1) {
- State = Invalid;
- if (SwitchInstProfUpdateWrapperStrict)
- llvm_unreachable("number of prof branch_weights metadata operands does "
- "not correspond to number of succesors");
- return;
+ llvm_unreachable("number of prof branch_weights metadata operands does "
+ "not correspond to number of succesors");
}
SmallVector<uint32_t, 8> Weights;
@@ -3924,7 +3930,6 @@ void SwitchInstProfUpdateWrapper::init() {
uint32_t CW = C->getValue().getZExtValue();
Weights.push_back(CW);
}
- State = Initialized;
this->Weights = std::move(Weights);
}
@@ -3933,7 +3938,7 @@ SwitchInstProfUpdateWrapper::removeCase(SwitchInst::CaseIt I) {
if (Weights) {
assert(SI.getNumSuccessors() == Weights->size() &&
"num of prof branch_weights must accord with num of successors");
- State = Changed;
+ Changed = true;
// Copy the last case to the place of the removed one and shrink.
// This is tightly coupled with the way SwitchInst::removeCase() removes
// the cases in SwitchInst::removeCase(CaseIt).
@@ -3948,15 +3953,12 @@ void SwitchInstProfUpdateWrapper::addCase(
SwitchInstProfUpdateWrapper::CaseWeightOpt W) {
SI.addCase(OnVal, Dest);
- if (State == Invalid)
- return;
-
if (!Weights && W && *W) {
- State = Changed;
+ Changed = true;
Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
Weights.getValue()[SI.getNumSuccessors() - 1] = *W;
} else if (Weights) {
- State = Changed;
+ Changed = true;
Weights.getValue().push_back(W ? *W : 0);
}
if (Weights)
@@ -3967,11 +3969,9 @@ void SwitchInstProfUpdateWrapper::addCase(
SymbolTableList<Instruction>::iterator
SwitchInstProfUpdateWrapper::eraseFromParent() {
// Instruction is erased. Mark as unchanged to not touch it in the destructor.
- if (State != Invalid) {
- State = Initialized;
- if (Weights)
- Weights->resize(0);
- }
+ Changed = false;
+ if (Weights)
+ Weights->resize(0);
return SI.eraseFromParent();
}
@@ -3984,7 +3984,7 @@ SwitchInstProfUpdateWrapper::getSuccessorWeight(unsigned idx) {
void SwitchInstProfUpdateWrapper::setSuccessorWeight(
unsigned idx, SwitchInstProfUpdateWrapper::CaseWeightOpt W) {
- if (!W || State == Invalid)
+ if (!W)
return;
if (!Weights && *W)
@@ -3993,7 +3993,7 @@ void SwitchInstProfUpdateWrapper::setSuccessorWeight(
if (Weights) {
auto &OldW = Weights.getValue()[idx];
if (*W != OldW) {
- State = Changed;
+ Changed = true;
OldW = *W;
}
}
@@ -4136,13 +4136,14 @@ AllocaInst *AllocaInst::cloneImpl() const {
LoadInst *LoadInst::cloneImpl() const {
return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(),
- getAlignment(), getOrdering(), getSyncScopeID());
+ MaybeAlign(getAlignment()), getOrdering(),
+ getSyncScopeID());
}
StoreInst *StoreInst::cloneImpl() const {
return new StoreInst(getOperand(0), getOperand(1), isVolatile(),
- getAlignment(), getOrdering(), getSyncScopeID());
-
+ MaybeAlign(getAlignment()), getOrdering(),
+ getSyncScopeID());
}
AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {
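Illustration (not part of the patch): the constructors now take MaybeAlign, where None means "alignment not specified" and a value pins it explicitly; the signatures below are taken from this hunk.

    void copyThrough(Type *Ty, Value *Src, Value *Dst, Instruction *InsertPt) {
      // Explicit 8-byte alignment on the load...
      LoadInst *L = new LoadInst(Ty, Src, "val", /*isVolatile=*/false,
                                 MaybeAlign(8), InsertPt);
      // ...and deliberately unspecified alignment on the store.
      new StoreInst(L, Dst, /*isVolatile=*/false, MaybeAlign(), InsertPt);
    }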
diff --git a/lib/IR/IntrinsicInst.cpp b/lib/IR/IntrinsicInst.cpp
index 7a042326f67f..26ed46a9cd91 100644
--- a/lib/IR/IntrinsicInst.cpp
+++ b/lib/IR/IntrinsicInst.cpp
@@ -67,13 +67,12 @@ int llvm::Intrinsic::lookupLLVMIntrinsicByName(ArrayRef<const char *> NameTable,
// size 1. During the search, we can skip the prefix that we already know is
// identical. By using strncmp we consider names with differing suffixes to
// be part of the equal range.
- size_t CmpStart = 0;
size_t CmpEnd = 4; // Skip the "llvm" component.
const char *const *Low = NameTable.begin();
const char *const *High = NameTable.end();
const char *const *LastLow = Low;
while (CmpEnd < Name.size() && High - Low > 0) {
- CmpStart = CmpEnd;
+ size_t CmpStart = CmpEnd;
CmpEnd = Name.find('.', CmpStart + 1);
CmpEnd = CmpEnd == StringRef::npos ? Name.size() : CmpEnd;
auto Cmp = [CmpStart, CmpEnd](const char *LHS, const char *RHS) {
@@ -107,7 +106,7 @@ Optional<ConstrainedFPIntrinsic::RoundingMode>
ConstrainedFPIntrinsic::getRoundingMode() const {
unsigned NumOperands = getNumArgOperands();
Metadata *MD =
- dyn_cast<MetadataAsValue>(getArgOperand(NumOperands - 2))->getMetadata();
+ cast<MetadataAsValue>(getArgOperand(NumOperands - 2))->getMetadata();
if (!MD || !isa<MDString>(MD))
return None;
return StrToRoundingMode(cast<MDString>(MD)->getString());
@@ -143,7 +142,7 @@ ConstrainedFPIntrinsic::RoundingModeToStr(RoundingMode UseRounding) {
RoundingStr = "round.upward";
break;
case ConstrainedFPIntrinsic::rmTowardZero:
- RoundingStr = "round.tozero";
+ RoundingStr = "round.towardzero";
break;
}
return RoundingStr;
@@ -153,7 +152,7 @@ Optional<ConstrainedFPIntrinsic::ExceptionBehavior>
ConstrainedFPIntrinsic::getExceptionBehavior() const {
unsigned NumOperands = getNumArgOperands();
Metadata *MD =
- dyn_cast<MetadataAsValue>(getArgOperand(NumOperands - 1))->getMetadata();
+ cast<MetadataAsValue>(getArgOperand(NumOperands - 1))->getMetadata();
if (!MD || !isa<MDString>(MD))
return None;
return StrToExceptionBehavior(cast<MDString>(MD)->getString());
@@ -189,6 +188,8 @@ bool ConstrainedFPIntrinsic::isUnaryOp() const {
switch (getIntrinsicID()) {
default:
return false;
+ case Intrinsic::experimental_constrained_fptosi:
+ case Intrinsic::experimental_constrained_fptoui:
case Intrinsic::experimental_constrained_fptrunc:
case Intrinsic::experimental_constrained_fpext:
case Intrinsic::experimental_constrained_sqrt:
@@ -199,10 +200,14 @@ bool ConstrainedFPIntrinsic::isUnaryOp() const {
case Intrinsic::experimental_constrained_log:
case Intrinsic::experimental_constrained_log10:
case Intrinsic::experimental_constrained_log2:
+ case Intrinsic::experimental_constrained_lrint:
+ case Intrinsic::experimental_constrained_llrint:
case Intrinsic::experimental_constrained_rint:
case Intrinsic::experimental_constrained_nearbyint:
case Intrinsic::experimental_constrained_ceil:
case Intrinsic::experimental_constrained_floor:
+ case Intrinsic::experimental_constrained_lround:
+ case Intrinsic::experimental_constrained_llround:
case Intrinsic::experimental_constrained_round:
case Intrinsic::experimental_constrained_trunc:
return true;
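Illustration (not part of the patch): the spelling accepted in the rounding-mode metadata argument is now "round.towardzero"; a call site building that operand (Ctx assumed) would write:

    // Was "round.tozero"; StrToRoundingMode() only recognizes the new form.
    MDString *RM = MDString::get(Ctx, "round.towardzero");
    Value *RMArg = MetadataAsValue::get(Ctx, RM);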
diff --git a/lib/IR/LLVMContext.cpp b/lib/IR/LLVMContext.cpp
index e1cdf6b539db..5e8772186a2a 100644
--- a/lib/IR/LLVMContext.cpp
+++ b/lib/IR/LLVMContext.cpp
@@ -36,34 +36,9 @@ LLVMContext::LLVMContext() : pImpl(new LLVMContextImpl(*this)) {
// Create the fixed metadata kinds. This is done in the same order as the
// MD_* enum values so that they correspond.
std::pair<unsigned, StringRef> MDKinds[] = {
- {MD_dbg, "dbg"},
- {MD_tbaa, "tbaa"},
- {MD_prof, "prof"},
- {MD_fpmath, "fpmath"},
- {MD_range, "range"},
- {MD_tbaa_struct, "tbaa.struct"},
- {MD_invariant_load, "invariant.load"},
- {MD_alias_scope, "alias.scope"},
- {MD_noalias, "noalias"},
- {MD_nontemporal, "nontemporal"},
- {MD_mem_parallel_loop_access, "llvm.mem.parallel_loop_access"},
- {MD_nonnull, "nonnull"},
- {MD_dereferenceable, "dereferenceable"},
- {MD_dereferenceable_or_null, "dereferenceable_or_null"},
- {MD_make_implicit, "make.implicit"},
- {MD_unpredictable, "unpredictable"},
- {MD_invariant_group, "invariant.group"},
- {MD_align, "align"},
- {MD_loop, "llvm.loop"},
- {MD_type, "type"},
- {MD_section_prefix, "section_prefix"},
- {MD_absolute_symbol, "absolute_symbol"},
- {MD_associated, "associated"},
- {MD_callees, "callees"},
- {MD_irr_loop, "irr_loop"},
- {MD_access_group, "llvm.access.group"},
- {MD_callback, "callback"},
- {MD_preserve_access_index, "llvm.preserve.access.index"},
+#define LLVM_FIXED_MD_KIND(EnumID, Name, Value) {EnumID, Name},
+#include "llvm/IR/FixedMetadataKinds.def"
+#undef LLVM_FIXED_MD_KIND
};
for (auto &MDKind : MDKinds) {
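Illustration (not part of the patch): the table is now generated, so registering a fixed kind is a single line in llvm/IR/FixedMetadataKinds.def following this pattern (first two entries shown, matching the enum order the removed list encoded):

    LLVM_FIXED_MD_KIND(MD_dbg, "dbg", 0)
    LLVM_FIXED_MD_KIND(MD_tbaa, "tbaa", 1)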
diff --git a/lib/IR/LLVMContextImpl.cpp b/lib/IR/LLVMContextImpl.cpp
index c6ab2c6f213a..5f9782714170 100644
--- a/lib/IR/LLVMContextImpl.cpp
+++ b/lib/IR/LLVMContextImpl.cpp
@@ -21,7 +21,7 @@
using namespace llvm;
LLVMContextImpl::LLVMContextImpl(LLVMContext &C)
- : DiagHandler(llvm::make_unique<DiagnosticHandler>()),
+ : DiagHandler(std::make_unique<DiagnosticHandler>()),
VoidTy(C, Type::VoidTyID),
LabelTy(C, Type::LabelTyID),
HalfTy(C, Type::HalfTyID),
diff --git a/lib/IR/LegacyPassManager.cpp b/lib/IR/LegacyPassManager.cpp
index c575d6e782b9..3a03c493100b 100644
--- a/lib/IR/LegacyPassManager.cpp
+++ b/lib/IR/LegacyPassManager.cpp
@@ -1680,7 +1680,6 @@ bool FPPassManager::runOnFunction(Function &F) {
bool FPPassManager::runOnModule(Module &M) {
bool Changed = false;
- llvm::TimeTraceScope TimeScope("OptModule", M.getName());
for (Function &F : M)
Changed |= runOnFunction(F);
@@ -1999,10 +1998,28 @@ void FunctionPass::assignPassManager(PMStack &PMS,
FPP->add(this);
}
+void BasicBlockPass::preparePassManager(PMStack &PMS) {
+ // Find BBPassManager
+ while (!PMS.empty() &&
+ PMS.top()->getPassManagerType() > PMT_BasicBlockPassManager)
+ PMS.pop();
+
+ // If this pass is destroying high level information that is used
+ // by other passes that are managed by BBPM then do not insert
+ // this pass in current BBPM. Use new BBPassManager.
+ if (PMS.top()->getPassManagerType() == PMT_BasicBlockPassManager &&
+ !PMS.top()->preserveHigherLevelAnalysis(this))
+ PMS.pop();
+}
+
/// Find appropriate Basic Pass Manager or Call Graph Pass Manager
/// in the PM Stack and add self into that manager.
void BasicBlockPass::assignPassManager(PMStack &PMS,
PassManagerType PreferredType) {
+ while (!PMS.empty() &&
+ PMS.top()->getPassManagerType() > PMT_BasicBlockPassManager)
+ PMS.pop();
+
BBPassManager *BBP;
// Basic Pass Manager is a leaf pass manager. It does not handle
@@ -2018,6 +2035,7 @@ void BasicBlockPass::assignPassManager(PMStack &PMS,
// [1] Create new Basic Block Manager
BBP = new BBPassManager();
+ BBP->populateInheritedAnalysis(PMS);
// [2] Set up new manager's top level manager
// Basic Block Pass Manager does not live by itself
diff --git a/lib/IR/MDBuilder.cpp b/lib/IR/MDBuilder.cpp
index 14bcb3a29b07..7bdb85ace522 100644
--- a/lib/IR/MDBuilder.cpp
+++ b/lib/IR/MDBuilder.cpp
@@ -309,3 +309,15 @@ MDNode *MDBuilder::createIrrLoopHeaderWeight(uint64_t Weight) {
};
return MDNode::get(Context, Vals);
}
+
+MDNode *MDBuilder::createMisExpect(uint64_t Index, uint64_t LikelyWeight,
+ uint64_t UnlikelyWeight) {
+ auto *IntType = Type::getInt64Ty(Context);
+ Metadata *Vals[] = {
+ createString("misexpect"),
+ createConstant(ConstantInt::get(IntType, Index)),
+ createConstant(ConstantInt::get(IntType, LikelyWeight)),
+ createConstant(ConstantInt::get(IntType, UnlikelyWeight)),
+ };
+ return MDNode::get(Context, Vals);
+}
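Illustration (not part of the patch): attaching the new node to a branch. This assumes the MD_misexpect fixed metadata kind that accompanies this builder; the operands line up as !{!"misexpect", i64 Index, i64 LikelyWeight, i64 UnlikelyWeight}.

    MDBuilder MDB(Br->getContext());
    Br->setMetadata(LLVMContext::MD_misexpect, // assumed fixed kind
                    MDB.createMisExpect(/*Index=*/0, /*LikelyWeight=*/2000,
                                        /*UnlikelyWeight=*/1));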
diff --git a/lib/IR/Metadata.cpp b/lib/IR/Metadata.cpp
index 748a2238e642..62c2aa86f3b0 100644
--- a/lib/IR/Metadata.cpp
+++ b/lib/IR/Metadata.cpp
@@ -1497,6 +1497,24 @@ void GlobalObject::addTypeMetadata(unsigned Offset, Metadata *TypeID) {
TypeID}));
}
+void GlobalObject::addVCallVisibilityMetadata(VCallVisibility Visibility) {
+ addMetadata(LLVMContext::MD_vcall_visibility,
+ *MDNode::get(getContext(),
+ {ConstantAsMetadata::get(ConstantInt::get(
+ Type::getInt64Ty(getContext()), Visibility))}));
+}
+
+GlobalObject::VCallVisibility GlobalObject::getVCallVisibility() const {
+ if (MDNode *MD = getMetadata(LLVMContext::MD_vcall_visibility)) {
+ uint64_t Val = cast<ConstantInt>(
+ cast<ConstantAsMetadata>(MD->getOperand(0))->getValue())
+ ->getZExtValue();
+ assert(Val <= 2 && "unknown vcall visibility!");
+ return (VCallVisibility)Val;
+ }
+ return VCallVisibility::VCallVisibilityPublic;
+}
+
void Function::setSubprogram(DISubprogram *SP) {
setMetadata(LLVMContext::MD_dbg, SP);
}
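Illustration (not part of the patch): tagging a vtable global and reading the value back; unannotated globals default to public visibility.

    void restrictVTable(GlobalVariable &VTable) {
      VTable.addVCallVisibilityMetadata(
          GlobalObject::VCallVisibilityLinkageUnit);
      assert(VTable.getVCallVisibility() ==
             GlobalObject::VCallVisibilityLinkageUnit);
    }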
diff --git a/lib/IR/Module.cpp b/lib/IR/Module.cpp
index dbf4035ac7c1..25efd009194f 100644
--- a/lib/IR/Module.cpp
+++ b/lib/IR/Module.cpp
@@ -604,7 +604,7 @@ GlobalVariable *llvm::collectUsedGlobalVariables(
const ConstantArray *Init = cast<ConstantArray>(GV->getInitializer());
for (Value *Op : Init->operands()) {
- GlobalValue *G = cast<GlobalValue>(Op->stripPointerCastsNoFollowAliases());
+ GlobalValue *G = cast<GlobalValue>(Op->stripPointerCasts());
Set.insert(G);
}
return GV;
diff --git a/lib/IR/RemarkStreamer.cpp b/lib/IR/RemarkStreamer.cpp
index 5b4c7e72b479..0fcc06b961f3 100644
--- a/lib/IR/RemarkStreamer.cpp
+++ b/lib/IR/RemarkStreamer.cpp
@@ -15,15 +15,17 @@
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
+#include "llvm/Remarks/BitstreamRemarkSerializer.h"
#include "llvm/Remarks/RemarkFormat.h"
+#include "llvm/Remarks/RemarkSerializer.h"
using namespace llvm;
-RemarkStreamer::RemarkStreamer(StringRef Filename,
- std::unique_ptr<remarks::Serializer> Serializer)
- : Filename(Filename), PassFilter(), Serializer(std::move(Serializer)) {
- assert(!Filename.empty() && "This needs to be a real filename.");
-}
+RemarkStreamer::RemarkStreamer(
+ std::unique_ptr<remarks::RemarkSerializer> RemarkSerializer,
+ Optional<StringRef> FilenameIn)
+ : PassFilter(), RemarkSerializer(std::move(RemarkSerializer)),
+ Filename(FilenameIn ? Optional<std::string>(FilenameIn->str()) : None) {}
Error RemarkStreamer::setFilter(StringRef Filter) {
Regex R = Regex(Filter);
@@ -99,24 +101,13 @@ void RemarkStreamer::emit(const DiagnosticInfoOptimizationBase &Diag) {
// First, convert the diagnostic to a remark.
remarks::Remark R = toRemark(Diag);
// Then, emit the remark through the serializer.
- Serializer->emit(R);
+ RemarkSerializer->emit(R);
}
char RemarkSetupFileError::ID = 0;
char RemarkSetupPatternError::ID = 0;
char RemarkSetupFormatError::ID = 0;
-static std::unique_ptr<remarks::Serializer>
-formatToSerializer(remarks::Format RemarksFormat, raw_ostream &OS) {
- switch (RemarksFormat) {
- default:
- llvm_unreachable("Unknown remark serializer format.");
- return nullptr;
- case remarks::Format::YAML:
- return llvm::make_unique<remarks::YAMLSerializer>(OS);
- };
-}
-
Expected<std::unique_ptr<ToolOutputFile>>
llvm::setupOptimizationRemarks(LLVMContext &Context, StringRef RemarksFilename,
StringRef RemarksPasses, StringRef RemarksFormat,
@@ -131,24 +122,63 @@ llvm::setupOptimizationRemarks(LLVMContext &Context, StringRef RemarksFilename,
if (RemarksFilename.empty())
return nullptr;
+ Expected<remarks::Format> Format = remarks::parseFormat(RemarksFormat);
+ if (Error E = Format.takeError())
+ return make_error<RemarkSetupFormatError>(std::move(E));
+
std::error_code EC;
+ auto Flags = *Format == remarks::Format::YAML ? sys::fs::OF_Text
+ : sys::fs::OF_None;
auto RemarksFile =
- llvm::make_unique<ToolOutputFile>(RemarksFilename, EC, sys::fs::F_None);
+ std::make_unique<ToolOutputFile>(RemarksFilename, EC, Flags);
// We don't use llvm::FileError here because some diagnostics want the file
// name separately.
if (EC)
return make_error<RemarkSetupFileError>(errorCodeToError(EC));
+ Expected<std::unique_ptr<remarks::RemarkSerializer>> RemarkSerializer =
+ remarks::createRemarkSerializer(
+ *Format, remarks::SerializerMode::Separate, RemarksFile->os());
+ if (Error E = RemarkSerializer.takeError())
+ return make_error<RemarkSetupFormatError>(std::move(E));
+
+ Context.setRemarkStreamer(std::make_unique<RemarkStreamer>(
+ std::move(*RemarkSerializer), RemarksFilename));
+
+ if (!RemarksPasses.empty())
+ if (Error E = Context.getRemarkStreamer()->setFilter(RemarksPasses))
+ return make_error<RemarkSetupPatternError>(std::move(E));
+
+ return std::move(RemarksFile);
+}
+
+Error llvm::setupOptimizationRemarks(LLVMContext &Context, raw_ostream &OS,
+ StringRef RemarksPasses,
+ StringRef RemarksFormat,
+ bool RemarksWithHotness,
+ unsigned RemarksHotnessThreshold) {
+ if (RemarksWithHotness)
+ Context.setDiagnosticsHotnessRequested(true);
+
+ if (RemarksHotnessThreshold)
+ Context.setDiagnosticsHotnessThreshold(RemarksHotnessThreshold);
+
Expected<remarks::Format> Format = remarks::parseFormat(RemarksFormat);
if (Error E = Format.takeError())
return make_error<RemarkSetupFormatError>(std::move(E));
- Context.setRemarkStreamer(llvm::make_unique<RemarkStreamer>(
- RemarksFilename, formatToSerializer(*Format, RemarksFile->os())));
+ Expected<std::unique_ptr<remarks::RemarkSerializer>> RemarkSerializer =
+ remarks::createRemarkSerializer(*Format,
+ remarks::SerializerMode::Separate, OS);
+ if (Error E = RemarkSerializer.takeError())
+ return make_error<RemarkSetupFormatError>(std::move(E));
+
+ Context.setRemarkStreamer(
+ std::make_unique<RemarkStreamer>(std::move(*RemarkSerializer)));
if (!RemarksPasses.empty())
if (Error E = Context.getRemarkStreamer()->setFilter(RemarksPasses))
return make_error<RemarkSetupPatternError>(std::move(E));
- return std::move(RemarksFile);
+ return Error::success();
}
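Illustration (not part of the patch): the new stream-based overload for callers that own the output stream themselves; the wrapper name is made up, the parameters are from this hunk.

    Error configureRemarks(LLVMContext &Ctx, raw_ostream &OS) {
      return setupOptimizationRemarks(Ctx, OS, /*RemarksPasses=*/"",
                                      /*RemarksFormat=*/"yaml",
                                      /*RemarksWithHotness=*/false,
                                      /*RemarksHotnessThreshold=*/0);
    }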
diff --git a/lib/IR/SafepointIRVerifier.cpp b/lib/IR/SafepointIRVerifier.cpp
index 7f3dea5e6a6d..c90347ec48fd 100644
--- a/lib/IR/SafepointIRVerifier.cpp
+++ b/lib/IR/SafepointIRVerifier.cpp
@@ -102,11 +102,11 @@ public:
}
bool isDeadEdge(const Use *U) const {
- assert(dyn_cast<Instruction>(U->getUser())->isTerminator() &&
+ assert(cast<Instruction>(U->getUser())->isTerminator() &&
"edge must be operand of terminator");
assert(cast_or_null<BasicBlock>(U->get()) &&
"edge must refer to basic block");
- assert(!isDeadBlock(dyn_cast<Instruction>(U->getUser())->getParent()) &&
+ assert(!isDeadBlock(cast<Instruction>(U->getUser())->getParent()) &&
"isDeadEdge() must be applied to edge from live block");
return DeadEdges.count(U);
}
diff --git a/lib/IR/Type.cpp b/lib/IR/Type.cpp
index 8ece7f223dd2..3eab5042b542 100644
--- a/lib/IR/Type.cpp
+++ b/lib/IR/Type.cpp
@@ -26,6 +26,7 @@
#include "llvm/Support/Casting.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/TypeSize.h"
#include <cassert>
#include <utility>
@@ -111,18 +112,22 @@ bool Type::isEmptyTy() const {
return false;
}
-unsigned Type::getPrimitiveSizeInBits() const {
+TypeSize Type::getPrimitiveSizeInBits() const {
switch (getTypeID()) {
- case Type::HalfTyID: return 16;
- case Type::FloatTyID: return 32;
- case Type::DoubleTyID: return 64;
- case Type::X86_FP80TyID: return 80;
- case Type::FP128TyID: return 128;
- case Type::PPC_FP128TyID: return 128;
- case Type::X86_MMXTyID: return 64;
- case Type::IntegerTyID: return cast<IntegerType>(this)->getBitWidth();
- case Type::VectorTyID: return cast<VectorType>(this)->getBitWidth();
- default: return 0;
+ case Type::HalfTyID: return TypeSize::Fixed(16);
+ case Type::FloatTyID: return TypeSize::Fixed(32);
+ case Type::DoubleTyID: return TypeSize::Fixed(64);
+ case Type::X86_FP80TyID: return TypeSize::Fixed(80);
+ case Type::FP128TyID: return TypeSize::Fixed(128);
+ case Type::PPC_FP128TyID: return TypeSize::Fixed(128);
+ case Type::X86_MMXTyID: return TypeSize::Fixed(64);
+ case Type::IntegerTyID:
+ return TypeSize::Fixed(cast<IntegerType>(this)->getBitWidth());
+ case Type::VectorTyID: {
+ const VectorType *VTy = cast<VectorType>(this);
+ return TypeSize(VTy->getBitWidth(), VTy->isScalable());
+ }
+ default: return TypeSize::Fixed(0);
}
}
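Illustration (not part of the patch): TypeSize keeps fixed and scalable sizes apart where a plain unsigned could not.

    bool fitsInBits(Type *Ty, uint64_t Bits) {
      TypeSize TS = Ty->getPrimitiveSizeInBits();
      if (TS.isScalable())
        return false;                   // only a minimum is known statically
      return TS.getFixedSize() <= Bits; // exact for fixed-size types
    }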
diff --git a/lib/IR/Value.cpp b/lib/IR/Value.cpp
index b7f77dc3043e..3c8a5b536695 100644
--- a/lib/IR/Value.cpp
+++ b/lib/IR/Value.cpp
@@ -444,15 +444,11 @@ void Value::replaceUsesOutsideBlock(Value *New, BasicBlock *BB) {
"replaceUses of value with new value of different type!");
assert(BB && "Basic block that may contain a use of 'New' must be defined\n");
- use_iterator UI = use_begin(), E = use_end();
- for (; UI != E;) {
- Use &U = *UI;
- ++UI;
- auto *Usr = dyn_cast<Instruction>(U.getUser());
- if (Usr && Usr->getParent() == BB)
- continue;
- U.set(New);
- }
+ replaceUsesWithIf(New, [BB](Use &U) {
+ auto *I = dyn_cast<Instruction>(U.getUser());
+ // Don't replace if the user is an instruction in basic block BB.
+ return !I || I->getParent() != BB;
+ });
}
namespace {
@@ -460,8 +456,8 @@ namespace {
enum PointerStripKind {
PSK_ZeroIndices,
PSK_ZeroIndicesAndAliases,
- PSK_ZeroIndicesAndAliasesSameRepresentation,
- PSK_ZeroIndicesAndAliasesAndInvariantGroups,
+ PSK_ZeroIndicesSameRepresentation,
+ PSK_ZeroIndicesAndInvariantGroups,
PSK_InBoundsConstantIndices,
PSK_InBounds
};
@@ -479,10 +475,10 @@ static const Value *stripPointerCastsAndOffsets(const Value *V) {
do {
if (auto *GEP = dyn_cast<GEPOperator>(V)) {
switch (StripKind) {
- case PSK_ZeroIndicesAndAliases:
- case PSK_ZeroIndicesAndAliasesSameRepresentation:
- case PSK_ZeroIndicesAndAliasesAndInvariantGroups:
case PSK_ZeroIndices:
+ case PSK_ZeroIndicesAndAliases:
+ case PSK_ZeroIndicesSameRepresentation:
+ case PSK_ZeroIndicesAndInvariantGroups:
if (!GEP->hasAllZeroIndices())
return V;
break;
@@ -498,15 +494,13 @@ static const Value *stripPointerCastsAndOffsets(const Value *V) {
V = GEP->getPointerOperand();
} else if (Operator::getOpcode(V) == Instruction::BitCast) {
V = cast<Operator>(V)->getOperand(0);
- } else if (StripKind != PSK_ZeroIndicesAndAliasesSameRepresentation &&
+ } else if (StripKind != PSK_ZeroIndicesSameRepresentation &&
Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
// TODO: If we know an address space cast will not change the
// representation we could look through it here as well.
V = cast<Operator>(V)->getOperand(0);
- } else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
- if (StripKind == PSK_ZeroIndices || GA->isInterposable())
- return V;
- V = GA->getAliasee();
+ } else if (StripKind == PSK_ZeroIndicesAndAliases && isa<GlobalAlias>(V)) {
+ V = cast<GlobalAlias>(V)->getAliasee();
} else {
if (const auto *Call = dyn_cast<CallBase>(V)) {
if (const Value *RV = Call->getReturnedArgOperand()) {
@@ -516,7 +510,7 @@ static const Value *stripPointerCastsAndOffsets(const Value *V) {
// The result of launder.invariant.group must alias it's argument,
// but it can't be marked with returned attribute, that's why it needs
// special case.
- if (StripKind == PSK_ZeroIndicesAndAliasesAndInvariantGroups &&
+ if (StripKind == PSK_ZeroIndicesAndInvariantGroups &&
(Call->getIntrinsicID() == Intrinsic::launder_invariant_group ||
Call->getIntrinsicID() == Intrinsic::strip_invariant_group)) {
V = Call->getArgOperand(0);
@@ -533,16 +527,15 @@ static const Value *stripPointerCastsAndOffsets(const Value *V) {
} // end anonymous namespace
const Value *Value::stripPointerCasts() const {
- return stripPointerCastsAndOffsets<PSK_ZeroIndicesAndAliases>(this);
+ return stripPointerCastsAndOffsets<PSK_ZeroIndices>(this);
}
-const Value *Value::stripPointerCastsSameRepresentation() const {
- return stripPointerCastsAndOffsets<
- PSK_ZeroIndicesAndAliasesSameRepresentation>(this);
+const Value *Value::stripPointerCastsAndAliases() const {
+ return stripPointerCastsAndOffsets<PSK_ZeroIndicesAndAliases>(this);
}
-const Value *Value::stripPointerCastsNoFollowAliases() const {
- return stripPointerCastsAndOffsets<PSK_ZeroIndices>(this);
+const Value *Value::stripPointerCastsSameRepresentation() const {
+ return stripPointerCastsAndOffsets<PSK_ZeroIndicesSameRepresentation>(this);
}
const Value *Value::stripInBoundsConstantOffsets() const {
@@ -550,8 +543,7 @@ const Value *Value::stripInBoundsConstantOffsets() const {
}
const Value *Value::stripPointerCastsAndInvariantGroups() const {
- return stripPointerCastsAndOffsets<PSK_ZeroIndicesAndAliasesAndInvariantGroups>(
- this);
+ return stripPointerCastsAndOffsets<PSK_ZeroIndicesAndInvariantGroups>(this);
}
const Value *
@@ -650,6 +642,19 @@ uint64_t Value::getPointerDereferenceableBytes(const DataLayout &DL,
}
CanBeNull = true;
}
+ } else if (auto *IP = dyn_cast<IntToPtrInst>(this)) {
+ if (MDNode *MD = IP->getMetadata(LLVMContext::MD_dereferenceable)) {
+ ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
+ DerefBytes = CI->getLimitedValue();
+ }
+ if (DerefBytes == 0) {
+ if (MDNode *MD =
+ IP->getMetadata(LLVMContext::MD_dereferenceable_or_null)) {
+ ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
+ DerefBytes = CI->getLimitedValue();
+ }
+ CanBeNull = true;
+ }
} else if (auto *AI = dyn_cast<AllocaInst>(this)) {
if (!AI->isArrayAllocation()) {
DerefBytes = DL.getTypeStoreSize(AI->getAllocatedType());
@@ -666,21 +671,21 @@ uint64_t Value::getPointerDereferenceableBytes(const DataLayout &DL,
return DerefBytes;
}
-unsigned Value::getPointerAlignment(const DataLayout &DL) const {
+MaybeAlign Value::getPointerAlignment(const DataLayout &DL) const {
assert(getType()->isPointerTy() && "must be pointer");
-
- unsigned Align = 0;
if (auto *GO = dyn_cast<GlobalObject>(this)) {
if (isa<Function>(GO)) {
+ const MaybeAlign FunctionPtrAlign = DL.getFunctionPtrAlign();
switch (DL.getFunctionPtrAlignType()) {
case DataLayout::FunctionPtrAlignType::Independent:
- return DL.getFunctionPtrAlign();
+ return FunctionPtrAlign;
case DataLayout::FunctionPtrAlignType::MultipleOfFunctionAlign:
- return std::max(DL.getFunctionPtrAlign(), GO->getAlignment());
+ return std::max(FunctionPtrAlign, MaybeAlign(GO->getAlignment()));
}
+ llvm_unreachable("Unhandled FunctionPtrAlignType");
}
- Align = GO->getAlignment();
- if (Align == 0) {
+ const MaybeAlign Alignment(GO->getAlignment());
+ if (!Alignment) {
if (auto *GVar = dyn_cast<GlobalVariable>(GO)) {
Type *ObjectType = GVar->getValueType();
if (ObjectType->isSized()) {
@@ -688,37 +693,43 @@ unsigned Value::getPointerAlignment(const DataLayout &DL) const {
// it the preferred alignment. Otherwise, we have to assume that it
// may only have the minimum ABI alignment.
if (GVar->isStrongDefinitionForLinker())
- Align = DL.getPreferredAlignment(GVar);
+ return MaybeAlign(DL.getPreferredAlignment(GVar));
else
- Align = DL.getABITypeAlignment(ObjectType);
+ return Align(DL.getABITypeAlignment(ObjectType));
}
}
}
+ return Alignment;
} else if (const Argument *A = dyn_cast<Argument>(this)) {
- Align = A->getParamAlignment();
-
- if (!Align && A->hasStructRetAttr()) {
+ const MaybeAlign Alignment(A->getParamAlignment());
+ if (!Alignment && A->hasStructRetAttr()) {
// An sret parameter has at least the ABI alignment of the return type.
Type *EltTy = cast<PointerType>(A->getType())->getElementType();
if (EltTy->isSized())
- Align = DL.getABITypeAlignment(EltTy);
+ return Align(DL.getABITypeAlignment(EltTy));
}
+ return Alignment;
} else if (const AllocaInst *AI = dyn_cast<AllocaInst>(this)) {
- Align = AI->getAlignment();
- if (Align == 0) {
+ const MaybeAlign Alignment(AI->getAlignment());
+ if (!Alignment) {
Type *AllocatedType = AI->getAllocatedType();
if (AllocatedType->isSized())
- Align = DL.getPrefTypeAlignment(AllocatedType);
+ return MaybeAlign(DL.getPrefTypeAlignment(AllocatedType));
}
- } else if (const auto *Call = dyn_cast<CallBase>(this))
- Align = Call->getAttributes().getRetAlignment();
- else if (const LoadInst *LI = dyn_cast<LoadInst>(this))
+ return Alignment;
+ } else if (const auto *Call = dyn_cast<CallBase>(this)) {
+ const MaybeAlign Alignment(Call->getRetAlignment());
+ if (!Alignment && Call->getCalledFunction())
+ return MaybeAlign(
+ Call->getCalledFunction()->getAttributes().getRetAlignment());
+ return Alignment;
+ } else if (const LoadInst *LI = dyn_cast<LoadInst>(this)) {
if (MDNode *MD = LI->getMetadata(LLVMContext::MD_align)) {
ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
- Align = CI->getLimitedValue();
+ return MaybeAlign(CI->getLimitedValue());
}
-
- return Align;
+ }
+ return llvm::None;
}
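Illustration (not part of the patch): the renamed stripping helpers; the default no longer looks through aliases.

    const Value *Base = V->stripPointerCasts();           // stops at GlobalAlias
    const Value *Deep = V->stripPointerCastsAndAliases(); // follows aliasees too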
const Value *Value::DoPHITranslation(const BasicBlock *CurBB,
diff --git a/lib/IR/Verifier.cpp b/lib/IR/Verifier.cpp
index 9346c8bda75d..b17fc433ed74 100644
--- a/lib/IR/Verifier.cpp
+++ b/lib/IR/Verifier.cpp
@@ -119,6 +119,7 @@ struct VerifierSupport {
raw_ostream *OS;
const Module &M;
ModuleSlotTracker MST;
+ Triple TT;
const DataLayout &DL;
LLVMContext &Context;
@@ -130,7 +131,8 @@ struct VerifierSupport {
bool TreatBrokenDebugInfoAsError = true;
explicit VerifierSupport(raw_ostream *OS, const Module &M)
- : OS(OS), M(M), MST(&M), DL(M.getDataLayout()), Context(M.getContext()) {}
+ : OS(OS), M(M), MST(&M), TT(M.getTargetTriple()), DL(M.getDataLayout()),
+ Context(M.getContext()) {}
private:
void Write(const Module *M) {
@@ -416,6 +418,7 @@ private:
void visitBasicBlock(BasicBlock &BB);
void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
+ void visitProfMetadata(Instruction &I, MDNode *MD);
template <class Ty> bool isValidMetadataArray(const MDTuple &N);
#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
@@ -515,6 +518,7 @@ private:
DIExpression::FragmentInfo Fragment,
ValueOrMetadata *Desc);
void verifyFnArgs(const DbgVariableIntrinsic &I);
+ void verifyNotEntryValue(const DbgVariableIntrinsic &I);
/// Module-level debug info verification...
void verifyCompileUnits();
@@ -670,7 +674,7 @@ void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
Assert(InitArray, "wrong initalizer for intrinsic global variable",
Init);
for (Value *Op : InitArray->operands()) {
- Value *V = Op->stripPointerCastsNoFollowAliases();
+ Value *V = Op->stripPointerCasts();
Assert(isa<GlobalVariable>(V) || isa<Function>(V) ||
isa<GlobalAlias>(V),
"invalid llvm.used member", V);
@@ -979,6 +983,9 @@ void Verifier::visitDICompositeType(const DICompositeType &N) {
N.getRawVTableHolder());
AssertDI(!hasConflictingReferenceFlags(N.getFlags()),
"invalid reference flags", &N);
+ unsigned DIBlockByRefStruct = 1 << 4;
+ AssertDI((N.getFlags() & DIBlockByRefStruct) == 0,
+ "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
if (N.isVector()) {
const DINodeArray Elements = N.getElements();
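The new DICompositeType check spells the flag as a raw 1 << 4 because DIFlagBlockByrefStruct itself has been retired from DINode::DIFlags; only the old bit position survives so that stale metadata can still be rejected. A hypothetical restatement (the helper and constant names are ours):

    #include "llvm/IR/DebugInfoMetadata.h"

    // The Objective-C block-byref struct flag no longer exists as a named
    // DIFlag; 1 << 4 is its historical bit encoding.
    static bool hasRetiredBlockByrefFlag(const llvm::DICompositeType &N) {
      const unsigned OldDIFlagBlockByrefStruct = 1u << 4;
      return (N.getFlags() & OldDIFlagBlockByrefStruct) != 0;
    }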
@@ -1306,11 +1313,12 @@ void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
}
void Verifier::visitComdat(const Comdat &C) {
- // The Module is invalid if the GlobalValue has private linkage. Entities
- // with private linkage don't have entries in the symbol table.
- if (const GlobalValue *GV = M.getNamedValue(C.getName()))
- Assert(!GV->hasPrivateLinkage(), "comdat global value has private linkage",
- GV);
+ // In COFF the Module is invalid if the GlobalValue has private linkage.
+ // Entities with private linkage don't have entries in the symbol table.
+ if (TT.isOSBinFormatCOFF())
+ if (const GlobalValue *GV = M.getNamedValue(C.getName()))
+ Assert(!GV->hasPrivateLinkage(),
+ "comdat global value has private linkage", GV);
}
void Verifier::visitModuleIdents(const Module &M) {
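Caching the Triple in VerifierSupport lets individual checks be scoped by object format; here the private-linkage-in-comdat rule now fires only for COFF, where comdat members are resolved through the symbol table. A standalone sketch of the gating condition (the function name is illustrative):

    #include "llvm/ADT/Triple.h"
    #include "llvm/IR/Module.h"

    // Comdat members must be visible in the symbol table only on COFF;
    // other object formats do not require an entry for them.
    static bool comdatNeedsSymtabEntry(const llvm::Module &M) {
      llvm::Triple TT(M.getTargetTriple());
      return TT.isOSBinFormatCOFF();
    }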
@@ -2497,6 +2505,15 @@ void Verifier::visitCallBrInst(CallBrInst &CBI) {
Assert(CBI.getOperand(i) != CBI.getOperand(j),
"Duplicate callbr destination!", &CBI);
}
+ {
+ SmallPtrSet<BasicBlock *, 4> ArgBBs;
+ for (Value *V : CBI.args())
+ if (auto *BA = dyn_cast<BlockAddress>(V))
+ ArgBBs.insert(BA->getBasicBlock());
+ for (BasicBlock *BB : CBI.getIndirectDests())
+ Assert(ArgBBs.find(BB) != ArgBBs.end(),
+ "Indirect label missing from arglist.", &CBI);
+ }
visitTerminator(CBI);
}
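The new callbr rule requires every indirect destination to also appear as a blockaddress argument of the call, which keeps the destination block visibly tied to the instruction. Restated as a self-contained predicate (our naming, same logic as the hunk above):

    #include "llvm/ADT/SmallPtrSet.h"
    #include "llvm/IR/Constants.h"
    #include "llvm/IR/Instructions.h"

    // True iff each indirect destination of the callbr also appears as a
    // blockaddress somewhere in its argument list.
    static bool indirectDestsInArgList(llvm::CallBrInst &CBI) {
      llvm::SmallPtrSet<llvm::BasicBlock *, 4> ArgBBs;
      for (llvm::Value *V : CBI.args())
        if (auto *BA = llvm::dyn_cast<llvm::BlockAddress>(V))
          ArgBBs.insert(BA->getBasicBlock());
      for (llvm::BasicBlock *BB : CBI.getIndirectDests())
        if (!ArgBBs.count(BB))
          return false;
      return true;
    }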
@@ -2715,8 +2732,8 @@ void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
&I);
if (SrcTy->isVectorTy()) {
- VectorType *VSrc = dyn_cast<VectorType>(SrcTy);
- VectorType *VDest = dyn_cast<VectorType>(DestTy);
+ VectorType *VSrc = cast<VectorType>(SrcTy);
+ VectorType *VDest = cast<VectorType>(DestTy);
Assert(VSrc->getNumElements() == VDest->getNumElements(),
"PtrToInt Vector width mismatch", &I);
}
@@ -2740,8 +2757,8 @@ void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
&I);
if (SrcTy->isVectorTy()) {
- VectorType *VSrc = dyn_cast<VectorType>(SrcTy);
- VectorType *VDest = dyn_cast<VectorType>(DestTy);
+ VectorType *VSrc = cast<VectorType>(SrcTy);
+ VectorType *VDest = cast<VectorType>(DestTy);
Assert(VSrc->getNumElements() == VDest->getNumElements(),
"IntToPtr Vector width mismatch", &I);
}
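Both hunks above swap dyn_cast for cast: the enclosing if (SrcTy->isVectorTy()) already guarantees the type, so a null-returning dyn_cast would only turn a logic bug into a null dereference, while cast asserts the invariant in +Asserts builds. The idiom in isolation:

    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/Support/Casting.h"

    // Precondition: the caller has already established Ty->isVectorTy().
    static unsigned vectorWidth(llvm::Type *Ty) {
      // cast<> (unlike dyn_cast<>) never returns null; it asserts instead.
      return llvm::cast<llvm::VectorType>(Ty)->getNumElements();
    }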
@@ -3983,9 +4000,9 @@ void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
Assert(I.getType()->isPointerTy(), "dereferenceable, dereferenceable_or_null "
"apply only to pointer types", &I);
- Assert(isa<LoadInst>(I),
+ Assert((isa<LoadInst>(I) || isa<IntToPtrInst>(I)),
"dereferenceable, dereferenceable_or_null apply only to load"
- " instructions, use attributes for calls or invokes", &I);
+ " and inttoptr instructions, use attributes for calls or invokes", &I);
Assert(MD->getNumOperands() == 1, "dereferenceable, dereferenceable_or_null "
"take one operand!", &I);
ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
@@ -3993,6 +4010,45 @@ void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
"dereferenceable_or_null metadata value must be an i64!", &I);
}
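With the relaxation above, frontends can assert dereferenceability on pointers materialized from integers, not just on loads. A sketch of attaching such metadata; the helper name is ours, the operand encoding is what the verifier checks:

    #include "llvm/IR/Constants.h"
    #include "llvm/IR/Instruction.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Metadata.h"
    #include "llvm/IR/Type.h"

    // Tag an inttoptr (or load) result as dereferenceable over Bytes bytes;
    // the single metadata operand must be an i64 constant.
    static void markDereferenceable(llvm::Instruction &I, uint64_t Bytes) {
      llvm::LLVMContext &Ctx = I.getContext();
      llvm::Metadata *Ops[] = {llvm::ConstantAsMetadata::get(
          llvm::ConstantInt::get(llvm::Type::getInt64Ty(Ctx), Bytes))};
      I.setMetadata(llvm::LLVMContext::MD_dereferenceable,
                    llvm::MDNode::get(Ctx, Ops));
    }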
+void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
+ Assert(MD->getNumOperands() >= 2,
+ "!prof annotations should have no less than 2 operands", MD);
+
+ // Check first operand.
+ Assert(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
+ Assert(isa<MDString>(MD->getOperand(0)),
+ "expected string with name of the !prof annotation", MD);
+ MDString *MDS = cast<MDString>(MD->getOperand(0));
+ StringRef ProfName = MDS->getString();
+
+ // Check consistency of !prof branch_weights metadata.
+ if (ProfName.equals("branch_weights")) {
+ unsigned ExpectedNumOperands = 0;
+ if (BranchInst *BI = dyn_cast<BranchInst>(&I))
+ ExpectedNumOperands = BI->getNumSuccessors();
+ else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
+ ExpectedNumOperands = SI->getNumSuccessors();
+ else if (isa<CallInst>(&I) || isa<InvokeInst>(&I))
+ ExpectedNumOperands = 1;
+ else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
+ ExpectedNumOperands = IBI->getNumDestinations();
+ else if (isa<SelectInst>(&I))
+ ExpectedNumOperands = 2;
+ else
+ CheckFailed("!prof branch_weights are not allowed for this instruction",
+ MD);
+
+ Assert(MD->getNumOperands() == 1 + ExpectedNumOperands,
+ "Wrong number of operands", MD);
+ for (unsigned i = 1; i < MD->getNumOperands(); ++i) {
+ auto &MDO = MD->getOperand(i);
+ Assert(MDO, "second operand should not be null", MD);
+ Assert(mdconst::dyn_extract<ConstantInt>(MDO),
+ "!prof brunch_weights operand is not a const int");
+ }
+ }
+}
+
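The well-formed case visitProfMetadata accepts is exactly one weight per successor. Building the metadata through MDBuilder, rather than by hand, keeps the operand count right by construction; a sketch for a two-way conditional branch:

    #include <cstdint>
    #include "llvm/IR/Instructions.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/MDBuilder.h"

    // Attach !prof !{!"branch_weights", i32 TrueW, i32 FalseW} to a
    // conditional branch: getNumSuccessors() == 2, so exactly two weights.
    static void annotateBranch(llvm::BranchInst &BI, uint32_t TrueW,
                               uint32_t FalseW) {
      llvm::MDBuilder MDB(BI.getContext());
      BI.setMetadata(llvm::LLVMContext::MD_prof,
                     MDB.createBranchWeights(TrueW, FalseW));
    }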
/// verifyInstruction - Verify that an instruction is well formed.
///
void Verifier::visitInstruction(Instruction &I) {
@@ -4150,13 +4206,18 @@ void Verifier::visitInstruction(Instruction &I) {
"alignment is larger that implementation defined limit", &I);
}
+ if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
+ visitProfMetadata(I, MD);
+
if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
AssertDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
visitMDNode(*N);
}
- if (auto *DII = dyn_cast<DbgVariableIntrinsic>(&I))
+ if (auto *DII = dyn_cast<DbgVariableIntrinsic>(&I)) {
verifyFragmentExpression(*DII);
+ verifyNotEntryValue(*DII);
+ }
InstsInThisBlock.insert(&I);
}
@@ -4236,6 +4297,8 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
case Intrinsic::experimental_constrained_fdiv:
case Intrinsic::experimental_constrained_frem:
case Intrinsic::experimental_constrained_fma:
+ case Intrinsic::experimental_constrained_fptosi:
+ case Intrinsic::experimental_constrained_fptoui:
case Intrinsic::experimental_constrained_fptrunc:
case Intrinsic::experimental_constrained_fpext:
case Intrinsic::experimental_constrained_sqrt:
@@ -4248,12 +4311,16 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
case Intrinsic::experimental_constrained_log:
case Intrinsic::experimental_constrained_log10:
case Intrinsic::experimental_constrained_log2:
+ case Intrinsic::experimental_constrained_lrint:
+ case Intrinsic::experimental_constrained_llrint:
case Intrinsic::experimental_constrained_rint:
case Intrinsic::experimental_constrained_nearbyint:
case Intrinsic::experimental_constrained_maxnum:
case Intrinsic::experimental_constrained_minnum:
case Intrinsic::experimental_constrained_ceil:
case Intrinsic::experimental_constrained_floor:
+ case Intrinsic::experimental_constrained_lround:
+ case Intrinsic::experimental_constrained_llround:
case Intrinsic::experimental_constrained_round:
case Intrinsic::experimental_constrained_trunc:
visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
@@ -4623,7 +4690,8 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
}
case Intrinsic::smul_fix:
case Intrinsic::smul_fix_sat:
- case Intrinsic::umul_fix: {
+ case Intrinsic::umul_fix:
+ case Intrinsic::umul_fix_sat: {
Value *Op1 = Call.getArgOperand(0);
Value *Op2 = Call.getArgOperand(1);
Assert(Op1->getType()->isIntOrIntVectorTy(),
@@ -4705,6 +4773,31 @@ void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
HasRoundingMD = true;
break;
+ case Intrinsic::experimental_constrained_lrint:
+ case Intrinsic::experimental_constrained_llrint: {
+ Assert((NumOperands == 3), "invalid arguments for constrained FP intrinsic",
+ &FPI);
+ Type *ValTy = FPI.getArgOperand(0)->getType();
+ Type *ResultTy = FPI.getType();
+ Assert(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
+ "Intrinsic does not support vectors", &FPI);
+ HasExceptionMD = true;
+ HasRoundingMD = true;
+ }
+ break;
+
+ case Intrinsic::experimental_constrained_lround:
+ case Intrinsic::experimental_constrained_llround: {
+ Assert((NumOperands == 2), "invalid arguments for constrained FP intrinsic",
+ &FPI);
+ Type *ValTy = FPI.getArgOperand(0)->getType();
+ Type *ResultTy = FPI.getType();
+ Assert(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
+ "Intrinsic does not support vectors", &FPI);
+ HasExceptionMD = true;
+ break;
+ }
+
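The operand counts encode each intrinsic's metadata tail: lrint/llrint carry a rounding mode plus an exception behavior (three operands total), while lround/llround always round to nearest with ties away from zero and so carry only the exception behavior (two). A hand-rolled emission sketch, assuming the usual {result, source} overload order; this is not code from the patch:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Intrinsics.h"
    #include "llvm/IR/Metadata.h"
    #include "llvm/IR/Module.h"

    // Emit llvm.experimental.constrained.lrint.<IntTy>.<FPTy>(x, md, md).
    // Requires B to have an insertion point inside a function.
    static llvm::Value *emitConstrainedLRint(llvm::IRBuilder<> &B,
                                             llvm::Value *FPVal,
                                             llvm::Type *IntTy) {
      llvm::LLVMContext &Ctx = B.getContext();
      llvm::Module *M = B.GetInsertBlock()->getModule();
      llvm::Function *F = llvm::Intrinsic::getDeclaration(
          M, llvm::Intrinsic::experimental_constrained_lrint,
          {IntTy, FPVal->getType()});
      auto *Round = llvm::MetadataAsValue::get(
          Ctx, llvm::MDString::get(Ctx, "round.dynamic"));
      auto *Except = llvm::MetadataAsValue::get(
          Ctx, llvm::MDString::get(Ctx, "fpexcept.strict"));
      return B.CreateCall(F, {FPVal, Round, Except});
    }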
case Intrinsic::experimental_constrained_fma:
Assert((NumOperands == 5), "invalid arguments for constrained FP intrinsic",
&FPI);
@@ -4727,6 +4820,33 @@ void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
HasRoundingMD = true;
break;
+ case Intrinsic::experimental_constrained_fptosi:
+ case Intrinsic::experimental_constrained_fptoui: {
+ Assert((NumOperands == 2),
+ "invalid arguments for constrained FP intrinsic", &FPI);
+ HasExceptionMD = true;
+
+ Value *Operand = FPI.getArgOperand(0);
+ uint64_t NumSrcElem = 0;
+ Assert(Operand->getType()->isFPOrFPVectorTy(),
+ "Intrinsic first argument must be floating point", &FPI);
+ if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
+ NumSrcElem = OperandT->getNumElements();
+ }
+
+ Operand = &FPI;
+ Assert((NumSrcElem > 0) == Operand->getType()->isVectorTy(),
+ "Intrinsic first argument and result disagree on vector use", &FPI);
+ Assert(Operand->getType()->isIntOrIntVectorTy(),
+ "Intrinsic result must be an integer", &FPI);
+ if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
+ Assert(NumSrcElem == OperandT->getNumElements(),
+ "Intrinsic first argument and result vector lengths must be equal",
+ &FPI);
+ }
+ }
+ break;
+
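For the conversions, the checks boil down to: floating-point (or FP vector) source, integer (or integer vector) result, and agreement on vector-ness and element count. Factored out as a standalone predicate (our formulation, equivalent in effect to the hunk above):

    #include "llvm/IR/DerivedTypes.h"

    // Shape rule for constrained fptosi/fptoui: FP -> int, with
    // scalar/vector-ness and element counts matching on both sides.
    static bool fpToIntShapesAgree(llvm::Type *SrcTy, llvm::Type *DstTy) {
      if (!SrcTy->isFPOrFPVectorTy() || !DstTy->isIntOrIntVectorTy())
        return false;
      auto *SrcVT = llvm::dyn_cast<llvm::VectorType>(SrcTy);
      auto *DstVT = llvm::dyn_cast<llvm::VectorType>(DstTy);
      if ((SrcVT != nullptr) != (DstVT != nullptr))
        return false; // one side is a vector, the other is not
      return !SrcVT || SrcVT->getNumElements() == DstVT->getNumElements();
    }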
case Intrinsic::experimental_constrained_fptrunc:
case Intrinsic::experimental_constrained_fpext: {
if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
@@ -4826,11 +4946,6 @@ void Verifier::visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII) {
// This check is redundant with one in visitLocalVariable().
AssertDI(isType(Var->getRawType()), "invalid type ref", Var,
Var->getRawType());
- if (auto *Type = dyn_cast_or_null<DIType>(Var->getRawType()))
- if (Type->isBlockByrefStruct())
- AssertDI(DII.getExpression() && DII.getExpression()->getNumElements(),
- "BlockByRef variable without complex expression", Var, &DII);
-
verifyFnArgs(DII);
}
@@ -4935,6 +5050,16 @@ void Verifier::verifyFnArgs(const DbgVariableIntrinsic &I) {
Prev, Var);
}
+void Verifier::verifyNotEntryValue(const DbgVariableIntrinsic &I) {
+ DIExpression *E = dyn_cast_or_null<DIExpression>(I.getRawExpression());
+
+ // We don't know whether this intrinsic verified correctly.
+ if (!E || !E->isValid())
+ return;
+
+ AssertDI(!E->isEntryValue(), "Entry values are only allowed in MIR", &I);
+}
+
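Entry-value expressions describe a parameter's value on function entry and are produced only during register allocation; the new check keeps them out of IR-level dbg.value/dbg.declare. A hypothetical construction of the expression form being rejected, assuming the DW_OP_LLVM_entry_value spelling used by current trees:

    #include <cstdint>
    #include "llvm/BinaryFormat/Dwarf.h"
    #include "llvm/IR/DebugInfoMetadata.h"

    // Builds !DIExpression(DW_OP_LLVM_entry_value, 1): legal on MIR
    // DBG_VALUEs, but now a verifier error on IR debug intrinsics.
    static llvm::DIExpression *makeEntryValueExpr(llvm::LLVMContext &Ctx) {
      uint64_t Ops[] = {llvm::dwarf::DW_OP_LLVM_entry_value, 1};
      return llvm::DIExpression::get(Ctx, Ops);
    }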
void Verifier::verifyCompileUnits() {
// When more than one Module is imported into the same context, such as during
// an LTO build before linking the modules, ODR type uniquing may cause types
@@ -5021,7 +5146,7 @@ struct VerifierLegacyPass : public FunctionPass {
}
bool doInitialization(Module &M) override {
- V = llvm::make_unique<Verifier>(
+ V = std::make_unique<Verifier>(
&dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
return false;
}
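One housekeeping change rides along here and in AsmWriter.cpp: with the codebase now built as C++14, the llvm::make_unique polyfill is being retired in favor of std::make_unique. The two forward arguments identically, so each site is a one-token change; a trivial self-contained illustration (the struct is ours):

    #include <memory>

    struct Checker { explicit Checker(bool Strict) : Strict(Strict) {} bool Strict; };

    // Before the C++14 move: llvm::make_unique<Checker>(false);
    // After, with identical forwarding semantics:
    static std::unique_ptr<Checker> makeChecker() {
      return std::make_unique<Checker>(false);
    }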