author     Dimitry Andric <dim@FreeBSD.org>    2019-01-19 10:01:25 +0000
committer  Dimitry Andric <dim@FreeBSD.org>    2019-01-19 10:01:25 +0000
commit     d8e91e46262bc44006913e6796843909f1ac7bcd (patch)
tree       7d0c143d9b38190e0fa0180805389da22cd834c5 /lib/IR
parent     b7eb8e35e481a74962664b63dfb09483b200209a (diff)
Diffstat (limited to 'lib/IR')
-rw-r--r--  lib/IR/AsmWriter.cpp            | 228
-rw-r--r--  lib/IR/Attributes.cpp           |  36
-rw-r--r--  lib/IR/AutoUpgrade.cpp          | 575
-rw-r--r--  lib/IR/BasicBlock.cpp           |  29
-rw-r--r--  lib/IR/CMakeLists.txt           |   2
-rw-r--r--  lib/IR/ConstantFold.cpp         |  20
-rw-r--r--  lib/IR/Constants.cpp            |  88
-rw-r--r--  lib/IR/ConstantsContext.h       |   4
-rw-r--r--  lib/IR/Core.cpp                 | 442
-rw-r--r--  lib/IR/DIBuilder.cpp            |  86
-rw-r--r--  lib/IR/DataLayout.cpp           |  24
-rw-r--r--  lib/IR/DebugInfo.cpp            | 115
-rw-r--r--  lib/IR/DebugInfoMetadata.cpp    | 225
-rw-r--r--  lib/IR/DebugLoc.cpp             |  17
-rw-r--r--  lib/IR/DiagnosticInfo.cpp       |  50
-rw-r--r--  lib/IR/DomTreeUpdater.cpp       |  29
-rw-r--r--  lib/IR/Dominators.cpp           | 199
-rw-r--r--  lib/IR/Function.cpp             |  41
-rw-r--r--  lib/IR/Globals.cpp              |   7
-rw-r--r--  lib/IR/IRBuilder.cpp            |  26
-rw-r--r--  lib/IR/IRPrintingPasses.cpp     |   3
-rw-r--r--  lib/IR/Instruction.cpp          |  56
-rw-r--r--  lib/IR/Instructions.cpp         | 722
-rw-r--r--  lib/IR/IntrinsicInst.cpp        |  16
-rw-r--r--  lib/IR/LLVMContext.cpp          |   1
-rw-r--r--  lib/IR/LLVMContextImpl.h        |  76
-rw-r--r--  lib/IR/LegacyPassManager.cpp    | 352
-rw-r--r--  lib/IR/MDBuilder.cpp            |   7
-rw-r--r--  lib/IR/Metadata.cpp             |   6
-rw-r--r--  lib/IR/Module.cpp               |  89
-rw-r--r--  lib/IR/ModuleSummaryIndex.cpp   | 165
-rw-r--r--  lib/IR/PassInstrumentation.cpp  |  22
-rw-r--r--  lib/IR/PassTimingInfo.cpp       | 268
-rw-r--r--  lib/IR/SafepointIRVerifier.cpp  |   6
-rw-r--r--  lib/IR/Type.cpp                 |  41
-rw-r--r--  lib/IR/Value.cpp                |  47
-rw-r--r--  lib/IR/Verifier.cpp             | 777
37 files changed, 3203 insertions(+), 1694 deletions(-)
diff --git a/lib/IR/AsmWriter.cpp b/lib/IR/AsmWriter.cpp
index 99a25a723b4a..a5dc623e1a30 100644
--- a/lib/IR/AsmWriter.cpp
+++ b/lib/IR/AsmWriter.cpp
@@ -36,7 +36,6 @@
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
-#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Comdat.h"
#include "llvm/IR/Constant.h"
@@ -199,7 +198,7 @@ static void predictValueUseListOrderImpl(const Value *V, const Function *F,
!isa<GlobalVariable>(V) && !isa<Function>(V) && !isa<BasicBlock>(V);
if (auto *BA = dyn_cast<BlockAddress>(V))
ID = OM.lookup(BA->getBasicBlock()).first;
- llvm::sort(List.begin(), List.end(), [&](const Entry &L, const Entry &R) {
+ llvm::sort(List, [&](const Entry &L, const Entry &R) {
const Use *LU = L.first;
const Use *RU = R.first;
if (LU == RU)
@@ -363,6 +362,7 @@ static void PrintCallingConv(unsigned cc, raw_ostream &Out) {
case CallingConv::ARM_APCS: Out << "arm_apcscc"; break;
case CallingConv::ARM_AAPCS: Out << "arm_aapcscc"; break;
case CallingConv::ARM_AAPCS_VFP: Out << "arm_aapcs_vfpcc"; break;
+ case CallingConv::AArch64_VectorCall: Out << "aarch64_vector_pcs"; break;
case CallingConv::MSP430_INTR: Out << "msp430_intrcc"; break;
case CallingConv::AVR_INTR: Out << "avr_intrcc "; break;
case CallingConv::AVR_SIGNAL: Out << "avr_signalcc "; break;
@@ -704,6 +704,10 @@ private:
DenseMap<GlobalValue::GUID, unsigned> GUIDMap;
unsigned GUIDNext = 0;
+ /// TypeIdMap - The slot map for type ids used in the summary index.
+ StringMap<unsigned> TypeIdMap;
+ unsigned TypeIdNext = 0;
+
public:
/// Construct from a module.
///
@@ -735,6 +739,7 @@ public:
int getAttributeGroupSlot(AttributeSet AS);
int getModulePathSlot(StringRef Path);
int getGUIDSlot(GlobalValue::GUID GUID);
+ int getTypeIdSlot(StringRef Id);
/// If you'd like to deal with a function instead of just a module, use
/// this method to get its data into the SlotTracker.
@@ -789,6 +794,7 @@ private:
inline void CreateModulePathSlot(StringRef Path);
void CreateGUIDSlot(GlobalValue::GUID GUID);
+ void CreateTypeIdSlot(StringRef Id);
/// Add all of the module level global variables (and their initializers)
/// and function declarations, but not the contents of those functions.
@@ -991,9 +997,9 @@ void SlotTracker::processFunction() {
// We allow direct calls to any llvm.foo function here, because the
// target may not be linked into the optimizer.
- if (auto CS = ImmutableCallSite(&I)) {
+ if (const auto *Call = dyn_cast<CallBase>(&I)) {
// Add all the call attributes to the table.
- AttributeSet Attrs = CS.getAttributes().getFnAttributes();
+ AttributeSet Attrs = Call->getAttributes().getFnAttributes();
if (Attrs.hasAttributes())
CreateAttributeSetSlot(Attrs);
}
@@ -1025,8 +1031,12 @@ void SlotTracker::processIndex() {
for (auto &GlobalList : *TheIndex)
CreateGUIDSlot(GlobalList.first);
- for (auto &TId : TheIndex->typeIds())
- CreateGUIDSlot(GlobalValue::getGUID(TId.first));
+ // Start numbering the TypeIds after the GUIDs.
+ TypeIdNext = GUIDNext;
+
+ for (auto TidIter = TheIndex->typeIds().begin();
+ TidIter != TheIndex->typeIds().end(); TidIter++)
+ CreateTypeIdSlot(TidIter->second.first);
ST_DEBUG("end processIndex!\n");
}
@@ -1132,6 +1142,15 @@ int SlotTracker::getGUIDSlot(GlobalValue::GUID GUID) {
return I == GUIDMap.end() ? -1 : (int)I->second;
}
+int SlotTracker::getTypeIdSlot(StringRef Id) {
+ // Check for uninitialized state and do lazy initialization.
+ initializeIndexIfNeeded();
+
+ // Find the TypeId string in the map
+ auto I = TypeIdMap.find(Id);
+ return I == TypeIdMap.end() ? -1 : (int)I->second;
+}
+
/// CreateModuleSlot - Insert the specified GlobalValue* into the slot table.
void SlotTracker::CreateModuleSlot(const GlobalValue *V) {
assert(V && "Can't insert a null Value into SlotTracker!");
@@ -1202,6 +1221,11 @@ void SlotTracker::CreateGUIDSlot(GlobalValue::GUID GUID) {
GUIDMap[GUID] = GUIDNext++;
}
+/// Create a new slot for the specified Id
+void SlotTracker::CreateTypeIdSlot(StringRef Id) {
+ TypeIdMap[Id] = TypeIdNext++;
+}
+
//===----------------------------------------------------------------------===//
// AsmWriter Implementation
//===----------------------------------------------------------------------===//
@@ -1216,24 +1240,6 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Metadata *MD,
SlotTracker *Machine, const Module *Context,
bool FromValue = false);
-static void writeAtomicRMWOperation(raw_ostream &Out,
- AtomicRMWInst::BinOp Op) {
- switch (Op) {
- default: Out << " <unknown operation " << Op << ">"; break;
- case AtomicRMWInst::Xchg: Out << " xchg"; break;
- case AtomicRMWInst::Add: Out << " add"; break;
- case AtomicRMWInst::Sub: Out << " sub"; break;
- case AtomicRMWInst::And: Out << " and"; break;
- case AtomicRMWInst::Nand: Out << " nand"; break;
- case AtomicRMWInst::Or: Out << " or"; break;
- case AtomicRMWInst::Xor: Out << " xor"; break;
- case AtomicRMWInst::Max: Out << " max"; break;
- case AtomicRMWInst::Min: Out << " min"; break;
- case AtomicRMWInst::UMax: Out << " umax"; break;
- case AtomicRMWInst::UMin: Out << " umin"; break;
- }
-}
-
static void WriteOptimizationInfo(raw_ostream &Out, const User *U) {
if (const FPMathOperator *FPO = dyn_cast<const FPMathOperator>(U)) {
// 'Fast' is an abbreviation for all fast-math-flags.
@@ -1600,10 +1606,13 @@ struct MDFieldPrinter {
void printInt(StringRef Name, IntTy Int, bool ShouldSkipZero = true);
void printBool(StringRef Name, bool Value, Optional<bool> Default = None);
void printDIFlags(StringRef Name, DINode::DIFlags Flags);
+ void printDISPFlags(StringRef Name, DISubprogram::DISPFlags Flags);
template <class IntTy, class Stringifier>
void printDwarfEnum(StringRef Name, IntTy Value, Stringifier toString,
bool ShouldSkipZero = true);
void printEmissionKind(StringRef Name, DICompileUnit::DebugEmissionKind EK);
+ void printNameTableKind(StringRef Name,
+ DICompileUnit::DebugNameTableKind NTK);
};
} // end anonymous namespace
@@ -1696,11 +1705,42 @@ void MDFieldPrinter::printDIFlags(StringRef Name, DINode::DIFlags Flags) {
Out << FlagsFS << Extra;
}
+void MDFieldPrinter::printDISPFlags(StringRef Name,
+ DISubprogram::DISPFlags Flags) {
+ // Always print this field, because no flags in the IR at all will be
+ // interpreted as old-style isDefinition: true.
+ Out << FS << Name << ": ";
+
+ if (!Flags) {
+ Out << 0;
+ return;
+ }
+
+ SmallVector<DISubprogram::DISPFlags, 8> SplitFlags;
+ auto Extra = DISubprogram::splitFlags(Flags, SplitFlags);
+
+ FieldSeparator FlagsFS(" | ");
+ for (auto F : SplitFlags) {
+ auto StringF = DISubprogram::getFlagString(F);
+ assert(!StringF.empty() && "Expected valid flag");
+ Out << FlagsFS << StringF;
+ }
+ if (Extra || SplitFlags.empty())
+ Out << FlagsFS << Extra;
+}
+
void MDFieldPrinter::printEmissionKind(StringRef Name,
DICompileUnit::DebugEmissionKind EK) {
Out << FS << Name << ": " << DICompileUnit::emissionKindString(EK);
}
+void MDFieldPrinter::printNameTableKind(StringRef Name,
+ DICompileUnit::DebugNameTableKind NTK) {
+ if (NTK == DICompileUnit::DebugNameTableKind::Default)
+ return;
+ Out << FS << Name << ": " << DICompileUnit::nameTableKindString(NTK);
+}
+
template <class IntTy, class Stringifier>
void MDFieldPrinter::printDwarfEnum(StringRef Name, IntTy Value,
Stringifier toString, bool ShouldSkipZero) {
@@ -1744,6 +1784,8 @@ static void writeDILocation(raw_ostream &Out, const DILocation *DL,
Printer.printInt("column", DL->getColumn());
Printer.printMetadata("scope", DL->getRawScope(), /* ShouldSkipNull */ false);
Printer.printMetadata("inlinedAt", DL->getRawInlinedAt());
+ Printer.printBool("isImplicitCode", DL->isImplicitCode(),
+ /* Default */ false);
Out << ")";
}
@@ -1787,6 +1829,7 @@ static void writeDIBasicType(raw_ostream &Out, const DIBasicType *N,
Printer.printInt("align", N->getAlignInBits());
Printer.printDwarfEnum("encoding", N->getEncoding(),
dwarf::AttributeEncodingString);
+ Printer.printDIFlags("flags", N->getFlags());
Out << ")";
}
@@ -1890,7 +1933,8 @@ static void writeDICompileUnit(raw_ostream &Out, const DICompileUnit *N,
Printer.printBool("splitDebugInlining", N->getSplitDebugInlining(), true);
Printer.printBool("debugInfoForProfiling", N->getDebugInfoForProfiling(),
false);
- Printer.printBool("gnuPubnames", N->getGnuPubnames(), false);
+ Printer.printNameTableKind("nameTableKind", N->getNameTableKind());
+ Printer.printBool("rangesBaseAddress", N->getRangesBaseAddress(), false);
Out << ")";
}
@@ -1905,18 +1949,14 @@ static void writeDISubprogram(raw_ostream &Out, const DISubprogram *N,
Printer.printMetadata("file", N->getRawFile());
Printer.printInt("line", N->getLine());
Printer.printMetadata("type", N->getRawType());
- Printer.printBool("isLocal", N->isLocalToUnit());
- Printer.printBool("isDefinition", N->isDefinition());
Printer.printInt("scopeLine", N->getScopeLine());
Printer.printMetadata("containingType", N->getRawContainingType());
- Printer.printDwarfEnum("virtuality", N->getVirtuality(),
- dwarf::VirtualityString);
if (N->getVirtuality() != dwarf::DW_VIRTUALITY_none ||
N->getVirtualIndex() != 0)
Printer.printInt("virtualIndex", N->getVirtualIndex(), false);
Printer.printInt("thisAdjustment", N->getThisAdjustment());
Printer.printDIFlags("flags", N->getFlags());
- Printer.printBool("isOptimized", N->isOptimized());
+ Printer.printDISPFlags("spFlags", N->getSPFlags());
Printer.printMetadata("unit", N->getRawUnit());
Printer.printMetadata("templateParams", N->getRawTemplateParams());
Printer.printMetadata("declaration", N->getRawDeclaration());
@@ -2040,6 +2080,7 @@ static void writeDIGlobalVariable(raw_ostream &Out, const DIGlobalVariable *N,
Printer.printBool("isLocal", N->isLocalToUnit());
Printer.printBool("isDefinition", N->isDefinition());
Printer.printMetadata("declaration", N->getRawStaticDataMemberDeclaration());
+ Printer.printMetadata("templateParams", N->getRawTemplateParams());
Printer.printInt("align", N->getAlignInBits());
Out << ")";
}
@@ -2252,11 +2293,15 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Metadata *MD,
Machine = MachineStorage.get();
}
int Slot = Machine->getMetadataSlot(N);
- if (Slot == -1)
+ if (Slot == -1) {
+ if (const DILocation *Loc = dyn_cast<DILocation>(N)) {
+ writeDILocation(Out, Loc, TypePrinter, Machine, Context);
+ return;
+ }
// Give the pointer value instead of "badref", since this comes up all
// the time when debugging.
Out << "<" << N << ">";
- else
+ } else
Out << '!' << Slot;
return;
}
@@ -2313,7 +2358,7 @@ public:
void writeOperand(const Value *Op, bool PrintType);
void writeParamOperand(const Value *Operand, AttributeSet Attrs);
- void writeOperandBundles(ImmutableCallSite CS);
+ void writeOperandBundles(const CallBase *Call);
void writeSyncScope(const LLVMContext &Context,
SyncScope::ID SSID);
void writeAtomic(const LLVMContext &Context,
@@ -2464,15 +2509,15 @@ void AssemblyWriter::writeParamOperand(const Value *Operand,
WriteAsOperandInternal(Out, Operand, &TypePrinter, &Machine, TheModule);
}
-void AssemblyWriter::writeOperandBundles(ImmutableCallSite CS) {
- if (!CS.hasOperandBundles())
+void AssemblyWriter::writeOperandBundles(const CallBase *Call) {
+ if (!Call->hasOperandBundles())
return;
Out << " [ ";
bool FirstBundle = true;
- for (unsigned i = 0, e = CS.getNumOperandBundles(); i != e; ++i) {
- OperandBundleUse BU = CS.getOperandBundleAt(i);
+ for (unsigned i = 0, e = Call->getNumOperandBundles(); i != e; ++i) {
+ OperandBundleUse BU = Call->getOperandBundleAt(i);
if (!FirstBundle)
Out << ", ";
@@ -2643,12 +2688,12 @@ void AssemblyWriter::printModuleSummaryIndex() {
}
// Print the TypeIdMap entries.
- for (auto &TId : TheIndex->typeIds()) {
- auto GUID = GlobalValue::getGUID(TId.first);
- Out << "^" << Machine.getGUIDSlot(GUID) << " = typeid: (name: \""
- << TId.first << "\"";
- printTypeIdSummary(TId.second);
- Out << ") ; guid = " << GUID << "\n";
+ for (auto TidIter = TheIndex->typeIds().begin();
+ TidIter != TheIndex->typeIds().end(); TidIter++) {
+ Out << "^" << Machine.getTypeIdSlot(TidIter->second.first)
+ << " = typeid: (name: \"" << TidIter->second.first << "\"";
+ printTypeIdSummary(TidIter->second.second);
+ Out << ") ; guid = " << TidIter->first << "\n";
}
}
@@ -2800,7 +2845,7 @@ void AssemblyWriter::printAliasSummary(const AliasSummary *AS) {
}
void AssemblyWriter::printGlobalVarSummary(const GlobalVarSummary *GS) {
- // Nothing for now
+ Out << ", varFlags: (readonly: " << GS->VarFlags.ReadOnly << ")";
}
static std::string getLinkageName(GlobalValue::LinkageTypes LT) {
@@ -2840,22 +2885,6 @@ static std::string getLinkageNameWithSpace(GlobalValue::LinkageTypes LT) {
return getLinkageName(LT) + " ";
}
-static const char *getHotnessName(CalleeInfo::HotnessType HT) {
- switch (HT) {
- case CalleeInfo::HotnessType::Unknown:
- return "unknown";
- case CalleeInfo::HotnessType::Cold:
- return "cold";
- case CalleeInfo::HotnessType::None:
- return "none";
- case CalleeInfo::HotnessType::Hot:
- return "hot";
- case CalleeInfo::HotnessType::Critical:
- return "critical";
- }
- llvm_unreachable("invalid hotness");
-}
-
void AssemblyWriter::printFunctionSummary(const FunctionSummary *FS) {
Out << ", insts: " << FS->instCount();
@@ -2867,6 +2896,7 @@ void AssemblyWriter::printFunctionSummary(const FunctionSummary *FS) {
Out << ", readOnly: " << FFlags.ReadOnly;
Out << ", noRecurse: " << FFlags.NoRecurse;
Out << ", returnDoesNotAlias: " << FFlags.ReturnDoesNotAlias;
+ Out << ", noInline: " << FFlags.NoInline;
Out << ")";
}
if (!FS->calls().empty()) {
@@ -2897,12 +2927,19 @@ void AssemblyWriter::printTypeIdInfo(
Out << "typeTests: (";
FieldSeparator FS;
for (auto &GUID : TIDInfo.TypeTests) {
- Out << FS;
- auto Slot = Machine.getGUIDSlot(GUID);
- if (Slot != -1)
- Out << "^" << Slot;
- else
+ auto TidIter = TheIndex->typeIds().equal_range(GUID);
+ if (TidIter.first == TidIter.second) {
+ Out << FS;
Out << GUID;
+ continue;
+ }
+ // Print all type id that correspond to this GUID.
+ for (auto It = TidIter.first; It != TidIter.second; ++It) {
+ Out << FS;
+ auto Slot = Machine.getTypeIdSlot(It->second.first);
+ assert(Slot != -1);
+ Out << "^" << Slot;
+ }
}
Out << ")";
}
@@ -2928,14 +2965,25 @@ void AssemblyWriter::printTypeIdInfo(
}
void AssemblyWriter::printVFuncId(const FunctionSummary::VFuncId VFId) {
- Out << "vFuncId: (";
- auto Slot = Machine.getGUIDSlot(VFId.GUID);
- if (Slot != -1)
- Out << "^" << Slot;
- else
+ auto TidIter = TheIndex->typeIds().equal_range(VFId.GUID);
+ if (TidIter.first == TidIter.second) {
+ Out << "vFuncId: (";
Out << "guid: " << VFId.GUID;
- Out << ", offset: " << VFId.Offset;
- Out << ")";
+ Out << ", offset: " << VFId.Offset;
+ Out << ")";
+ return;
+ }
+ // Print all type id that correspond to this GUID.
+ FieldSeparator FS;
+ for (auto It = TidIter.first; It != TidIter.second; ++It) {
+ Out << FS;
+ Out << "vFuncId: (";
+ auto Slot = Machine.getTypeIdSlot(It->second.first);
+ assert(Slot != -1);
+ Out << "^" << Slot;
+ Out << ", offset: " << VFId.Offset;
+ Out << ")";
+ }
}
void AssemblyWriter::printNonConstVCalls(
@@ -2955,11 +3003,13 @@ void AssemblyWriter::printConstVCalls(
FieldSeparator FS;
for (auto &ConstVCall : VCallList) {
Out << FS;
+ Out << "(";
printVFuncId(ConstVCall.VFunc);
if (!ConstVCall.Args.empty()) {
Out << ", ";
printArgs(ConstVCall.Args);
}
+ Out << ")";
}
Out << ")";
}
@@ -2989,6 +3039,8 @@ void AssemblyWriter::printSummary(const GlobalValueSummary &Summary) {
FieldSeparator FS;
for (auto &Ref : RefList) {
Out << FS;
+ if (Ref.isReadOnly())
+ Out << "readonly ";
Out << "^" << Machine.getGUIDSlot(Ref.getGUID());
}
Out << ")";
@@ -3354,6 +3406,13 @@ void AssemblyWriter::printFunction(const Function *F) {
StringRef UA = getUnnamedAddrEncoding(F->getUnnamedAddr());
if (!UA.empty())
Out << ' ' << UA;
+ // We print the function address space if it is non-zero or if we are writing
+ // a module with a non-zero program address space or if there is no valid
+ // Module* so that the file can be parsed without the datalayout string.
+ const Module *Mod = F->getParent();
+ if (F->getAddressSpace() != 0 || !Mod ||
+ Mod->getDataLayout().getProgramAddressSpace() != 0)
+ Out << " addrspace(" << F->getAddressSpace() << ")";
if (Attrs.hasAttributes(AttributeList::FunctionIndex))
Out << " #" << Machine.getAttributeGroupSlot(Attrs.getFnAttributes());
if (F->hasSection()) {
@@ -3491,6 +3550,23 @@ void AssemblyWriter::printInfoComment(const Value &V) {
AnnotationWriter->printInfoComment(V, Out);
}
+static void maybePrintCallAddrSpace(const Value *Operand, const Instruction *I,
+ raw_ostream &Out) {
+ // We print the address space of the call if it is non-zero.
+ unsigned CallAddrSpace = Operand->getType()->getPointerAddressSpace();
+ bool PrintAddrSpace = CallAddrSpace != 0;
+ if (!PrintAddrSpace) {
+ const Module *Mod = getModuleFromVal(I);
+ // We also print it if it is zero but not equal to the program address space
+ // or if we can't find a valid Module* to make it possible to parse
+ // the resulting file even without a datalayout string.
+ if (!Mod || Mod->getDataLayout().getProgramAddressSpace() != 0)
+ PrintAddrSpace = true;
+ }
+ if (PrintAddrSpace)
+ Out << " addrspace(" << CallAddrSpace << ")";
+}
+
// This member is called for each Instruction in a function..
void AssemblyWriter::printInstruction(const Instruction &I) {
if (AnnotationWriter) AnnotationWriter->emitInstructionAnnot(&I, Out);
@@ -3547,7 +3623,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
// Print out the atomicrmw operation
if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(&I))
- writeAtomicRMWOperation(Out, RMWI->getOperation());
+ Out << ' ' << AtomicRMWInst::getOperationName(RMWI->getOperation());
// Print out the type of the operands...
const Value *Operand = I.getNumOperands() ? I.getOperand(0) : nullptr;
@@ -3688,6 +3764,9 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
if (PAL.hasAttributes(AttributeList::ReturnIndex))
Out << ' ' << PAL.getAsString(AttributeList::ReturnIndex);
+ // Only print addrspace(N) if necessary:
+ maybePrintCallAddrSpace(Operand, &I, Out);
+
// If possible, print out the short form of the call instruction. We can
// only do this if the first argument is a pointer to a nonvararg function,
// and if the return type is not a pointer to a function.
@@ -3730,6 +3809,9 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
if (PAL.hasAttributes(AttributeList::ReturnIndex))
Out << ' ' << PAL.getAsString(AttributeList::ReturnIndex);
+ // Only print addrspace(N) if necessary:
+ maybePrintCallAddrSpace(Operand, &I, Out);
+
// If possible, print out the short form of the invoke instruction. We can
// only do this if the first argument is a pointer to a nonvararg function,
// and if the return type is not a pointer to a function.
diff --git a/lib/IR/Attributes.cpp b/lib/IR/Attributes.cpp
index d87187481be0..ff46debb7a9e 100644
--- a/lib/IR/Attributes.cpp
+++ b/lib/IR/Attributes.cpp
@@ -323,6 +323,8 @@ std::string Attribute::getAsString(bool InAttrGrp) const {
return "returns_twice";
if (hasAttribute(Attribute::SExt))
return "signext";
+ if (hasAttribute(Attribute::SpeculativeLoadHardening))
+ return "speculative_load_hardening";
if (hasAttribute(Attribute::Speculatable))
return "speculatable";
if (hasAttribute(Attribute::StackProtect))
@@ -637,7 +639,7 @@ LLVM_DUMP_METHOD void AttributeSet::dump() const {
AttributeSetNode::AttributeSetNode(ArrayRef<Attribute> Attrs)
: AvailableAttrs(0), NumAttrs(Attrs.size()) {
// There's memory after the node where we can store the entries in.
- std::copy(Attrs.begin(), Attrs.end(), getTrailingObjects<Attribute>());
+ llvm::copy(Attrs, getTrailingObjects<Attribute>());
for (const auto I : *this) {
if (!I.isStringAttribute()) {
@@ -656,7 +658,7 @@ AttributeSetNode *AttributeSetNode::get(LLVMContext &C,
FoldingSetNodeID ID;
SmallVector<Attribute, 8> SortedAttrs(Attrs.begin(), Attrs.end());
- llvm::sort(SortedAttrs.begin(), SortedAttrs.end());
+ llvm::sort(SortedAttrs);
for (const auto Attr : SortedAttrs)
Attr.Profile(ID);
@@ -807,7 +809,7 @@ AttributeListImpl::AttributeListImpl(LLVMContext &C,
assert(!Sets.empty() && "pointless AttributeListImpl");
// There's memory after the node where we can store the entries in.
- std::copy(Sets.begin(), Sets.end(), getTrailingObjects<AttributeSet>());
+ llvm::copy(Sets, getTrailingObjects<AttributeSet>());
// Initialize AvailableFunctionAttrs summary bitset.
static_assert(Attribute::EndAttrKinds <=
@@ -1683,28 +1685,32 @@ adjustCallerStackProbeSize(Function &Caller, const Function &Callee) {
}
/// If the inlined function defines a min legal vector width, then ensure
-/// the calling function has the same or larger min legal vector width. This
-/// function is called after the inlining decision has been made so we have to
-/// merge the attribute this way. Heuristics that would use
+/// the calling function has the same or larger min legal vector width. If the
+/// caller has the attribute, but the callee doesn't, we need to remove the
+/// attribute from the caller since we can't make any guarantees about the
+/// caller's requirements.
+/// This function is called after the inlining decision has been made so we have
+/// to merge the attribute this way. Heuristics that would use
/// min-legal-vector-width to determine inline compatibility would need to be
/// handled as part of inline cost analysis.
static void
adjustMinLegalVectorWidth(Function &Caller, const Function &Callee) {
- if (Callee.hasFnAttribute("min-legal-vector-width")) {
- uint64_t CalleeVectorWidth;
- Callee.getFnAttribute("min-legal-vector-width")
- .getValueAsString()
- .getAsInteger(0, CalleeVectorWidth);
- if (Caller.hasFnAttribute("min-legal-vector-width")) {
+ if (Caller.hasFnAttribute("min-legal-vector-width")) {
+ if (Callee.hasFnAttribute("min-legal-vector-width")) {
uint64_t CallerVectorWidth;
Caller.getFnAttribute("min-legal-vector-width")
.getValueAsString()
.getAsInteger(0, CallerVectorWidth);
- if (CallerVectorWidth < CalleeVectorWidth) {
+ uint64_t CalleeVectorWidth;
+ Callee.getFnAttribute("min-legal-vector-width")
+ .getValueAsString()
+ .getAsInteger(0, CalleeVectorWidth);
+ if (CallerVectorWidth < CalleeVectorWidth)
Caller.addFnAttr(Callee.getFnAttribute("min-legal-vector-width"));
- }
} else {
- Caller.addFnAttr(Callee.getFnAttribute("min-legal-vector-width"));
+ // If the callee doesn't have the attribute then we don't know anything
+ // and must drop the attribute from the caller.
+ Caller.removeFnAttr("min-legal-vector-width");
}
}
}
diff --git a/lib/IR/AutoUpgrade.cpp b/lib/IR/AutoUpgrade.cpp
index f098ad9725b6..b2eb8b09982e 100644
--- a/lib/IR/AutoUpgrade.cpp
+++ b/lib/IR/AutoUpgrade.cpp
@@ -71,7 +71,27 @@ static bool ShouldUpgradeX86Intrinsic(Function *F, StringRef Name) {
// like to use this information to remove upgrade code for some older
// intrinsics. It is currently undecided how we will determine that future
// point.
- if (Name=="ssse3.pabs.b.128" || // Added in 6.0
+ if (Name == "addcarryx.u32" || // Added in 8.0
+ Name == "addcarryx.u64" || // Added in 8.0
+ Name == "addcarry.u32" || // Added in 8.0
+ Name == "addcarry.u64" || // Added in 8.0
+ Name == "subborrow.u32" || // Added in 8.0
+ Name == "subborrow.u64" || // Added in 8.0
+ Name.startswith("sse2.padds.") || // Added in 8.0
+ Name.startswith("sse2.psubs.") || // Added in 8.0
+ Name.startswith("sse2.paddus.") || // Added in 8.0
+ Name.startswith("sse2.psubus.") || // Added in 8.0
+ Name.startswith("avx2.padds.") || // Added in 8.0
+ Name.startswith("avx2.psubs.") || // Added in 8.0
+ Name.startswith("avx2.paddus.") || // Added in 8.0
+ Name.startswith("avx2.psubus.") || // Added in 8.0
+ Name.startswith("avx512.padds.") || // Added in 8.0
+ Name.startswith("avx512.psubs.") || // Added in 8.0
+ Name.startswith("avx512.mask.padds.") || // Added in 8.0
+ Name.startswith("avx512.mask.psubs.") || // Added in 8.0
+ Name.startswith("avx512.mask.paddus.") || // Added in 8.0
+ Name.startswith("avx512.mask.psubus.") || // Added in 8.0
+ Name=="ssse3.pabs.b.128" || // Added in 6.0
Name=="ssse3.pabs.w.128" || // Added in 6.0
Name=="ssse3.pabs.d.128" || // Added in 6.0
Name.startswith("fma4.vfmadd.s") || // Added in 7.0
@@ -265,6 +285,12 @@ static bool ShouldUpgradeX86Intrinsic(Function *F, StringRef Name) {
Name.startswith("avx512.mask.dbpsadbw.") || // Added in 7.0
Name.startswith("avx512.mask.vpshld.") || // Added in 7.0
Name.startswith("avx512.mask.vpshrd.") || // Added in 7.0
+ Name.startswith("avx512.mask.vpshldv.") || // Added in 8.0
+ Name.startswith("avx512.mask.vpshrdv.") || // Added in 8.0
+ Name.startswith("avx512.maskz.vpshldv.") || // Added in 8.0
+ Name.startswith("avx512.maskz.vpshrdv.") || // Added in 8.0
+ Name.startswith("avx512.vpshld.") || // Added in 8.0
+ Name.startswith("avx512.vpshrd.") || // Added in 8.0
Name.startswith("avx512.mask.add.p") || // Added in 7.0. 128/256 in 4.0
Name.startswith("avx512.mask.sub.p") || // Added in 7.0. 128/256 in 4.0
Name.startswith("avx512.mask.mul.p") || // Added in 7.0. 128/256 in 4.0
@@ -272,10 +298,8 @@ static bool ShouldUpgradeX86Intrinsic(Function *F, StringRef Name) {
Name.startswith("avx512.mask.max.p") || // Added in 7.0. 128/256 in 5.0
Name.startswith("avx512.mask.min.p") || // Added in 7.0. 128/256 in 5.0
Name.startswith("avx512.mask.fpclass.p") || // Added in 7.0
- Name.startswith("avx512.mask.prorv.") || // Added in 7.0
- Name.startswith("avx512.mask.pror.") || // Added in 7.0
- Name.startswith("avx512.mask.prolv.") || // Added in 7.0
- Name.startswith("avx512.mask.prol.") || // Added in 7.0
+ Name.startswith("avx512.mask.vpshufbitqmb.") || // Added in 8.0
+ Name.startswith("avx512.mask.pmultishift.qb.") || // Added in 8.0
Name == "sse.cvtsi2ss" || // Added in 7.0
Name == "sse.cvtsi642ss" || // Added in 7.0
Name == "sse2.cvtsi2sd" || // Added in 7.0
@@ -340,6 +364,13 @@ static bool ShouldUpgradeX86Intrinsic(Function *F, StringRef Name) {
Name.startswith("avx512.cvtmask2") || // Added in 5.0
(Name.startswith("xop.vpcom") && // Added in 3.2
F->arg_size() == 2) ||
+ Name.startswith("xop.vprot") || // Added in 8.0
+ Name.startswith("avx512.prol") || // Added in 8.0
+ Name.startswith("avx512.pror") || // Added in 8.0
+ Name.startswith("avx512.mask.prorv.") || // Added in 8.0
+ Name.startswith("avx512.mask.pror.") || // Added in 8.0
+ Name.startswith("avx512.mask.prolv.") || // Added in 8.0
+ Name.startswith("avx512.mask.prol.") || // Added in 8.0
Name.startswith("avx512.ptestm") || //Added in 6.0
Name.startswith("avx512.ptestnm") || //Added in 6.0
Name.startswith("sse2.pavg") || // Added in 6.0
@@ -363,6 +394,17 @@ static bool UpgradeX86IntrinsicFunction(Function *F, StringRef Name,
return true;
}
+ if (Name == "rdtscp") { // Added in 8.0
+ // If this intrinsic has 0 operands, it's the new version.
+ if (F->getFunctionType()->getNumParams() == 0)
+ return false;
+
+ rename(F);
+ NewFn = Intrinsic::getDeclaration(F->getParent(),
+ Intrinsic::x86_rdtscp);
+ return true;
+ }
+
// SSE4.1 ptest functions may have an old signature.
if (Name.startswith("sse41.ptest")) { // Added in 3.2
if (Name.substr(11) == "c")
@@ -456,7 +498,7 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
// the end of the name. Change name from llvm.arm.neon.vclz.* to
// llvm.ctlz.*
FunctionType* fType = FunctionType::get(F->getReturnType(), args, false);
- NewFn = Function::Create(fType, F->getLinkage(),
+ NewFn = Function::Create(fType, F->getLinkage(), F->getAddressSpace(),
"llvm.ctlz." + Name.substr(14), F->getParent());
return true;
}
@@ -472,7 +514,7 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
// Can't use Intrinsic::getDeclaration here as the return types might
// then only be structurally equal.
FunctionType* fType = FunctionType::get(F->getReturnType(), Tys, false);
- NewFn = Function::Create(fType, F->getLinkage(),
+ NewFn = Function::Create(fType, F->getLinkage(), F->getAddressSpace(),
"llvm." + Name + ".p0i8", F->getParent());
return true;
}
@@ -502,6 +544,10 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::thread_pointer);
return true;
}
+ if (Name == "x86.seh.recoverfp") {
+ NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::eh_recoverfp);
+ return true;
+ }
break;
}
@@ -899,6 +945,148 @@ static Value *UpgradeX86ALIGNIntrinsics(IRBuilder<> &Builder, Value *Op0,
return EmitX86Select(Builder, Mask, Align, Passthru);
}
+static Value *UpgradeX86VPERMT2Intrinsics(IRBuilder<> &Builder, CallInst &CI,
+ bool ZeroMask, bool IndexForm) {
+ Type *Ty = CI.getType();
+ unsigned VecWidth = Ty->getPrimitiveSizeInBits();
+ unsigned EltWidth = Ty->getScalarSizeInBits();
+ bool IsFloat = Ty->isFPOrFPVectorTy();
+ Intrinsic::ID IID;
+ if (VecWidth == 128 && EltWidth == 32 && IsFloat)
+ IID = Intrinsic::x86_avx512_vpermi2var_ps_128;
+ else if (VecWidth == 128 && EltWidth == 32 && !IsFloat)
+ IID = Intrinsic::x86_avx512_vpermi2var_d_128;
+ else if (VecWidth == 128 && EltWidth == 64 && IsFloat)
+ IID = Intrinsic::x86_avx512_vpermi2var_pd_128;
+ else if (VecWidth == 128 && EltWidth == 64 && !IsFloat)
+ IID = Intrinsic::x86_avx512_vpermi2var_q_128;
+ else if (VecWidth == 256 && EltWidth == 32 && IsFloat)
+ IID = Intrinsic::x86_avx512_vpermi2var_ps_256;
+ else if (VecWidth == 256 && EltWidth == 32 && !IsFloat)
+ IID = Intrinsic::x86_avx512_vpermi2var_d_256;
+ else if (VecWidth == 256 && EltWidth == 64 && IsFloat)
+ IID = Intrinsic::x86_avx512_vpermi2var_pd_256;
+ else if (VecWidth == 256 && EltWidth == 64 && !IsFloat)
+ IID = Intrinsic::x86_avx512_vpermi2var_q_256;
+ else if (VecWidth == 512 && EltWidth == 32 && IsFloat)
+ IID = Intrinsic::x86_avx512_vpermi2var_ps_512;
+ else if (VecWidth == 512 && EltWidth == 32 && !IsFloat)
+ IID = Intrinsic::x86_avx512_vpermi2var_d_512;
+ else if (VecWidth == 512 && EltWidth == 64 && IsFloat)
+ IID = Intrinsic::x86_avx512_vpermi2var_pd_512;
+ else if (VecWidth == 512 && EltWidth == 64 && !IsFloat)
+ IID = Intrinsic::x86_avx512_vpermi2var_q_512;
+ else if (VecWidth == 128 && EltWidth == 16)
+ IID = Intrinsic::x86_avx512_vpermi2var_hi_128;
+ else if (VecWidth == 256 && EltWidth == 16)
+ IID = Intrinsic::x86_avx512_vpermi2var_hi_256;
+ else if (VecWidth == 512 && EltWidth == 16)
+ IID = Intrinsic::x86_avx512_vpermi2var_hi_512;
+ else if (VecWidth == 128 && EltWidth == 8)
+ IID = Intrinsic::x86_avx512_vpermi2var_qi_128;
+ else if (VecWidth == 256 && EltWidth == 8)
+ IID = Intrinsic::x86_avx512_vpermi2var_qi_256;
+ else if (VecWidth == 512 && EltWidth == 8)
+ IID = Intrinsic::x86_avx512_vpermi2var_qi_512;
+ else
+ llvm_unreachable("Unexpected intrinsic");
+
+ Value *Args[] = { CI.getArgOperand(0) , CI.getArgOperand(1),
+ CI.getArgOperand(2) };
+
+ // If this isn't index form we need to swap operand 0 and 1.
+ if (!IndexForm)
+ std::swap(Args[0], Args[1]);
+
+ Value *V = Builder.CreateCall(Intrinsic::getDeclaration(CI.getModule(), IID),
+ Args);
+ Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty)
+ : Builder.CreateBitCast(CI.getArgOperand(1),
+ Ty);
+ return EmitX86Select(Builder, CI.getArgOperand(3), V, PassThru);
+}
+
+static Value *UpgradeX86AddSubSatIntrinsics(IRBuilder<> &Builder, CallInst &CI,
+ bool IsSigned, bool IsAddition) {
+ Type *Ty = CI.getType();
+ Value *Op0 = CI.getOperand(0);
+ Value *Op1 = CI.getOperand(1);
+
+ Intrinsic::ID IID =
+ IsSigned ? (IsAddition ? Intrinsic::sadd_sat : Intrinsic::ssub_sat)
+ : (IsAddition ? Intrinsic::uadd_sat : Intrinsic::usub_sat);
+ Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
+ Value *Res = Builder.CreateCall(Intrin, {Op0, Op1});
+
+ if (CI.getNumArgOperands() == 4) { // For masked intrinsics.
+ Value *VecSrc = CI.getOperand(2);
+ Value *Mask = CI.getOperand(3);
+ Res = EmitX86Select(Builder, Mask, Res, VecSrc);
+ }
+ return Res;
+}
+
+static Value *upgradeX86Rotate(IRBuilder<> &Builder, CallInst &CI,
+ bool IsRotateRight) {
+ Type *Ty = CI.getType();
+ Value *Src = CI.getArgOperand(0);
+ Value *Amt = CI.getArgOperand(1);
+
+ // Amount may be scalar immediate, in which case create a splat vector.
+ // Funnel shifts amounts are treated as modulo and types are all power-of-2 so
+ // we only care about the lowest log2 bits anyway.
+ if (Amt->getType() != Ty) {
+ unsigned NumElts = Ty->getVectorNumElements();
+ Amt = Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
+ Amt = Builder.CreateVectorSplat(NumElts, Amt);
+ }
+
+ Intrinsic::ID IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
+ Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
+ Value *Res = Builder.CreateCall(Intrin, {Src, Src, Amt});
+
+ if (CI.getNumArgOperands() == 4) { // For masked intrinsics.
+ Value *VecSrc = CI.getOperand(2);
+ Value *Mask = CI.getOperand(3);
+ Res = EmitX86Select(Builder, Mask, Res, VecSrc);
+ }
+ return Res;
+}
+
+static Value *upgradeX86ConcatShift(IRBuilder<> &Builder, CallInst &CI,
+ bool IsShiftRight, bool ZeroMask) {
+ Type *Ty = CI.getType();
+ Value *Op0 = CI.getArgOperand(0);
+ Value *Op1 = CI.getArgOperand(1);
+ Value *Amt = CI.getArgOperand(2);
+
+ if (IsShiftRight)
+ std::swap(Op0, Op1);
+
+ // Amount may be scalar immediate, in which case create a splat vector.
+ // Funnel shifts amounts are treated as modulo and types are all power-of-2 so
+ // we only care about the lowest log2 bits anyway.
+ if (Amt->getType() != Ty) {
+ unsigned NumElts = Ty->getVectorNumElements();
+ Amt = Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
+ Amt = Builder.CreateVectorSplat(NumElts, Amt);
+ }
+
+ Intrinsic::ID IID = IsShiftRight ? Intrinsic::fshr : Intrinsic::fshl;
+ Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
+ Value *Res = Builder.CreateCall(Intrin, {Op0, Op1, Amt});
+
+ unsigned NumArgs = CI.getNumArgOperands();
+ if (NumArgs >= 4) { // For masked intrinsics.
+ Value *VecSrc = NumArgs == 5 ? CI.getArgOperand(3) :
+ ZeroMask ? ConstantAggregateZero::get(CI.getType()) :
+ CI.getArgOperand(0);
+ Value *Mask = CI.getOperand(NumArgs - 1);
+ Res = EmitX86Select(Builder, Mask, Res, VecSrc);
+ }
+ return Res;
+}
+
static Value *UpgradeMaskedStore(IRBuilder<> &Builder,
Value *Ptr, Value *Data, Value *Mask,
bool Aligned) {
@@ -1265,106 +1453,13 @@ static bool upgradeAVX512MaskToSelect(StringRef Name, IRBuilder<> &Builder,
IID = Intrinsic::x86_avx512_dbpsadbw_512;
else
llvm_unreachable("Unexpected intrinsic");
- } else if (Name.startswith("vpshld.")) {
- if (VecWidth == 128 && Name[7] == 'q')
- IID = Intrinsic::x86_avx512_vpshld_q_128;
- else if (VecWidth == 128 && Name[7] == 'd')
- IID = Intrinsic::x86_avx512_vpshld_d_128;
- else if (VecWidth == 128 && Name[7] == 'w')
- IID = Intrinsic::x86_avx512_vpshld_w_128;
- else if (VecWidth == 256 && Name[7] == 'q')
- IID = Intrinsic::x86_avx512_vpshld_q_256;
- else if (VecWidth == 256 && Name[7] == 'd')
- IID = Intrinsic::x86_avx512_vpshld_d_256;
- else if (VecWidth == 256 && Name[7] == 'w')
- IID = Intrinsic::x86_avx512_vpshld_w_256;
- else if (VecWidth == 512 && Name[7] == 'q')
- IID = Intrinsic::x86_avx512_vpshld_q_512;
- else if (VecWidth == 512 && Name[7] == 'd')
- IID = Intrinsic::x86_avx512_vpshld_d_512;
- else if (VecWidth == 512 && Name[7] == 'w')
- IID = Intrinsic::x86_avx512_vpshld_w_512;
- else
- llvm_unreachable("Unexpected intrinsic");
- } else if (Name.startswith("vpshrd.")) {
- if (VecWidth == 128 && Name[7] == 'q')
- IID = Intrinsic::x86_avx512_vpshrd_q_128;
- else if (VecWidth == 128 && Name[7] == 'd')
- IID = Intrinsic::x86_avx512_vpshrd_d_128;
- else if (VecWidth == 128 && Name[7] == 'w')
- IID = Intrinsic::x86_avx512_vpshrd_w_128;
- else if (VecWidth == 256 && Name[7] == 'q')
- IID = Intrinsic::x86_avx512_vpshrd_q_256;
- else if (VecWidth == 256 && Name[7] == 'd')
- IID = Intrinsic::x86_avx512_vpshrd_d_256;
- else if (VecWidth == 256 && Name[7] == 'w')
- IID = Intrinsic::x86_avx512_vpshrd_w_256;
- else if (VecWidth == 512 && Name[7] == 'q')
- IID = Intrinsic::x86_avx512_vpshrd_q_512;
- else if (VecWidth == 512 && Name[7] == 'd')
- IID = Intrinsic::x86_avx512_vpshrd_d_512;
- else if (VecWidth == 512 && Name[7] == 'w')
- IID = Intrinsic::x86_avx512_vpshrd_w_512;
- else
- llvm_unreachable("Unexpected intrinsic");
- } else if (Name.startswith("prorv.")) {
- if (VecWidth == 128 && EltWidth == 32)
- IID = Intrinsic::x86_avx512_prorv_d_128;
- else if (VecWidth == 256 && EltWidth == 32)
- IID = Intrinsic::x86_avx512_prorv_d_256;
- else if (VecWidth == 512 && EltWidth == 32)
- IID = Intrinsic::x86_avx512_prorv_d_512;
- else if (VecWidth == 128 && EltWidth == 64)
- IID = Intrinsic::x86_avx512_prorv_q_128;
- else if (VecWidth == 256 && EltWidth == 64)
- IID = Intrinsic::x86_avx512_prorv_q_256;
- else if (VecWidth == 512 && EltWidth == 64)
- IID = Intrinsic::x86_avx512_prorv_q_512;
- else
- llvm_unreachable("Unexpected intrinsic");
- } else if (Name.startswith("prolv.")) {
- if (VecWidth == 128 && EltWidth == 32)
- IID = Intrinsic::x86_avx512_prolv_d_128;
- else if (VecWidth == 256 && EltWidth == 32)
- IID = Intrinsic::x86_avx512_prolv_d_256;
- else if (VecWidth == 512 && EltWidth == 32)
- IID = Intrinsic::x86_avx512_prolv_d_512;
- else if (VecWidth == 128 && EltWidth == 64)
- IID = Intrinsic::x86_avx512_prolv_q_128;
- else if (VecWidth == 256 && EltWidth == 64)
- IID = Intrinsic::x86_avx512_prolv_q_256;
- else if (VecWidth == 512 && EltWidth == 64)
- IID = Intrinsic::x86_avx512_prolv_q_512;
- else
- llvm_unreachable("Unexpected intrinsic");
- } else if (Name.startswith("pror.")) {
- if (VecWidth == 128 && EltWidth == 32)
- IID = Intrinsic::x86_avx512_pror_d_128;
- else if (VecWidth == 256 && EltWidth == 32)
- IID = Intrinsic::x86_avx512_pror_d_256;
- else if (VecWidth == 512 && EltWidth == 32)
- IID = Intrinsic::x86_avx512_pror_d_512;
- else if (VecWidth == 128 && EltWidth == 64)
- IID = Intrinsic::x86_avx512_pror_q_128;
- else if (VecWidth == 256 && EltWidth == 64)
- IID = Intrinsic::x86_avx512_pror_q_256;
- else if (VecWidth == 512 && EltWidth == 64)
- IID = Intrinsic::x86_avx512_pror_q_512;
- else
- llvm_unreachable("Unexpected intrinsic");
- } else if (Name.startswith("prol.")) {
- if (VecWidth == 128 && EltWidth == 32)
- IID = Intrinsic::x86_avx512_prol_d_128;
- else if (VecWidth == 256 && EltWidth == 32)
- IID = Intrinsic::x86_avx512_prol_d_256;
- else if (VecWidth == 512 && EltWidth == 32)
- IID = Intrinsic::x86_avx512_prol_d_512;
- else if (VecWidth == 128 && EltWidth == 64)
- IID = Intrinsic::x86_avx512_prol_q_128;
- else if (VecWidth == 256 && EltWidth == 64)
- IID = Intrinsic::x86_avx512_prol_q_256;
- else if (VecWidth == 512 && EltWidth == 64)
- IID = Intrinsic::x86_avx512_prol_q_512;
+ } else if (Name.startswith("pmultishift.qb.")) {
+ if (VecWidth == 128)
+ IID = Intrinsic::x86_avx512_pmultishift_qb_128;
+ else if (VecWidth == 256)
+ IID = Intrinsic::x86_avx512_pmultishift_qb_256;
+ else if (VecWidth == 512)
+ IID = Intrinsic::x86_avx512_pmultishift_qb_512;
else
llvm_unreachable("Unexpected intrinsic");
} else
@@ -1654,46 +1749,44 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
C = ConstantInt::getNullValue(Builder.getInt16Ty());
Rep = Builder.CreateICmpEQ(Rep, C);
Rep = Builder.CreateZExt(Rep, Builder.getInt32Ty());
- } else if (IsX86 && (Name == "sse.add.ss" || Name == "sse2.add.sd")) {
- Type *I32Ty = Type::getInt32Ty(C);
- Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0),
- ConstantInt::get(I32Ty, 0));
- Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1),
- ConstantInt::get(I32Ty, 0));
- Rep = Builder.CreateInsertElement(CI->getArgOperand(0),
- Builder.CreateFAdd(Elt0, Elt1),
- ConstantInt::get(I32Ty, 0));
- } else if (IsX86 && (Name == "sse.sub.ss" || Name == "sse2.sub.sd")) {
+ } else if (IsX86 && (Name == "sse.add.ss" || Name == "sse2.add.sd" ||
+ Name == "sse.sub.ss" || Name == "sse2.sub.sd" ||
+ Name == "sse.mul.ss" || Name == "sse2.mul.sd" ||
+ Name == "sse.div.ss" || Name == "sse2.div.sd")) {
Type *I32Ty = Type::getInt32Ty(C);
Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0),
ConstantInt::get(I32Ty, 0));
Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1),
ConstantInt::get(I32Ty, 0));
- Rep = Builder.CreateInsertElement(CI->getArgOperand(0),
- Builder.CreateFSub(Elt0, Elt1),
- ConstantInt::get(I32Ty, 0));
- } else if (IsX86 && (Name == "sse.mul.ss" || Name == "sse2.mul.sd")) {
- Type *I32Ty = Type::getInt32Ty(C);
- Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0),
- ConstantInt::get(I32Ty, 0));
- Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1),
- ConstantInt::get(I32Ty, 0));
- Rep = Builder.CreateInsertElement(CI->getArgOperand(0),
- Builder.CreateFMul(Elt0, Elt1),
- ConstantInt::get(I32Ty, 0));
- } else if (IsX86 && (Name == "sse.div.ss" || Name == "sse2.div.sd")) {
- Type *I32Ty = Type::getInt32Ty(C);
- Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0),
- ConstantInt::get(I32Ty, 0));
- Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1),
- ConstantInt::get(I32Ty, 0));
- Rep = Builder.CreateInsertElement(CI->getArgOperand(0),
- Builder.CreateFDiv(Elt0, Elt1),
+ Value *EltOp;
+ if (Name.contains(".add."))
+ EltOp = Builder.CreateFAdd(Elt0, Elt1);
+ else if (Name.contains(".sub."))
+ EltOp = Builder.CreateFSub(Elt0, Elt1);
+ else if (Name.contains(".mul."))
+ EltOp = Builder.CreateFMul(Elt0, Elt1);
+ else
+ EltOp = Builder.CreateFDiv(Elt0, Elt1);
+ Rep = Builder.CreateInsertElement(CI->getArgOperand(0), EltOp,
ConstantInt::get(I32Ty, 0));
} else if (IsX86 && Name.startswith("avx512.mask.pcmp")) {
// "avx512.mask.pcmpeq." or "avx512.mask.pcmpgt."
bool CmpEq = Name[16] == 'e';
Rep = upgradeMaskedCompare(Builder, *CI, CmpEq ? 0 : 6, true);
+ } else if (IsX86 && Name.startswith("avx512.mask.vpshufbitqmb.")) {
+ Type *OpTy = CI->getArgOperand(0)->getType();
+ unsigned VecWidth = OpTy->getPrimitiveSizeInBits();
+ Intrinsic::ID IID;
+ switch (VecWidth) {
+ default: llvm_unreachable("Unexpected intrinsic");
+ case 128: IID = Intrinsic::x86_avx512_vpshufbitqmb_128; break;
+ case 256: IID = Intrinsic::x86_avx512_vpshufbitqmb_256; break;
+ case 512: IID = Intrinsic::x86_avx512_vpshufbitqmb_512; break;
+ }
+
+ Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
+ { CI->getOperand(0), CI->getArgOperand(1) });
+ Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2));
} else if (IsX86 && Name.startswith("avx512.mask.fpclass.p")) {
Type *OpTy = CI->getArgOperand(0)->getType();
unsigned VecWidth = OpTy->getPrimitiveSizeInBits();
@@ -1948,6 +2041,23 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
Value *Sel0 = Builder.CreateAnd(CI->getArgOperand(0), Sel);
Value *Sel1 = Builder.CreateAnd(CI->getArgOperand(1), NotSel);
Rep = Builder.CreateOr(Sel0, Sel1);
+ } else if (IsX86 && (Name.startswith("xop.vprot") ||
+ Name.startswith("avx512.prol") ||
+ Name.startswith("avx512.mask.prol"))) {
+ Rep = upgradeX86Rotate(Builder, *CI, false);
+ } else if (IsX86 && (Name.startswith("avx512.pror") ||
+ Name.startswith("avx512.mask.pror"))) {
+ Rep = upgradeX86Rotate(Builder, *CI, true);
+ } else if (IsX86 && (Name.startswith("avx512.vpshld.") ||
+ Name.startswith("avx512.mask.vpshld") ||
+ Name.startswith("avx512.maskz.vpshld"))) {
+ bool ZeroMask = Name[11] == 'z';
+ Rep = upgradeX86ConcatShift(Builder, *CI, false, ZeroMask);
+ } else if (IsX86 && (Name.startswith("avx512.vpshrd.") ||
+ Name.startswith("avx512.mask.vpshrd") ||
+ Name.startswith("avx512.maskz.vpshrd"))) {
+ bool ZeroMask = Name[11] == 'z';
+ Rep = upgradeX86ConcatShift(Builder, *CI, true, ZeroMask);
} else if (IsX86 && Name == "sse42.crc32.64.8") {
Function *CRC32 = Intrinsic::getDeclaration(F->getParent(),
Intrinsic::x86_sse42_crc32_32_8);
@@ -2059,6 +2169,24 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
if (CI->getNumArgOperands() == 3)
Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
CI->getArgOperand(1));
+ } else if (IsX86 && (Name.startswith("sse2.padds.") ||
+ Name.startswith("sse2.psubs.") ||
+ Name.startswith("avx2.padds.") ||
+ Name.startswith("avx2.psubs.") ||
+ Name.startswith("avx512.padds.") ||
+ Name.startswith("avx512.psubs.") ||
+ Name.startswith("avx512.mask.padds.") ||
+ Name.startswith("avx512.mask.psubs."))) {
+ bool IsAdd = Name.contains(".padds");
+ Rep = UpgradeX86AddSubSatIntrinsics(Builder, *CI, true, IsAdd);
+ } else if (IsX86 && (Name.startswith("sse2.paddus.") ||
+ Name.startswith("sse2.psubus.") ||
+ Name.startswith("avx2.paddus.") ||
+ Name.startswith("avx2.psubus.") ||
+ Name.startswith("avx512.mask.paddus.") ||
+ Name.startswith("avx512.mask.psubus."))) {
+ bool IsAdd = Name.contains(".paddus");
+ Rep = UpgradeX86AddSubSatIntrinsics(Builder, *CI, false, IsAdd);
} else if (IsX86 && Name.startswith("avx512.mask.palignr.")) {
Rep = UpgradeX86ALIGNIntrinsics(Builder, CI->getArgOperand(0),
CI->getArgOperand(1),
@@ -2376,24 +2504,8 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
CI->getArgOperand(2));
- } else if (IsX86 && Name.startswith("avx512.mask.pand.")) {
- Rep = Builder.CreateAnd(CI->getArgOperand(0), CI->getArgOperand(1));
- Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
- CI->getArgOperand(2));
- } else if (IsX86 && Name.startswith("avx512.mask.pandn.")) {
- Rep = Builder.CreateAnd(Builder.CreateNot(CI->getArgOperand(0)),
- CI->getArgOperand(1));
- Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
- CI->getArgOperand(2));
- } else if (IsX86 && Name.startswith("avx512.mask.por.")) {
- Rep = Builder.CreateOr(CI->getArgOperand(0), CI->getArgOperand(1));
- Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
- CI->getArgOperand(2));
- } else if (IsX86 && Name.startswith("avx512.mask.pxor.")) {
- Rep = Builder.CreateXor(CI->getArgOperand(0), CI->getArgOperand(1));
- Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
- CI->getArgOperand(2));
- } else if (IsX86 && Name.startswith("avx512.mask.and.")) {
+ } else if (IsX86 && (Name.startswith("avx512.mask.and.") ||
+ Name.startswith("avx512.mask.pand."))) {
VectorType *FTy = cast<VectorType>(CI->getType());
VectorType *ITy = VectorType::getInteger(FTy);
Rep = Builder.CreateAnd(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
@@ -2401,7 +2513,8 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
Rep = Builder.CreateBitCast(Rep, FTy);
Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
CI->getArgOperand(2));
- } else if (IsX86 && Name.startswith("avx512.mask.andn.")) {
+ } else if (IsX86 && (Name.startswith("avx512.mask.andn.") ||
+ Name.startswith("avx512.mask.pandn."))) {
VectorType *FTy = cast<VectorType>(CI->getType());
VectorType *ITy = VectorType::getInteger(FTy);
Rep = Builder.CreateNot(Builder.CreateBitCast(CI->getArgOperand(0), ITy));
@@ -2410,7 +2523,8 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
Rep = Builder.CreateBitCast(Rep, FTy);
Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
CI->getArgOperand(2));
- } else if (IsX86 && Name.startswith("avx512.mask.or.")) {
+ } else if (IsX86 && (Name.startswith("avx512.mask.or.") ||
+ Name.startswith("avx512.mask.por."))) {
VectorType *FTy = cast<VectorType>(CI->getType());
VectorType *ITy = VectorType::getInteger(FTy);
Rep = Builder.CreateOr(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
@@ -2418,7 +2532,8 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
Rep = Builder.CreateBitCast(Rep, FTy);
Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
CI->getArgOperand(2));
- } else if (IsX86 && Name.startswith("avx512.mask.xor.")) {
+ } else if (IsX86 && (Name.startswith("avx512.mask.xor.") ||
+ Name.startswith("avx512.mask.pxor."))) {
VectorType *FTy = cast<VectorType>(CI->getType());
VectorType *ITy = VectorType::getInteger(FTy);
Rep = Builder.CreateXor(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
@@ -2502,26 +2617,16 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
}
Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
CI->getArgOperand(2));
- } else if (IsX86 && Name.startswith("avx512.mask.max.p") &&
+ } else if (IsX86 && (Name.startswith("avx512.mask.max.p") ||
+ Name.startswith("avx512.mask.min.p")) &&
Name.drop_front(18) == ".512") {
- Intrinsic::ID IID;
- if (Name[17] == 's')
- IID = Intrinsic::x86_avx512_max_ps_512;
- else
- IID = Intrinsic::x86_avx512_max_pd_512;
-
- Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
- { CI->getArgOperand(0), CI->getArgOperand(1),
- CI->getArgOperand(4) });
- Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
- CI->getArgOperand(2));
- } else if (IsX86 && Name.startswith("avx512.mask.min.p") &&
- Name.drop_front(18) == ".512") {
- Intrinsic::ID IID;
- if (Name[17] == 's')
- IID = Intrinsic::x86_avx512_min_ps_512;
- else
- IID = Intrinsic::x86_avx512_min_pd_512;
+ bool IsDouble = Name[17] == 'd';
+ bool IsMin = Name[13] == 'i';
+ static const Intrinsic::ID MinMaxTbl[2][2] = {
+ { Intrinsic::x86_avx512_max_ps_512, Intrinsic::x86_avx512_max_pd_512 },
+ { Intrinsic::x86_avx512_min_ps_512, Intrinsic::x86_avx512_min_pd_512 }
+ };
+ Intrinsic::ID IID = MinMaxTbl[IsMin][IsDouble];
Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
{ CI->getArgOperand(0), CI->getArgOperand(1),
@@ -3065,62 +3170,7 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
Name.startswith("avx512.maskz.vpermt2var."))) {
bool ZeroMask = Name[11] == 'z';
bool IndexForm = Name[17] == 'i';
- unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
- unsigned EltWidth = CI->getType()->getScalarSizeInBits();
- bool IsFloat = CI->getType()->isFPOrFPVectorTy();
- Intrinsic::ID IID;
- if (VecWidth == 128 && EltWidth == 32 && IsFloat)
- IID = Intrinsic::x86_avx512_vpermi2var_ps_128;
- else if (VecWidth == 128 && EltWidth == 32 && !IsFloat)
- IID = Intrinsic::x86_avx512_vpermi2var_d_128;
- else if (VecWidth == 128 && EltWidth == 64 && IsFloat)
- IID = Intrinsic::x86_avx512_vpermi2var_pd_128;
- else if (VecWidth == 128 && EltWidth == 64 && !IsFloat)
- IID = Intrinsic::x86_avx512_vpermi2var_q_128;
- else if (VecWidth == 256 && EltWidth == 32 && IsFloat)
- IID = Intrinsic::x86_avx512_vpermi2var_ps_256;
- else if (VecWidth == 256 && EltWidth == 32 && !IsFloat)
- IID = Intrinsic::x86_avx512_vpermi2var_d_256;
- else if (VecWidth == 256 && EltWidth == 64 && IsFloat)
- IID = Intrinsic::x86_avx512_vpermi2var_pd_256;
- else if (VecWidth == 256 && EltWidth == 64 && !IsFloat)
- IID = Intrinsic::x86_avx512_vpermi2var_q_256;
- else if (VecWidth == 512 && EltWidth == 32 && IsFloat)
- IID = Intrinsic::x86_avx512_vpermi2var_ps_512;
- else if (VecWidth == 512 && EltWidth == 32 && !IsFloat)
- IID = Intrinsic::x86_avx512_vpermi2var_d_512;
- else if (VecWidth == 512 && EltWidth == 64 && IsFloat)
- IID = Intrinsic::x86_avx512_vpermi2var_pd_512;
- else if (VecWidth == 512 && EltWidth == 64 && !IsFloat)
- IID = Intrinsic::x86_avx512_vpermi2var_q_512;
- else if (VecWidth == 128 && EltWidth == 16)
- IID = Intrinsic::x86_avx512_vpermi2var_hi_128;
- else if (VecWidth == 256 && EltWidth == 16)
- IID = Intrinsic::x86_avx512_vpermi2var_hi_256;
- else if (VecWidth == 512 && EltWidth == 16)
- IID = Intrinsic::x86_avx512_vpermi2var_hi_512;
- else if (VecWidth == 128 && EltWidth == 8)
- IID = Intrinsic::x86_avx512_vpermi2var_qi_128;
- else if (VecWidth == 256 && EltWidth == 8)
- IID = Intrinsic::x86_avx512_vpermi2var_qi_256;
- else if (VecWidth == 512 && EltWidth == 8)
- IID = Intrinsic::x86_avx512_vpermi2var_qi_512;
- else
- llvm_unreachable("Unexpected intrinsic");
-
- Value *Args[] = { CI->getArgOperand(0) , CI->getArgOperand(1),
- CI->getArgOperand(2) };
-
- // If this isn't index form we need to swap operand 0 and 1.
- if (!IndexForm)
- std::swap(Args[0], Args[1]);
-
- Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
- Args);
- Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
- : Builder.CreateBitCast(CI->getArgOperand(1),
- CI->getType());
- Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
+ Rep = UpgradeX86VPERMT2Intrinsics(Builder, *CI, ZeroMask, IndexForm);
} else if (IsX86 && (Name.startswith("avx512.mask.vpdpbusd.") ||
Name.startswith("avx512.maskz.vpdpbusd.") ||
Name.startswith("avx512.mask.vpdpbusds.") ||
@@ -3181,6 +3231,39 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
: CI->getArgOperand(0);
Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
+ } else if (IsX86 && (Name == "addcarryx.u32" || Name == "addcarryx.u64" ||
+ Name == "addcarry.u32" || Name == "addcarry.u64" ||
+ Name == "subborrow.u32" || Name == "subborrow.u64")) {
+ Intrinsic::ID IID;
+ if (Name[0] == 'a' && Name.back() == '2')
+ IID = Intrinsic::x86_addcarry_32;
+ else if (Name[0] == 'a' && Name.back() == '4')
+ IID = Intrinsic::x86_addcarry_64;
+ else if (Name[0] == 's' && Name.back() == '2')
+ IID = Intrinsic::x86_subborrow_32;
+ else if (Name[0] == 's' && Name.back() == '4')
+ IID = Intrinsic::x86_subborrow_64;
+ else
+ llvm_unreachable("Unexpected intrinsic");
+
+ // Make a call with 3 operands.
+ Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2)};
+ Value *NewCall = Builder.CreateCall(
+ Intrinsic::getDeclaration(CI->getModule(), IID),
+ Args);
+
+ // Extract the second result and store it.
+ Value *Data = Builder.CreateExtractValue(NewCall, 1);
+ // Cast the pointer to the right type.
+ Value *Ptr = Builder.CreateBitCast(CI->getArgOperand(3),
+ llvm::PointerType::getUnqual(Data->getType()));
+ Builder.CreateAlignedStore(Data, Ptr, 1);
+ // Replace the original call result with the first result of the new call.
+ Value *CF = Builder.CreateExtractValue(NewCall, 0);
+
+ CI->replaceAllUsesWith(CF);
+ Rep = nullptr;
} else if (IsX86 && Name.startswith("avx512.mask.") &&
upgradeAVX512MaskToSelect(Name, Builder, *CI, Rep)) {
// Rep will be updated by the call in the condition.
@@ -3356,6 +3439,32 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
break;
}
+ case Intrinsic::x86_rdtscp: {
+ // This used to take 1 arguments. If we have no arguments, it is already
+ // upgraded.
+ if (CI->getNumOperands() == 0)
+ return;
+
+ NewCall = Builder.CreateCall(NewFn);
+ // Extract the second result and store it.
+ Value *Data = Builder.CreateExtractValue(NewCall, 1);
+ // Cast the pointer to the right type.
+ Value *Ptr = Builder.CreateBitCast(CI->getArgOperand(0),
+ llvm::PointerType::getUnqual(Data->getType()));
+ Builder.CreateAlignedStore(Data, Ptr, 1);
+ // Replace the original call result with the first result of the new call.
+ Value *TSC = Builder.CreateExtractValue(NewCall, 0);
+
+ std::string Name = CI->getName();
+ if (!Name.empty()) {
+ CI->setName(Name + ".old");
+ NewCall->setName(Name);
+ }
+ CI->replaceAllUsesWith(TSC);
+ CI->eraseFromParent();
+ return;
+ }
+
case Intrinsic::x86_sse41_insertps:
case Intrinsic::x86_sse41_dppd:
case Intrinsic::x86_sse41_dpps:
diff --git a/lib/IR/BasicBlock.cpp b/lib/IR/BasicBlock.cpp
index 7c3e5862d1cd..375924360dda 100644
--- a/lib/IR/BasicBlock.cpp
+++ b/lib/IR/BasicBlock.cpp
@@ -135,9 +135,10 @@ const Module *BasicBlock::getModule() const {
return getParent()->getParent();
}
-const TerminatorInst *BasicBlock::getTerminator() const {
- if (InstList.empty()) return nullptr;
- return dyn_cast<TerminatorInst>(&InstList.back());
+const Instruction *BasicBlock::getTerminator() const {
+ if (InstList.empty() || !InstList.back().isTerminator())
+ return nullptr;
+ return &InstList.back();
}
const CallInst *BasicBlock::getTerminatingMustTailCall() const {
@@ -205,10 +206,8 @@ const Instruction* BasicBlock::getFirstNonPHIOrDbgOrLifetime() const {
if (isa<PHINode>(I) || isa<DbgInfoIntrinsic>(I))
continue;
- if (auto *II = dyn_cast<IntrinsicInst>(&I))
- if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
- II->getIntrinsicID() == Intrinsic::lifetime_end)
- continue;
+ if (I.isLifetimeStartOrEnd())
+ continue;
return &I;
}
@@ -259,6 +258,14 @@ const BasicBlock *BasicBlock::getUniquePredecessor() const {
return PredBB;
}
+bool BasicBlock::hasNPredecessors(unsigned N) const {
+ return hasNItems(pred_begin(this), pred_end(this), N);
+}
+
+bool BasicBlock::hasNPredecessorsOrMore(unsigned N) const {
+ return hasNItemsOrMore(pred_begin(this), pred_end(this), N);
+}
+
const BasicBlock *BasicBlock::getSingleSuccessor() const {
succ_const_iterator SI = succ_begin(this), E = succ_end(this);
if (SI == E) return nullptr; // no successors
@@ -384,7 +391,7 @@ bool BasicBlock::isLegalToHoistInto() const {
assert(Term->getNumSuccessors() > 0);
// Instructions should not be hoisted across exception handling boundaries.
- return !Term->isExceptional();
+ return !Term->isExceptionalTerminator();
}
/// This splits a basic block into two at the specified
@@ -437,12 +444,12 @@ BasicBlock *BasicBlock::splitBasicBlock(iterator I, const Twine &BBName) {
}
void BasicBlock::replaceSuccessorsPhiUsesWith(BasicBlock *New) {
- TerminatorInst *TI = getTerminator();
+ Instruction *TI = getTerminator();
if (!TI)
// Cope with being called on a BasicBlock that doesn't have a terminator
// yet. Clang's CodeGenFunction::EmitReturnBlock() likes to do this.
return;
- for (BasicBlock *Succ : TI->successors()) {
+ for (BasicBlock *Succ : successors(TI)) {
// N.B. Succ might not be a complete BasicBlock, so don't assume
// that it ends with a non-phi instruction.
for (iterator II = Succ->begin(), IE = Succ->end(); II != IE; ++II) {
@@ -468,7 +475,7 @@ const LandingPadInst *BasicBlock::getLandingPadInst() const {
}
Optional<uint64_t> BasicBlock::getIrrLoopHeaderWeight() const {
- const TerminatorInst *TI = getTerminator();
+ const Instruction *TI = getTerminator();
if (MDNode *MDIrrLoopHeader =
TI->getMetadata(LLVMContext::MD_irr_loop)) {
MDString *MDName = cast<MDString>(MDIrrLoopHeader->getOperand(0));
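
A small caller-side sketch of the BasicBlock surface changed above (helper name made up for illustration): getTerminator() now hands back a plain Instruction, successors are walked through the free successors() function, and predecessor counts can be tested without materializing the whole predecessor list.

  #include "llvm/IR/BasicBlock.h"
  #include "llvm/IR/CFG.h"

  // Illustrative only.
  bool endsWithSingleEntryTerminator(const llvm::BasicBlock &BB) {
    const llvm::Instruction *Term = BB.getTerminator();
    if (!Term)
      return false;                       // block not finished yet
    for (const llvm::BasicBlock *Succ : llvm::successors(Term))
      (void)Succ;                         // successors of a generic terminator
    return BB.hasNPredecessors(1);        // exactly one predecessor
  }
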
diff --git a/lib/IR/CMakeLists.txt b/lib/IR/CMakeLists.txt
index 0a78a0f8d81b..2586f9872897 100644
--- a/lib/IR/CMakeLists.txt
+++ b/lib/IR/CMakeLists.txt
@@ -42,8 +42,10 @@ add_llvm_library(LLVMCore
Operator.cpp
OptBisect.cpp
Pass.cpp
+ PassInstrumentation.cpp
PassManager.cpp
PassRegistry.cpp
+ PassTimingInfo.cpp
SafepointIRVerifier.cpp
ProfileSummary.cpp
Statepoint.cpp
diff --git a/lib/IR/ConstantFold.cpp b/lib/IR/ConstantFold.cpp
index 90a8366d1696..57de6b042303 100644
--- a/lib/IR/ConstantFold.cpp
+++ b/lib/IR/ConstantFold.cpp
@@ -916,13 +916,14 @@ Constant *llvm::ConstantFoldInsertValueInstruction(Constant *Agg,
return ConstantVector::get(Result);
}
-
-Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode,
- Constant *C1, Constant *C2) {
+Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode, Constant *C1,
+ Constant *C2) {
assert(Instruction::isBinaryOp(Opcode) && "Non-binary instruction detected");
- // Handle UndefValue up front.
- if (isa<UndefValue>(C1) || isa<UndefValue>(C2)) {
+ // Handle scalar UndefValue. Vectors are always evaluated per element.
+ bool HasScalarUndef = !C1->getType()->isVectorTy() &&
+ (isa<UndefValue>(C1) || isa<UndefValue>(C2));
+ if (HasScalarUndef) {
switch (static_cast<Instruction::BinaryOps>(Opcode)) {
case Instruction::Xor:
if (isa<UndefValue>(C1) && isa<UndefValue>(C2))
@@ -1024,9 +1025,8 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode,
}
}
- // At this point neither constant should be an UndefValue.
- assert(!isa<UndefValue>(C1) && !isa<UndefValue>(C2) &&
- "Unexpected UndefValue");
+ // Neither constant should be UndefValue, unless these are vector constants.
+ assert(!HasScalarUndef && "Unexpected UndefValue");
// Handle simplifications when the RHS is a constant int.
if (ConstantInt *CI2 = dyn_cast<ConstantInt>(C2)) {
@@ -1218,7 +1218,7 @@ Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode,
}
}
} else if (VectorType *VTy = dyn_cast<VectorType>(C1->getType())) {
- // Perform elementwise folding.
+ // Fold each element and create a vector constant from those constants.
SmallVector<Constant*, 16> Result;
Type *Ty = IntegerType::get(VTy->getContext(), 32);
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
@@ -2052,7 +2052,7 @@ static bool isInBoundsIndices(ArrayRef<IndexTy> Idxs) {
static bool isIndexInRangeOfArrayType(uint64_t NumElements,
const ConstantInt *CI) {
// We cannot bounds check the index if it doesn't fit in an int64_t.
- if (CI->getValue().getActiveBits() > 64)
+ if (CI->getValue().getMinSignedBits() > 64)
return false;
// A negative index or an index past the end of our sequential type is
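
Why the switch from getActiveBits() to getMinSignedBits() matters, as a standalone illustration (values chosen only for the example): a wide negative index is all ones in two's complement, so getActiveBits() reports the full bit width even though the value trivially fits in an int64_t.

  #include "llvm/ADT/APInt.h"
  #include <cassert>

  void minSignedBitsExample() {
    llvm::APInt MinusOne(/*numBits=*/128, /*val=*/-1, /*isSigned=*/true);
    assert(MinusOne.getActiveBits() == 128);   // all bits participate
    assert(MinusOne.getMinSignedBits() == 1);  // but -1 needs only 1 signed bit
  }
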
diff --git a/lib/IR/Constants.cpp b/lib/IR/Constants.cpp
index 2351e7e4a389..d36967fdcfe1 100644
--- a/lib/IR/Constants.cpp
+++ b/lib/IR/Constants.cpp
@@ -184,18 +184,15 @@ bool Constant::isNotMinSignedValue() const {
if (const ConstantFP *CFP = dyn_cast<ConstantFP>(this))
return !CFP->getValueAPF().bitcastToAPInt().isMinSignedValue();
- // Check for constant vectors which are splats of INT_MIN values.
- if (const ConstantVector *CV = dyn_cast<ConstantVector>(this))
- if (Constant *Splat = CV->getSplatValue())
- return Splat->isNotMinSignedValue();
-
- // Check for constant vectors which are splats of INT_MIN values.
- if (const ConstantDataVector *CV = dyn_cast<ConstantDataVector>(this)) {
- if (CV->isSplat()) {
- if (CV->getElementType()->isFloatingPointTy())
- return !CV->getElementAsAPFloat(0).bitcastToAPInt().isMinSignedValue();
- return !CV->getElementAsAPInt(0).isMinSignedValue();
+ // Check that vectors don't contain INT_MIN
+ if (this->getType()->isVectorTy()) {
+ unsigned NumElts = this->getType()->getVectorNumElements();
+ for (unsigned i = 0; i != NumElts; ++i) {
+ Constant *Elt = this->getAggregateElement(i);
+ if (!Elt || !Elt->isNotMinSignedValue())
+ return false;
}
+ return true;
}
// It *may* contain INT_MIN, we can't tell.
@@ -353,8 +350,12 @@ Constant *Constant::getAggregateElement(unsigned Elt) const {
Constant *Constant::getAggregateElement(Constant *Elt) const {
assert(isa<IntegerType>(Elt->getType()) && "Index must be an integer");
- if (ConstantInt *CI = dyn_cast<ConstantInt>(Elt))
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(Elt)) {
+    // Check if the constant fits into a uint64_t.
+ if (CI->getValue().getActiveBits() > 64)
+ return nullptr;
return getAggregateElement(CI->getZExtValue());
+ }
return nullptr;
}
@@ -722,9 +723,9 @@ Constant *ConstantFP::get(Type *Ty, StringRef Str) {
return C;
}
-Constant *ConstantFP::getNaN(Type *Ty, bool Negative, unsigned Type) {
+Constant *ConstantFP::getNaN(Type *Ty, bool Negative, uint64_t Payload) {
const fltSemantics &Semantics = *TypeToFloatSemantics(Ty->getScalarType());
- APFloat NaN = APFloat::getNaN(Semantics, Negative, Type);
+ APFloat NaN = APFloat::getNaN(Semantics, Negative, Payload);
Constant *C = get(Ty->getContext(), NaN);
if (VectorType *VTy = dyn_cast<VectorType>(Ty))
@@ -733,6 +734,28 @@ Constant *ConstantFP::getNaN(Type *Ty, bool Negative, unsigned Type) {
return C;
}
+Constant *ConstantFP::getQNaN(Type *Ty, bool Negative, APInt *Payload) {
+ const fltSemantics &Semantics = *TypeToFloatSemantics(Ty->getScalarType());
+ APFloat NaN = APFloat::getQNaN(Semantics, Negative, Payload);
+ Constant *C = get(Ty->getContext(), NaN);
+
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
+ return ConstantVector::getSplat(VTy->getNumElements(), C);
+
+ return C;
+}
+
+Constant *ConstantFP::getSNaN(Type *Ty, bool Negative, APInt *Payload) {
+ const fltSemantics &Semantics = *TypeToFloatSemantics(Ty->getScalarType());
+ APFloat NaN = APFloat::getSNaN(Semantics, Negative, Payload);
+ Constant *C = get(Ty->getContext(), NaN);
+
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
+ return ConstantVector::getSplat(VTy->getNumElements(), C);
+
+ return C;
+}
+
Constant *ConstantFP::getNegativeZero(Type *Ty) {
const fltSemantics &Semantics = *TypeToFloatSemantics(Ty->getScalarType());
APFloat NegZero = APFloat::getZero(Semantics, /*Negative=*/true);
@@ -940,7 +963,7 @@ ConstantAggregate::ConstantAggregate(CompositeType *T, ValueTy VT,
ArrayRef<Constant *> V)
: Constant(T, VT, OperandTraits<ConstantAggregate>::op_end(this) - V.size(),
V.size()) {
- std::copy(V.begin(), V.end(), op_begin());
+ llvm::copy(V, op_begin());
// Check that types match, unless this is an opaque struct.
if (auto *ST = dyn_cast<StructType>(T))
@@ -1780,6 +1803,36 @@ Constant *ConstantExpr::getAddrSpaceCast(Constant *C, Type *DstTy,
return getFoldedCast(Instruction::AddrSpaceCast, C, DstTy, OnlyIfReduced);
}
+Constant *ConstantExpr::get(unsigned Opcode, Constant *C, unsigned Flags,
+ Type *OnlyIfReducedTy) {
+ // Check the operands for consistency first.
+ assert(Instruction::isUnaryOp(Opcode) &&
+ "Invalid opcode in unary constant expression");
+
+#ifndef NDEBUG
+ switch (Opcode) {
+ case Instruction::FNeg:
+ assert(C->getType()->isFPOrFPVectorTy() &&
+ "Tried to create a floating-point operation on a "
+ "non-floating-point type!");
+ break;
+ default:
+ break;
+ }
+#endif
+
+ // TODO: Try to constant fold operation.
+
+ if (OnlyIfReducedTy == C->getType())
+ return nullptr;
+
+ Constant *ArgVec[] = { C };
+ ConstantExprKeyType Key(Opcode, ArgVec, 0, Flags);
+
+ LLVMContextImpl *pImpl = C->getContext().pImpl;
+ return pImpl->ExprConstants.getOrCreate(C->getType(), Key);
+}
+
Constant *ConstantExpr::get(unsigned Opcode, Constant *C1, Constant *C2,
unsigned Flags, Type *OnlyIfReducedTy) {
// Check the operands for consistency first.
@@ -1946,9 +1999,8 @@ Constant *ConstantExpr::getGetElementPtr(Type *Ty, Constant *C,
if (!Ty)
Ty = cast<PointerType>(C->getType()->getScalarType())->getElementType();
else
- assert(
- Ty ==
- cast<PointerType>(C->getType()->getScalarType())->getContainedType(0u));
+ assert(Ty ==
+ cast<PointerType>(C->getType()->getScalarType())->getElementType());
if (Constant *FC =
ConstantFoldGetElementPtr(Ty, C, InBounds, InRangeIndex, Idxs))
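
Usage sketch for the quiet/signaling NaN constructors added above (illustrative only): a null payload selects the default payload, and vector types splat the scalar NaN, mirroring getNaN.

  #include "llvm/IR/Constants.h"
  #include "llvm/IR/Type.h"

  // Illustrative only.
  void makeNaNs(llvm::LLVMContext &Ctx) {
    llvm::Type *FloatTy = llvm::Type::getFloatTy(Ctx);
    llvm::Constant *Q = llvm::ConstantFP::getQNaN(FloatTy, /*Negative=*/false,
                                                  /*Payload=*/nullptr);
    llvm::Constant *S = llvm::ConstantFP::getSNaN(FloatTy, /*Negative=*/true,
                                                  /*Payload=*/nullptr);
    (void)Q;
    (void)S;
  }
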
diff --git a/lib/IR/ConstantsContext.h b/lib/IR/ConstantsContext.h
index e9f31e4ded68..eac171397084 100644
--- a/lib/IR/ConstantsContext.h
+++ b/lib/IR/ConstantsContext.h
@@ -529,7 +529,9 @@ struct ConstantExprKeyType {
ConstantExpr *create(TypeClass *Ty) const {
switch (Opcode) {
default:
- if (Instruction::isCast(Opcode))
+ if (Instruction::isCast(Opcode) ||
+ (Opcode >= Instruction::UnaryOpsBegin &&
+ Opcode < Instruction::UnaryOpsEnd))
return new UnaryConstantExpr(Opcode, Ops[0], Ty);
if ((Opcode >= Instruction::BinaryOpsBegin &&
Opcode < Instruction::BinaryOpsEnd))
diff --git a/lib/IR/Core.cpp b/lib/IR/Core.cpp
index bea4dee15c13..815797f4b7ea 100644
--- a/lib/IR/Core.cpp
+++ b/lib/IR/Core.cpp
@@ -15,8 +15,8 @@
#include "llvm-c/Core.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/IR/Attributes.h"
-#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
@@ -107,6 +107,14 @@ void LLVMContextSetYieldCallback(LLVMContextRef C, LLVMYieldCallback Callback,
unwrap(C)->setYieldCallback(YieldCallback, OpaqueHandle);
}
+LLVMBool LLVMContextShouldDiscardValueNames(LLVMContextRef C) {
+ return unwrap(C)->shouldDiscardValueNames();
+}
+
+void LLVMContextSetDiscardValueNames(LLVMContextRef C, LLVMBool Discard) {
+ unwrap(C)->setDiscardValueNames(Discard);
+}
+
void LLVMContextDispose(LLVMContextRef C) {
delete unwrap(C);
}
@@ -706,6 +714,10 @@ LLVMBool LLVMIsOpaqueStruct(LLVMTypeRef StructTy) {
return unwrap<StructType>(StructTy)->isOpaque();
}
+LLVMBool LLVMIsLiteralStruct(LLVMTypeRef StructTy) {
+ return unwrap<StructType>(StructTy)->isLiteral();
+}
+
LLVMTypeRef LLVMGetTypeByName(LLVMModuleRef M, const char *Name) {
return wrap(unwrap(M)->getTypeByName(Name));
}
@@ -868,6 +880,38 @@ void LLVMSetMetadata(LLVMValueRef Inst, unsigned KindID, LLVMValueRef Val) {
unwrap<Instruction>(Inst)->setMetadata(KindID, N);
}
+struct LLVMOpaqueValueMetadataEntry {
+ unsigned Kind;
+ LLVMMetadataRef Metadata;
+};
+
+using MetadataEntries = SmallVectorImpl<std::pair<unsigned, MDNode *>>;
+static LLVMValueMetadataEntry *
+llvm_getMetadata(size_t *NumEntries,
+ llvm::function_ref<void(MetadataEntries &)> AccessMD) {
+ SmallVector<std::pair<unsigned, MDNode *>, 8> MVEs;
+ AccessMD(MVEs);
+
+ LLVMOpaqueValueMetadataEntry *Result =
+ static_cast<LLVMOpaqueValueMetadataEntry *>(
+ safe_malloc(MVEs.size() * sizeof(LLVMOpaqueValueMetadataEntry)));
+ for (unsigned i = 0; i < MVEs.size(); ++i) {
+ const auto &ModuleFlag = MVEs[i];
+ Result[i].Kind = ModuleFlag.first;
+ Result[i].Metadata = wrap(ModuleFlag.second);
+ }
+ *NumEntries = MVEs.size();
+ return Result;
+}
+
+LLVMValueMetadataEntry *
+LLVMInstructionGetAllMetadataOtherThanDebugLoc(LLVMValueRef Value,
+ size_t *NumEntries) {
+ return llvm_getMetadata(NumEntries, [&Value](MetadataEntries &Entries) {
+ unwrap<Instruction>(Value)->getAllMetadata(Entries);
+ });
+}
+
/*--.. Conversion functions ................................................--*/
#define LLVM_DEFINE_VALUE_CAST(name) \
@@ -1065,6 +1109,54 @@ unsigned LLVMGetMDNodeNumOperands(LLVMValueRef V) {
return cast<MDNode>(MD->getMetadata())->getNumOperands();
}
+LLVMNamedMDNodeRef LLVMGetFirstNamedMetadata(LLVMModuleRef M) {
+ Module *Mod = unwrap(M);
+ Module::named_metadata_iterator I = Mod->named_metadata_begin();
+ if (I == Mod->named_metadata_end())
+ return nullptr;
+ return wrap(&*I);
+}
+
+LLVMNamedMDNodeRef LLVMGetLastNamedMetadata(LLVMModuleRef M) {
+ Module *Mod = unwrap(M);
+ Module::named_metadata_iterator I = Mod->named_metadata_end();
+ if (I == Mod->named_metadata_begin())
+ return nullptr;
+ return wrap(&*--I);
+}
+
+LLVMNamedMDNodeRef LLVMGetNextNamedMetadata(LLVMNamedMDNodeRef NMD) {
+ NamedMDNode *NamedNode = unwrap<NamedMDNode>(NMD);
+ Module::named_metadata_iterator I(NamedNode);
+ if (++I == NamedNode->getParent()->named_metadata_end())
+ return nullptr;
+ return wrap(&*I);
+}
+
+LLVMNamedMDNodeRef LLVMGetPreviousNamedMetadata(LLVMNamedMDNodeRef NMD) {
+ NamedMDNode *NamedNode = unwrap<NamedMDNode>(NMD);
+ Module::named_metadata_iterator I(NamedNode);
+ if (I == NamedNode->getParent()->named_metadata_begin())
+ return nullptr;
+ return wrap(&*--I);
+}
+
+LLVMNamedMDNodeRef LLVMGetNamedMetadata(LLVMModuleRef M,
+ const char *Name, size_t NameLen) {
+ return wrap(unwrap(M)->getNamedMetadata(StringRef(Name, NameLen)));
+}
+
+LLVMNamedMDNodeRef LLVMGetOrInsertNamedMetadata(LLVMModuleRef M,
+ const char *Name, size_t NameLen) {
+ return wrap(unwrap(M)->getOrInsertNamedMetadata({Name, NameLen}));
+}
+
+const char *LLVMGetNamedMetadataName(LLVMNamedMDNodeRef NMD, size_t *NameLen) {
+ NamedMDNode *NamedNode = unwrap<NamedMDNode>(NMD);
+ *NameLen = NamedNode->getName().size();
+ return NamedNode->getName().data();
+}
+
void LLVMGetMDNodeOperands(LLVMValueRef V, LLVMValueRef *Dest) {
auto *MD = cast<MetadataAsValue>(unwrap(V));
if (auto *MDV = dyn_cast<ValueAsMetadata>(MD->getMetadata())) {
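
A short sketch of the named-metadata iteration entry points added above (function name is illustrative):

  #include "llvm-c/Core.h"
  #include <cstdio>

  // Illustrative only.
  void dumpNamedMetadata(LLVMModuleRef M) {
    for (LLVMNamedMDNodeRef NMD = LLVMGetFirstNamedMetadata(M); NMD;
         NMD = LLVMGetNextNamedMetadata(NMD)) {
      size_t Len = 0;
      const char *Name = LLVMGetNamedMetadataName(NMD, &Len);
      std::printf("%.*s\n", (int)Len, Name);
    }
  }
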
@@ -1105,6 +1197,78 @@ void LLVMAddNamedMetadataOperand(LLVMModuleRef M, const char *Name,
N->addOperand(extractMDNode(unwrap<MetadataAsValue>(Val)));
}
+const char *LLVMGetDebugLocDirectory(LLVMValueRef Val, unsigned *Length) {
+ if (!Length) return nullptr;
+ StringRef S;
+ if (const auto *I = unwrap<Instruction>(Val)) {
+ S = I->getDebugLoc()->getDirectory();
+ } else if (const auto *GV = unwrap<GlobalVariable>(Val)) {
+ SmallVector<DIGlobalVariableExpression *, 1> GVEs;
+ GV->getDebugInfo(GVEs);
+ if (GVEs.size())
+ if (const DIGlobalVariable *DGV = GVEs[0]->getVariable())
+ S = DGV->getDirectory();
+ } else if (const auto *F = unwrap<Function>(Val)) {
+ if (const DISubprogram *DSP = F->getSubprogram())
+ S = DSP->getDirectory();
+ } else {
+ assert(0 && "Expected Instruction, GlobalVariable or Function");
+ return nullptr;
+ }
+ *Length = S.size();
+ return S.data();
+}
+
+const char *LLVMGetDebugLocFilename(LLVMValueRef Val, unsigned *Length) {
+ if (!Length) return nullptr;
+ StringRef S;
+ if (const auto *I = unwrap<Instruction>(Val)) {
+ S = I->getDebugLoc()->getFilename();
+ } else if (const auto *GV = unwrap<GlobalVariable>(Val)) {
+ SmallVector<DIGlobalVariableExpression *, 1> GVEs;
+ GV->getDebugInfo(GVEs);
+ if (GVEs.size())
+ if (const DIGlobalVariable *DGV = GVEs[0]->getVariable())
+ S = DGV->getFilename();
+ } else if (const auto *F = unwrap<Function>(Val)) {
+ if (const DISubprogram *DSP = F->getSubprogram())
+ S = DSP->getFilename();
+ } else {
+ assert(0 && "Expected Instruction, GlobalVariable or Function");
+ return nullptr;
+ }
+ *Length = S.size();
+ return S.data();
+}
+
+unsigned LLVMGetDebugLocLine(LLVMValueRef Val) {
+ unsigned L = 0;
+ if (const auto *I = unwrap<Instruction>(Val)) {
+ L = I->getDebugLoc()->getLine();
+ } else if (const auto *GV = unwrap<GlobalVariable>(Val)) {
+ SmallVector<DIGlobalVariableExpression *, 1> GVEs;
+ GV->getDebugInfo(GVEs);
+ if (GVEs.size())
+ if (const DIGlobalVariable *DGV = GVEs[0]->getVariable())
+ L = DGV->getLine();
+ } else if (const auto *F = unwrap<Function>(Val)) {
+ if (const DISubprogram *DSP = F->getSubprogram())
+ L = DSP->getLine();
+ } else {
+ assert(0 && "Expected Instruction, GlobalVariable or Function");
+ return -1;
+ }
+ return L;
+}
+
+unsigned LLVMGetDebugLocColumn(LLVMValueRef Val) {
+ unsigned C = 0;
+ if (const auto *I = unwrap<Instruction>(Val))
+ if (const auto &L = I->getDebugLoc())
+ C = L->getColumn();
+ return C;
+}
+
/*--.. Operations on scalar constants ......................................--*/
LLVMValueRef LLVMConstInt(LLVMTypeRef IntTy, unsigned long long N,
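
Sketch of the new source-location accessors in use (illustrative; it assumes the value actually carries debug info, since the accessors dereference the debug location):

  #include "llvm-c/Core.h"
  #include <cstdio>

  // Illustrative only.
  void printLocation(LLVMValueRef V) {
    unsigned Len = 0;
    const char *File = LLVMGetDebugLocFilename(V, &Len);
    unsigned Line = LLVMGetDebugLocLine(V);
    unsigned Col = LLVMGetDebugLocColumn(V);
    std::printf("%.*s:%u:%u\n", (int)Len, File, Line, Col);
  }
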
@@ -1453,17 +1617,21 @@ LLVMValueRef LLVMConstGEP(LLVMValueRef ConstantVal,
LLVMValueRef *ConstantIndices, unsigned NumIndices) {
ArrayRef<Constant *> IdxList(unwrap<Constant>(ConstantIndices, NumIndices),
NumIndices);
- return wrap(ConstantExpr::getGetElementPtr(
- nullptr, unwrap<Constant>(ConstantVal), IdxList));
+ Constant *Val = unwrap<Constant>(ConstantVal);
+ Type *Ty =
+ cast<PointerType>(Val->getType()->getScalarType())->getElementType();
+ return wrap(ConstantExpr::getGetElementPtr(Ty, Val, IdxList));
}
LLVMValueRef LLVMConstInBoundsGEP(LLVMValueRef ConstantVal,
LLVMValueRef *ConstantIndices,
unsigned NumIndices) {
- Constant* Val = unwrap<Constant>(ConstantVal);
ArrayRef<Constant *> IdxList(unwrap<Constant>(ConstantIndices, NumIndices),
NumIndices);
- return wrap(ConstantExpr::getInBoundsGetElementPtr(nullptr, Val, IdxList));
+ Constant *Val = unwrap<Constant>(ConstantVal);
+ Type *Ty =
+ cast<PointerType>(Val->getType()->getScalarType())->getElementType();
+ return wrap(ConstantExpr::getInBoundsGetElementPtr(Ty, Val, IdxList));
}
LLVMValueRef LLVMConstTrunc(LLVMValueRef ConstantVal, LLVMTypeRef ToType) {
@@ -1792,6 +1960,10 @@ void LLVMSetUnnamedAddr(LLVMValueRef Global, LLVMBool HasUnnamedAddr) {
: GlobalValue::UnnamedAddr::None);
}
+LLVMTypeRef LLVMGlobalGetValueType(LLVMValueRef Global) {
+ return wrap(unwrap<GlobalValue>(Global)->getValueType());
+}
+
/*--.. Operations on global variables, load and store instructions .........--*/
unsigned LLVMGetAlignment(LLVMValueRef V) {
@@ -1824,6 +1996,49 @@ void LLVMSetAlignment(LLVMValueRef V, unsigned Bytes) {
"only GlobalValue, AllocaInst, LoadInst and StoreInst have alignment");
}
+LLVMValueMetadataEntry *LLVMGlobalCopyAllMetadata(LLVMValueRef Value,
+ size_t *NumEntries) {
+ return llvm_getMetadata(NumEntries, [&Value](MetadataEntries &Entries) {
+ if (Instruction *Instr = dyn_cast<Instruction>(unwrap(Value))) {
+ Instr->getAllMetadata(Entries);
+ } else {
+ unwrap<GlobalObject>(Value)->getAllMetadata(Entries);
+ }
+ });
+}
+
+unsigned LLVMValueMetadataEntriesGetKind(LLVMValueMetadataEntry *Entries,
+ unsigned Index) {
+ LLVMOpaqueValueMetadataEntry MVE =
+ static_cast<LLVMOpaqueValueMetadataEntry>(Entries[Index]);
+ return MVE.Kind;
+}
+
+LLVMMetadataRef
+LLVMValueMetadataEntriesGetMetadata(LLVMValueMetadataEntry *Entries,
+ unsigned Index) {
+ LLVMOpaqueValueMetadataEntry MVE =
+ static_cast<LLVMOpaqueValueMetadataEntry>(Entries[Index]);
+ return MVE.Metadata;
+}
+
+void LLVMDisposeValueMetadataEntries(LLVMValueMetadataEntry *Entries) {
+ free(Entries);
+}
+
+void LLVMGlobalSetMetadata(LLVMValueRef Global, unsigned Kind,
+ LLVMMetadataRef MD) {
+ unwrap<GlobalObject>(Global)->setMetadata(Kind, unwrap<MDNode>(MD));
+}
+
+void LLVMGlobalEraseMetadata(LLVMValueRef Global, unsigned Kind) {
+ unwrap<GlobalObject>(Global)->eraseMetadata(Kind);
+}
+
+void LLVMGlobalClearMetadata(LLVMValueRef Global) {
+ unwrap<GlobalObject>(Global)->clearMetadata();
+}
+
/*--.. Operations on global variables ......................................--*/
LLVMValueRef LLVMAddGlobal(LLVMModuleRef M, LLVMTypeRef Ty, const char *Name) {
@@ -2076,6 +2291,50 @@ unsigned LLVMGetIntrinsicID(LLVMValueRef Fn) {
return 0;
}
+static Intrinsic::ID llvm_map_to_intrinsic_id(unsigned ID) {
+ assert(ID < llvm::Intrinsic::num_intrinsics && "Intrinsic ID out of range");
+ return llvm::Intrinsic::ID(ID);
+}
+
+LLVMValueRef LLVMGetIntrinsicDeclaration(LLVMModuleRef Mod,
+ unsigned ID,
+ LLVMTypeRef *ParamTypes,
+ size_t ParamCount) {
+ ArrayRef<Type*> Tys(unwrap(ParamTypes), ParamCount);
+ auto IID = llvm_map_to_intrinsic_id(ID);
+ return wrap(llvm::Intrinsic::getDeclaration(unwrap(Mod), IID, Tys));
+}
+
+const char *LLVMIntrinsicGetName(unsigned ID, size_t *NameLength) {
+ auto IID = llvm_map_to_intrinsic_id(ID);
+ auto Str = llvm::Intrinsic::getName(IID);
+ *NameLength = Str.size();
+ return Str.data();
+}
+
+LLVMTypeRef LLVMIntrinsicGetType(LLVMContextRef Ctx, unsigned ID,
+ LLVMTypeRef *ParamTypes, size_t ParamCount) {
+ auto IID = llvm_map_to_intrinsic_id(ID);
+ ArrayRef<Type*> Tys(unwrap(ParamTypes), ParamCount);
+ return wrap(llvm::Intrinsic::getType(*unwrap(Ctx), IID, Tys));
+}
+
+const char *LLVMIntrinsicCopyOverloadedName(unsigned ID,
+ LLVMTypeRef *ParamTypes,
+ size_t ParamCount,
+ size_t *NameLength) {
+ auto IID = llvm_map_to_intrinsic_id(ID);
+ ArrayRef<Type*> Tys(unwrap(ParamTypes), ParamCount);
+ auto Str = llvm::Intrinsic::getName(IID, Tys);
+ *NameLength = Str.length();
+ return strdup(Str.c_str());
+}
+
+LLVMBool LLVMIntrinsicIsOverloaded(unsigned ID) {
+ auto IID = llvm_map_to_intrinsic_id(ID);
+ return llvm::Intrinsic::isOverloaded(IID);
+}
+
unsigned LLVMGetFunctionCallConv(LLVMValueRef Fn) {
return unwrap<Function>(Fn)->getCallingConv();
}
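
Sketch of the new intrinsic helpers above (illustrative; ID is assumed to be a valid intrinsic ID obtained elsewhere, and the copied overloaded name must be freed by the caller since it is strdup'd):

  #include "llvm-c/Core.h"
  #include <cstdlib>

  // Illustrative only.
  LLVMValueRef declareOverloadedIntrinsic(LLVMModuleRef M, unsigned ID,
                                          LLVMTypeRef OverloadTy) {
    LLVMTypeRef Params[] = {OverloadTy};
    if (LLVMIntrinsicIsOverloaded(ID)) {
      size_t Len = 0;
      const char *Name = LLVMIntrinsicCopyOverloadedName(ID, Params, 1, &Len);
      std::free(const_cast<char *>(Name)); // caller owns the strdup'd buffer
    }
    return LLVMGetIntrinsicDeclaration(M, ID, Params, 1);
  }
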
@@ -2277,6 +2536,11 @@ LLVMBasicBlockRef LLVMGetPreviousBasicBlock(LLVMBasicBlockRef BB) {
return wrap(&*--I);
}
+LLVMBasicBlockRef LLVMCreateBasicBlockInContext(LLVMContextRef C,
+ const char *Name) {
+ return wrap(llvm::BasicBlock::Create(*unwrap(C), Name));
+}
+
LLVMBasicBlockRef LLVMAppendBasicBlockInContext(LLVMContextRef C,
LLVMValueRef FnRef,
const char *Name) {
@@ -2391,47 +2655,52 @@ LLVMValueRef LLVMInstructionClone(LLVMValueRef Inst) {
return nullptr;
}
+LLVMValueRef LLVMIsATerminatorInst(LLVMValueRef Inst) {
+ Instruction *I = dyn_cast<Instruction>(unwrap(Inst));
+ return (I && I->isTerminator()) ? wrap(I) : nullptr;
+}
+
unsigned LLVMGetNumArgOperands(LLVMValueRef Instr) {
if (FuncletPadInst *FPI = dyn_cast<FuncletPadInst>(unwrap(Instr))) {
return FPI->getNumArgOperands();
}
- return CallSite(unwrap<Instruction>(Instr)).getNumArgOperands();
+ return unwrap<CallBase>(Instr)->getNumArgOperands();
}
/*--.. Call and invoke instructions ........................................--*/
unsigned LLVMGetInstructionCallConv(LLVMValueRef Instr) {
- return CallSite(unwrap<Instruction>(Instr)).getCallingConv();
+ return unwrap<CallBase>(Instr)->getCallingConv();
}
void LLVMSetInstructionCallConv(LLVMValueRef Instr, unsigned CC) {
- return CallSite(unwrap<Instruction>(Instr))
- .setCallingConv(static_cast<CallingConv::ID>(CC));
+ return unwrap<CallBase>(Instr)->setCallingConv(
+ static_cast<CallingConv::ID>(CC));
}
void LLVMSetInstrParamAlignment(LLVMValueRef Instr, unsigned index,
unsigned align) {
- CallSite Call = CallSite(unwrap<Instruction>(Instr));
+ auto *Call = unwrap<CallBase>(Instr);
Attribute AlignAttr = Attribute::getWithAlignment(Call->getContext(), align);
- Call.addAttribute(index, AlignAttr);
+ Call->addAttribute(index, AlignAttr);
}
void LLVMAddCallSiteAttribute(LLVMValueRef C, LLVMAttributeIndex Idx,
LLVMAttributeRef A) {
- CallSite(unwrap<Instruction>(C)).addAttribute(Idx, unwrap(A));
+ unwrap<CallBase>(C)->addAttribute(Idx, unwrap(A));
}
unsigned LLVMGetCallSiteAttributeCount(LLVMValueRef C,
LLVMAttributeIndex Idx) {
- auto CS = CallSite(unwrap<Instruction>(C));
- auto AS = CS.getAttributes().getAttributes(Idx);
+ auto *Call = unwrap<CallBase>(C);
+ auto AS = Call->getAttributes().getAttributes(Idx);
return AS.getNumAttributes();
}
void LLVMGetCallSiteAttributes(LLVMValueRef C, LLVMAttributeIndex Idx,
LLVMAttributeRef *Attrs) {
- auto CS = CallSite(unwrap<Instruction>(C));
- auto AS = CS.getAttributes().getAttributes(Idx);
+ auto *Call = unwrap<CallBase>(C);
+ auto AS = Call->getAttributes().getAttributes(Idx);
for (auto A : AS)
*Attrs++ = wrap(A);
}
@@ -2439,30 +2708,32 @@ void LLVMGetCallSiteAttributes(LLVMValueRef C, LLVMAttributeIndex Idx,
LLVMAttributeRef LLVMGetCallSiteEnumAttribute(LLVMValueRef C,
LLVMAttributeIndex Idx,
unsigned KindID) {
- return wrap(CallSite(unwrap<Instruction>(C))
- .getAttribute(Idx, (Attribute::AttrKind)KindID));
+ return wrap(
+ unwrap<CallBase>(C)->getAttribute(Idx, (Attribute::AttrKind)KindID));
}
LLVMAttributeRef LLVMGetCallSiteStringAttribute(LLVMValueRef C,
LLVMAttributeIndex Idx,
const char *K, unsigned KLen) {
- return wrap(CallSite(unwrap<Instruction>(C))
- .getAttribute(Idx, StringRef(K, KLen)));
+ return wrap(unwrap<CallBase>(C)->getAttribute(Idx, StringRef(K, KLen)));
}
void LLVMRemoveCallSiteEnumAttribute(LLVMValueRef C, LLVMAttributeIndex Idx,
unsigned KindID) {
- CallSite(unwrap<Instruction>(C))
- .removeAttribute(Idx, (Attribute::AttrKind)KindID);
+ unwrap<CallBase>(C)->removeAttribute(Idx, (Attribute::AttrKind)KindID);
}
void LLVMRemoveCallSiteStringAttribute(LLVMValueRef C, LLVMAttributeIndex Idx,
const char *K, unsigned KLen) {
- CallSite(unwrap<Instruction>(C)).removeAttribute(Idx, StringRef(K, KLen));
+ unwrap<CallBase>(C)->removeAttribute(Idx, StringRef(K, KLen));
}
LLVMValueRef LLVMGetCalledValue(LLVMValueRef Instr) {
- return wrap(CallSite(unwrap<Instruction>(Instr)).getCalledValue());
+ return wrap(unwrap<CallBase>(Instr)->getCalledValue());
+}
+
+LLVMTypeRef LLVMGetCalledFunctionType(LLVMValueRef Instr) {
+ return wrap(unwrap<CallBase>(Instr)->getFunctionType());
}
/*--.. Operations on call instructions (only) ..............................--*/
@@ -2506,15 +2777,15 @@ void LLVMSetUnwindDest(LLVMValueRef Invoke, LLVMBasicBlockRef B) {
/*--.. Operations on terminators ...........................................--*/
unsigned LLVMGetNumSuccessors(LLVMValueRef Term) {
- return unwrap<TerminatorInst>(Term)->getNumSuccessors();
+ return unwrap<Instruction>(Term)->getNumSuccessors();
}
LLVMBasicBlockRef LLVMGetSuccessor(LLVMValueRef Term, unsigned i) {
- return wrap(unwrap<TerminatorInst>(Term)->getSuccessor(i));
+ return wrap(unwrap<Instruction>(Term)->getSuccessor(i));
}
void LLVMSetSuccessor(LLVMValueRef Term, unsigned i, LLVMBasicBlockRef block) {
- return unwrap<TerminatorInst>(Term)->setSuccessor(i,unwrap(block));
+ return unwrap<Instruction>(Term)->setSuccessor(i, unwrap(block));
}
/*--.. Operations on branch instructions (only) ............................--*/
@@ -2584,6 +2855,8 @@ unsigned LLVMGetNumIndices(LLVMValueRef Inst) {
return EV->getNumIndices();
if (auto *IV = dyn_cast<InsertValueInst>(I))
return IV->getNumIndices();
+ if (auto *CE = dyn_cast<ConstantExpr>(I))
+ return CE->getIndices().size();
llvm_unreachable(
"LLVMGetNumIndices applies only to extractvalue and insertvalue!");
}
@@ -2594,6 +2867,8 @@ const unsigned *LLVMGetIndices(LLVMValueRef Inst) {
return EV->getIndices().data();
if (auto *IV = dyn_cast<InsertValueInst>(I))
return IV->getIndices().data();
+ if (auto *CE = dyn_cast<ConstantExpr>(I))
+ return CE->getIndices().data();
llvm_unreachable(
"LLVMGetIndices applies only to extractvalue and insertvalue!");
}
@@ -2704,9 +2979,22 @@ LLVMValueRef LLVMBuildInvoke(LLVMBuilderRef B, LLVMValueRef Fn,
LLVMValueRef *Args, unsigned NumArgs,
LLVMBasicBlockRef Then, LLVMBasicBlockRef Catch,
const char *Name) {
- return wrap(unwrap(B)->CreateInvoke(unwrap(Fn), unwrap(Then), unwrap(Catch),
- makeArrayRef(unwrap(Args), NumArgs),
- Name));
+ Value *V = unwrap(Fn);
+ FunctionType *FnT =
+ cast<FunctionType>(cast<PointerType>(V->getType())->getElementType());
+
+ return wrap(
+ unwrap(B)->CreateInvoke(FnT, unwrap(Fn), unwrap(Then), unwrap(Catch),
+ makeArrayRef(unwrap(Args), NumArgs), Name));
+}
+
+LLVMValueRef LLVMBuildInvoke2(LLVMBuilderRef B, LLVMTypeRef Ty, LLVMValueRef Fn,
+ LLVMValueRef *Args, unsigned NumArgs,
+ LLVMBasicBlockRef Then, LLVMBasicBlockRef Catch,
+ const char *Name) {
+ return wrap(unwrap(B)->CreateInvoke(
+ unwrap<FunctionType>(Ty), unwrap(Fn), unwrap(Then), unwrap(Catch),
+ makeArrayRef(unwrap(Args), NumArgs), Name));
}
LLVMValueRef LLVMBuildLandingPad(LLVMBuilderRef B, LLVMTypeRef Ty,
@@ -3021,6 +3309,30 @@ LLVMValueRef LLVMBuildArrayMalloc(LLVMBuilderRef B, LLVMTypeRef Ty,
return wrap(unwrap(B)->Insert(Malloc, Twine(Name)));
}
+LLVMValueRef LLVMBuildMemSet(LLVMBuilderRef B, LLVMValueRef Ptr,
+ LLVMValueRef Val, LLVMValueRef Len,
+ unsigned Align) {
+ return wrap(unwrap(B)->CreateMemSet(unwrap(Ptr), unwrap(Val), unwrap(Len), Align));
+}
+
+LLVMValueRef LLVMBuildMemCpy(LLVMBuilderRef B,
+ LLVMValueRef Dst, unsigned DstAlign,
+ LLVMValueRef Src, unsigned SrcAlign,
+ LLVMValueRef Size) {
+ return wrap(unwrap(B)->CreateMemCpy(unwrap(Dst), DstAlign,
+ unwrap(Src), SrcAlign,
+ unwrap(Size)));
+}
+
+LLVMValueRef LLVMBuildMemMove(LLVMBuilderRef B,
+ LLVMValueRef Dst, unsigned DstAlign,
+ LLVMValueRef Src, unsigned SrcAlign,
+ LLVMValueRef Size) {
+ return wrap(unwrap(B)->CreateMemMove(unwrap(Dst), DstAlign,
+ unwrap(Src), SrcAlign,
+ unwrap(Size)));
+}
+
LLVMValueRef LLVMBuildAlloca(LLVMBuilderRef B, LLVMTypeRef Ty,
const char *Name) {
return wrap(unwrap(B)->CreateAlloca(unwrap(Ty), nullptr, Name));
@@ -3038,7 +3350,15 @@ LLVMValueRef LLVMBuildFree(LLVMBuilderRef B, LLVMValueRef PointerVal) {
LLVMValueRef LLVMBuildLoad(LLVMBuilderRef B, LLVMValueRef PointerVal,
const char *Name) {
- return wrap(unwrap(B)->CreateLoad(unwrap(PointerVal), Name));
+ Value *V = unwrap(PointerVal);
+ PointerType *Ty = cast<PointerType>(V->getType());
+
+ return wrap(unwrap(B)->CreateLoad(Ty->getElementType(), V, Name));
+}
+
+LLVMValueRef LLVMBuildLoad2(LLVMBuilderRef B, LLVMTypeRef Ty,
+ LLVMValueRef PointerVal, const char *Name) {
+ return wrap(unwrap(B)->CreateLoad(unwrap(Ty), unwrap(PointerVal), Name));
}
LLVMValueRef LLVMBuildStore(LLVMBuilderRef B, LLVMValueRef Val,
@@ -3093,20 +3413,50 @@ LLVMValueRef LLVMBuildGEP(LLVMBuilderRef B, LLVMValueRef Pointer,
LLVMValueRef *Indices, unsigned NumIndices,
const char *Name) {
ArrayRef<Value *> IdxList(unwrap(Indices), NumIndices);
- return wrap(unwrap(B)->CreateGEP(nullptr, unwrap(Pointer), IdxList, Name));
+ Value *Val = unwrap(Pointer);
+ Type *Ty =
+ cast<PointerType>(Val->getType()->getScalarType())->getElementType();
+ return wrap(unwrap(B)->CreateGEP(Ty, Val, IdxList, Name));
+}
+
+LLVMValueRef LLVMBuildGEP2(LLVMBuilderRef B, LLVMTypeRef Ty,
+ LLVMValueRef Pointer, LLVMValueRef *Indices,
+ unsigned NumIndices, const char *Name) {
+ ArrayRef<Value *> IdxList(unwrap(Indices), NumIndices);
+ return wrap(unwrap(B)->CreateGEP(unwrap(Ty), unwrap(Pointer), IdxList, Name));
}
LLVMValueRef LLVMBuildInBoundsGEP(LLVMBuilderRef B, LLVMValueRef Pointer,
LLVMValueRef *Indices, unsigned NumIndices,
const char *Name) {
ArrayRef<Value *> IdxList(unwrap(Indices), NumIndices);
+ Value *Val = unwrap(Pointer);
+ Type *Ty =
+ cast<PointerType>(Val->getType()->getScalarType())->getElementType();
+ return wrap(unwrap(B)->CreateInBoundsGEP(Ty, Val, IdxList, Name));
+}
+
+LLVMValueRef LLVMBuildInBoundsGEP2(LLVMBuilderRef B, LLVMTypeRef Ty,
+ LLVMValueRef Pointer, LLVMValueRef *Indices,
+ unsigned NumIndices, const char *Name) {
+ ArrayRef<Value *> IdxList(unwrap(Indices), NumIndices);
return wrap(
- unwrap(B)->CreateInBoundsGEP(nullptr, unwrap(Pointer), IdxList, Name));
+ unwrap(B)->CreateInBoundsGEP(unwrap(Ty), unwrap(Pointer), IdxList, Name));
}
LLVMValueRef LLVMBuildStructGEP(LLVMBuilderRef B, LLVMValueRef Pointer,
unsigned Idx, const char *Name) {
- return wrap(unwrap(B)->CreateStructGEP(nullptr, unwrap(Pointer), Idx, Name));
+ Value *Val = unwrap(Pointer);
+ Type *Ty =
+ cast<PointerType>(Val->getType()->getScalarType())->getElementType();
+ return wrap(unwrap(B)->CreateStructGEP(Ty, Val, Idx, Name));
+}
+
+LLVMValueRef LLVMBuildStructGEP2(LLVMBuilderRef B, LLVMTypeRef Ty,
+ LLVMValueRef Pointer, unsigned Idx,
+ const char *Name) {
+ return wrap(
+ unwrap(B)->CreateStructGEP(unwrap(Ty), unwrap(Pointer), Idx, Name));
}
LLVMValueRef LLVMBuildGlobalString(LLVMBuilderRef B, const char *Str,
@@ -3248,6 +3598,13 @@ LLVMValueRef LLVMBuildPointerCast(LLVMBuilderRef B, LLVMValueRef Val,
return wrap(unwrap(B)->CreatePointerCast(unwrap(Val), unwrap(DestTy), Name));
}
+LLVMValueRef LLVMBuildIntCast2(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, LLVMBool IsSigned,
+ const char *Name) {
+ return wrap(
+ unwrap(B)->CreateIntCast(unwrap(Val), unwrap(DestTy), IsSigned, Name));
+}
+
LLVMValueRef LLVMBuildIntCast(LLVMBuilderRef B, LLVMValueRef Val,
LLVMTypeRef DestTy, const char *Name) {
return wrap(unwrap(B)->CreateIntCast(unwrap(Val), unwrap(DestTy),
@@ -3284,9 +3641,20 @@ LLVMValueRef LLVMBuildPhi(LLVMBuilderRef B, LLVMTypeRef Ty, const char *Name) {
LLVMValueRef LLVMBuildCall(LLVMBuilderRef B, LLVMValueRef Fn,
LLVMValueRef *Args, unsigned NumArgs,
const char *Name) {
- return wrap(unwrap(B)->CreateCall(unwrap(Fn),
- makeArrayRef(unwrap(Args), NumArgs),
- Name));
+ Value *V = unwrap(Fn);
+ FunctionType *FnT =
+ cast<FunctionType>(cast<PointerType>(V->getType())->getElementType());
+
+ return wrap(unwrap(B)->CreateCall(FnT, unwrap(Fn),
+ makeArrayRef(unwrap(Args), NumArgs), Name));
+}
+
+LLVMValueRef LLVMBuildCall2(LLVMBuilderRef B, LLVMTypeRef Ty, LLVMValueRef Fn,
+ LLVMValueRef *Args, unsigned NumArgs,
+ const char *Name) {
+ FunctionType *FTy = unwrap<FunctionType>(Ty);
+ return wrap(unwrap(B)->CreateCall(FTy, unwrap(Fn),
+ makeArrayRef(unwrap(Args), NumArgs), Name));
}
LLVMValueRef LLVMBuildSelect(LLVMBuilderRef B, LLVMValueRef If,
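
The explicitly-typed builder entry points added above (LLVMBuildCall2, LLVMBuildLoad2, LLVMBuildGEP2, and friends) take the callee or pointee type directly instead of recovering it from the pointer operand. A minimal sketch, with all names illustrative:

  #include "llvm-c/Core.h"

  // Illustrative only.
  LLVMValueRef loadThenCall(LLVMBuilderRef B, LLVMTypeRef I32, LLVMTypeRef FnTy,
                            LLVMValueRef Slot, LLVMValueRef Fn) {
    LLVMValueRef V = LLVMBuildLoad2(B, I32, Slot, "v");
    LLVMValueRef Args[] = {V};
    return LLVMBuildCall2(B, FnTy, Fn, Args, 1, "call");
  }
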
diff --git a/lib/IR/DIBuilder.cpp b/lib/IR/DIBuilder.cpp
index 5c5477f4f40f..fb81634a2868 100644
--- a/lib/IR/DIBuilder.cpp
+++ b/lib/IR/DIBuilder.cpp
@@ -139,7 +139,8 @@ DICompileUnit *DIBuilder::createCompileUnit(
unsigned Lang, DIFile *File, StringRef Producer, bool isOptimized,
StringRef Flags, unsigned RunTimeVer, StringRef SplitName,
DICompileUnit::DebugEmissionKind Kind, uint64_t DWOId,
- bool SplitDebugInlining, bool DebugInfoForProfiling, bool GnuPubnames) {
+ bool SplitDebugInlining, bool DebugInfoForProfiling,
+ DICompileUnit::DebugNameTableKind NameTableKind, bool RangesBaseAddress) {
assert(((Lang <= dwarf::DW_LANG_Fortran08 && Lang >= dwarf::DW_LANG_C89) ||
(Lang <= dwarf::DW_LANG_hi_user && Lang >= dwarf::DW_LANG_lo_user)) &&
@@ -149,7 +150,8 @@ DICompileUnit *DIBuilder::createCompileUnit(
CUNode = DICompileUnit::getDistinct(
VMContext, Lang, File, Producer, isOptimized, Flags, RunTimeVer,
SplitName, Kind, nullptr, nullptr, nullptr, nullptr, nullptr, DWOId,
- SplitDebugInlining, DebugInfoForProfiling, GnuPubnames);
+ SplitDebugInlining, DebugInfoForProfiling, NameTableKind,
+ RangesBaseAddress);
// Create a named metadata so that it is easier to find cu in a module.
NamedMDNode *NMD = M.getOrInsertNamedMetadata("llvm.dbg.cu");
@@ -256,10 +258,11 @@ DIBasicType *DIBuilder::createNullPtrType() {
}
DIBasicType *DIBuilder::createBasicType(StringRef Name, uint64_t SizeInBits,
- unsigned Encoding) {
+ unsigned Encoding,
+ DINode::DIFlags Flags) {
assert(!Name.empty() && "Unable to create type without name");
return DIBasicType::get(VMContext, dwarf::DW_TAG_base_type, Name, SizeInBits,
- 0, Encoding);
+ 0, Encoding, Flags);
}
DIDerivedType *DIBuilder::createQualifiedType(unsigned Tag, DIType *FromTy) {
@@ -345,13 +348,10 @@ static ConstantAsMetadata *getConstantOrNull(Constant *C) {
return nullptr;
}
-DIDerivedType *DIBuilder::createVariantMemberType(DIScope *Scope, StringRef Name,
- DIFile *File, unsigned LineNumber,
- uint64_t SizeInBits,
- uint32_t AlignInBits,
- uint64_t OffsetInBits,
- Constant *Discriminant,
- DINode::DIFlags Flags, DIType *Ty) {
+DIDerivedType *DIBuilder::createVariantMemberType(
+ DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber,
+ uint64_t SizeInBits, uint32_t AlignInBits, uint64_t OffsetInBits,
+ Constant *Discriminant, DINode::DIFlags Flags, DIType *Ty) {
return DIDerivedType::get(VMContext, dwarf::DW_TAG_member, Name, File,
LineNumber, getNonCompileUnitScope(Scope), Ty,
SizeInBits, AlignInBits, OffsetInBits, None, Flags,
@@ -504,11 +504,11 @@ DISubroutineType *DIBuilder::createSubroutineType(DITypeRefArray ParameterTypes,
DICompositeType *DIBuilder::createEnumerationType(
DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber,
uint64_t SizeInBits, uint32_t AlignInBits, DINodeArray Elements,
- DIType *UnderlyingType, StringRef UniqueIdentifier, bool IsFixed) {
+ DIType *UnderlyingType, StringRef UniqueIdentifier, bool IsScoped) {
auto *CTy = DICompositeType::get(
VMContext, dwarf::DW_TAG_enumeration_type, Name, File, LineNumber,
getNonCompileUnitScope(Scope), UnderlyingType, SizeInBits, AlignInBits, 0,
- IsFixed ? DINode::FlagFixedEnum : DINode::FlagZero, Elements, 0, nullptr,
+ IsScoped ? DINode::FlagEnumClass : DINode::FlagZero, Elements, 0, nullptr,
nullptr, UniqueIdentifier);
AllEnumTypes.push_back(CTy);
trackIfUnresolved(CTy);
@@ -640,13 +640,13 @@ static void checkGlobalVariableScope(DIScope *Context) {
DIGlobalVariableExpression *DIBuilder::createGlobalVariableExpression(
DIScope *Context, StringRef Name, StringRef LinkageName, DIFile *F,
unsigned LineNumber, DIType *Ty, bool isLocalToUnit, DIExpression *Expr,
- MDNode *Decl, uint32_t AlignInBits) {
+ MDNode *Decl, MDTuple *templateParams, uint32_t AlignInBits) {
checkGlobalVariableScope(Context);
auto *GV = DIGlobalVariable::getDistinct(
VMContext, cast_or_null<DIScope>(Context), Name, LinkageName, F,
LineNumber, Ty, isLocalToUnit, true, cast_or_null<DIDerivedType>(Decl),
- AlignInBits);
+ templateParams, AlignInBits);
if (!Expr)
Expr = createExpression();
auto *N = DIGlobalVariableExpression::get(VMContext, GV, Expr);
@@ -657,13 +657,13 @@ DIGlobalVariableExpression *DIBuilder::createGlobalVariableExpression(
DIGlobalVariable *DIBuilder::createTempGlobalVariableFwdDecl(
DIScope *Context, StringRef Name, StringRef LinkageName, DIFile *F,
unsigned LineNumber, DIType *Ty, bool isLocalToUnit, MDNode *Decl,
- uint32_t AlignInBits) {
+ MDTuple *templateParams, uint32_t AlignInBits) {
checkGlobalVariableScope(Context);
return DIGlobalVariable::getTemporary(
VMContext, cast_or_null<DIScope>(Context), Name, LinkageName, F,
LineNumber, Ty, isLocalToUnit, false,
- cast_or_null<DIDerivedType>(Decl), AlignInBits)
+ cast_or_null<DIDerivedType>(Decl), templateParams, AlignInBits)
.release();
}
@@ -751,18 +751,18 @@ static DISubprogram *getSubprogram(bool IsDistinct, Ts &&... Args) {
DISubprogram *DIBuilder::createFunction(
DIScope *Context, StringRef Name, StringRef LinkageName, DIFile *File,
- unsigned LineNo, DISubroutineType *Ty, bool isLocalToUnit,
- bool isDefinition, unsigned ScopeLine, DINode::DIFlags Flags,
- bool isOptimized, DITemplateParameterArray TParams, DISubprogram *Decl,
+ unsigned LineNo, DISubroutineType *Ty, unsigned ScopeLine,
+ DINode::DIFlags Flags, DISubprogram::DISPFlags SPFlags,
+ DITemplateParameterArray TParams, DISubprogram *Decl,
DITypeArray ThrownTypes) {
+ bool IsDefinition = SPFlags & DISubprogram::SPFlagDefinition;
auto *Node = getSubprogram(
- /* IsDistinct = */ isDefinition, VMContext,
- getNonCompileUnitScope(Context), Name, LinkageName, File, LineNo, Ty,
- isLocalToUnit, isDefinition, ScopeLine, nullptr, 0, 0, 0, Flags,
- isOptimized, isDefinition ? CUNode : nullptr, TParams, Decl,
+ /*IsDistinct=*/IsDefinition, VMContext, getNonCompileUnitScope(Context),
+ Name, LinkageName, File, LineNo, Ty, ScopeLine, nullptr, 0, 0, Flags,
+ SPFlags, IsDefinition ? CUNode : nullptr, TParams, Decl,
MDTuple::getTemporary(VMContext, None).release(), ThrownTypes);
- if (isDefinition)
+ if (IsDefinition)
AllSubprograms.push_back(Node);
trackIfUnresolved(Node);
return Node;
@@ -770,35 +770,37 @@ DISubprogram *DIBuilder::createFunction(
DISubprogram *DIBuilder::createTempFunctionFwdDecl(
DIScope *Context, StringRef Name, StringRef LinkageName, DIFile *File,
- unsigned LineNo, DISubroutineType *Ty, bool isLocalToUnit,
- bool isDefinition, unsigned ScopeLine, DINode::DIFlags Flags,
- bool isOptimized, DITemplateParameterArray TParams, DISubprogram *Decl,
+ unsigned LineNo, DISubroutineType *Ty, unsigned ScopeLine,
+ DINode::DIFlags Flags, DISubprogram::DISPFlags SPFlags,
+ DITemplateParameterArray TParams, DISubprogram *Decl,
DITypeArray ThrownTypes) {
- return DISubprogram::getTemporary(
- VMContext, getNonCompileUnitScope(Context), Name, LinkageName,
- File, LineNo, Ty, isLocalToUnit, isDefinition, ScopeLine, nullptr,
- 0, 0, 0, Flags, isOptimized, isDefinition ? CUNode : nullptr,
- TParams, Decl, nullptr, ThrownTypes)
+ bool IsDefinition = SPFlags & DISubprogram::SPFlagDefinition;
+ return DISubprogram::getTemporary(VMContext, getNonCompileUnitScope(Context),
+ Name, LinkageName, File, LineNo, Ty,
+ ScopeLine, nullptr, 0, 0, Flags, SPFlags,
+ IsDefinition ? CUNode : nullptr, TParams,
+ Decl, nullptr, ThrownTypes)
.release();
}
DISubprogram *DIBuilder::createMethod(
DIScope *Context, StringRef Name, StringRef LinkageName, DIFile *F,
- unsigned LineNo, DISubroutineType *Ty, bool isLocalToUnit,
- bool isDefinition, unsigned VK, unsigned VIndex, int ThisAdjustment,
- DIType *VTableHolder, DINode::DIFlags Flags, bool isOptimized,
- DITemplateParameterArray TParams, DITypeArray ThrownTypes) {
+ unsigned LineNo, DISubroutineType *Ty, unsigned VIndex, int ThisAdjustment,
+ DIType *VTableHolder, DINode::DIFlags Flags,
+ DISubprogram::DISPFlags SPFlags, DITemplateParameterArray TParams,
+ DITypeArray ThrownTypes) {
assert(getNonCompileUnitScope(Context) &&
"Methods should have both a Context and a context that isn't "
"the compile unit.");
// FIXME: Do we want to use different scope/lines?
+ bool IsDefinition = SPFlags & DISubprogram::SPFlagDefinition;
auto *SP = getSubprogram(
- /* IsDistinct = */ isDefinition, VMContext, cast<DIScope>(Context), Name,
- LinkageName, F, LineNo, Ty, isLocalToUnit, isDefinition, LineNo,
- VTableHolder, VK, VIndex, ThisAdjustment, Flags, isOptimized,
- isDefinition ? CUNode : nullptr, TParams, nullptr, nullptr, ThrownTypes);
+ /*IsDistinct=*/IsDefinition, VMContext, cast<DIScope>(Context), Name,
+ LinkageName, F, LineNo, Ty, LineNo, VTableHolder, VIndex, ThisAdjustment,
+ Flags, SPFlags, IsDefinition ? CUNode : nullptr, TParams, nullptr,
+ nullptr, ThrownTypes);
- if (isDefinition)
+ if (IsDefinition)
AllSubprograms.push_back(SP);
trackIfUnresolved(SP);
return SP;
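
Caller-side sketch of the reworked createFunction signature (illustrative; the old isLocalToUnit/isDefinition/isOptimized booleans are now packed into DISPFlags, for example via DISubprogram::toSPFlags):

  #include "llvm/IR/DIBuilder.h"

  // Illustrative only.
  llvm::DISubprogram *declareDefinedFunction(llvm::DIBuilder &DIB,
                                             llvm::DIScope *Scope,
                                             llvm::DIFile *File,
                                             llvm::DISubroutineType *Ty) {
    return DIB.createFunction(
        Scope, "f", /*LinkageName=*/"f", File, /*LineNo=*/1, Ty,
        /*ScopeLine=*/1, llvm::DINode::FlagZero,
        llvm::DISubprogram::toSPFlags(/*IsLocalToUnit=*/false,
                                      /*IsDefinition=*/true,
                                      /*IsOptimized=*/false));
  }
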
diff --git a/lib/IR/DataLayout.cpp b/lib/IR/DataLayout.cpp
index 62c67127276e..63c24b5ee7af 100644
--- a/lib/IR/DataLayout.cpp
+++ b/lib/IR/DataLayout.cpp
@@ -635,6 +635,14 @@ unsigned DataLayout::getPointerSize(unsigned AS) const {
return I->TypeByteWidth;
}
+unsigned DataLayout::getMaxPointerSize() const {
+ unsigned MaxPointerSize = 0;
+ for (auto &P : Pointers)
+ MaxPointerSize = std::max(MaxPointerSize, P.TypeByteWidth);
+
+ return MaxPointerSize;
+}
+
unsigned DataLayout::getPointerTypeSizeInBits(Type *Ty) const {
assert(Ty->isPtrOrPtrVectorTy() &&
"This should only be called with a pointer or pointer vector type");
@@ -808,15 +816,29 @@ int64_t DataLayout::getIndexedOffsetInType(Type *ElemTy,
/// global. This includes an explicitly requested alignment (if the global
/// has one).
unsigned DataLayout::getPreferredAlignment(const GlobalVariable *GV) const {
+ unsigned GVAlignment = GV->getAlignment();
+ // If a section is specified, always precisely honor explicit alignment,
+ // so we don't insert padding into a section we don't control.
+ if (GVAlignment && GV->hasSection())
+ return GVAlignment;
+
+ // If no explicit alignment is specified, compute the alignment based on
+ // the IR type. If an alignment is specified, increase it to match the ABI
+ // alignment of the IR type.
+ //
+ // FIXME: Not sure it makes sense to use the alignment of the type if
+ // there's already an explicit alignment specification.
Type *ElemType = GV->getValueType();
unsigned Alignment = getPrefTypeAlignment(ElemType);
- unsigned GVAlignment = GV->getAlignment();
if (GVAlignment >= Alignment) {
Alignment = GVAlignment;
} else if (GVAlignment != 0) {
Alignment = std::max(GVAlignment, getABITypeAlignment(ElemType));
}
+ // If no explicit alignment is specified, and the global is large, increase
+ // the alignment to 16.
+ // FIXME: Why 16, specifically?
if (GV->hasInitializer() && GVAlignment == 0) {
if (Alignment < 16) {
// If the global is not external, see if it is large. If so, give it a
diff --git a/lib/IR/DebugInfo.cpp b/lib/IR/DebugInfo.cpp
index 77585ee30cd8..9fa31773b598 100644
--- a/lib/IR/DebugInfo.cpp
+++ b/lib/IR/DebugInfo.cpp
@@ -280,7 +280,7 @@ bool DebugInfoFinder::addScope(DIScope *Scope) {
}
static MDNode *stripDebugLocFromLoopID(MDNode *N) {
- assert(N->op_begin() != N->op_end() && "Missing self reference?");
+ assert(!empty(N->operands()) && "Missing self reference?");
// if there is no debug location, we do not have to rewrite this MDNode.
if (std::none_of(N->op_begin() + 1, N->op_end(), [](const MDOperand &Op) {
@@ -438,11 +438,10 @@ private:
auto distinctMDSubprogram = [&]() {
return DISubprogram::getDistinct(
MDS->getContext(), FileAndScope, MDS->getName(), LinkageName,
- FileAndScope, MDS->getLine(), Type, MDS->isLocalToUnit(),
- MDS->isDefinition(), MDS->getScopeLine(), ContainingType,
- MDS->getVirtuality(), MDS->getVirtualIndex(),
- MDS->getThisAdjustment(), MDS->getFlags(), MDS->isOptimized(), Unit,
- TemplateParams, Declaration, Variables);
+ FileAndScope, MDS->getLine(), Type, MDS->getScopeLine(),
+ ContainingType, MDS->getVirtualIndex(), MDS->getThisAdjustment(),
+ MDS->getFlags(), MDS->getSPFlags(), Unit, TemplateParams, Declaration,
+ Variables);
};
if (MDS->isDistinct())
@@ -450,11 +449,9 @@ private:
auto *NewMDS = DISubprogram::get(
MDS->getContext(), FileAndScope, MDS->getName(), LinkageName,
- FileAndScope, MDS->getLine(), Type, MDS->isLocalToUnit(),
- MDS->isDefinition(), MDS->getScopeLine(), ContainingType,
- MDS->getVirtuality(), MDS->getVirtualIndex(), MDS->getThisAdjustment(),
- MDS->getFlags(), MDS->isOptimized(), Unit, TemplateParams, Declaration,
- Variables);
+ FileAndScope, MDS->getLine(), Type, MDS->getScopeLine(), ContainingType,
+ MDS->getVirtualIndex(), MDS->getThisAdjustment(), MDS->getFlags(),
+ MDS->getSPFlags(), Unit, TemplateParams, Declaration, Variables);
StringRef OldLinkageName = MDS->getLinkageName();
@@ -491,7 +488,8 @@ private:
CU->getSplitDebugFilename(), DICompileUnit::LineTablesOnly, EnumTypes,
RetainedTypes, GlobalVariables, ImportedEntities, CU->getMacros(),
CU->getDWOId(), CU->getSplitDebugInlining(),
- CU->getDebugInfoForProfiling(), CU->getGnuPubnames());
+ CU->getDebugInfoForProfiling(), CU->getNameTableKind(),
+ CU->getRangesBaseAddress());
}
DILocation *getReplacementMDLocation(DILocation *MLD) {
@@ -690,8 +688,7 @@ unsigned llvm::getDebugMetadataVersionFromModule(const Module &M) {
void Instruction::applyMergedLocation(const DILocation *LocA,
const DILocation *LocB) {
- setDebugLoc(DILocation::getMergedLocation(LocA, LocB,
- DILocation::WithGeneratedLocation));
+ setDebugLoc(DILocation::getMergedLocation(LocA, LocB));
}
//===----------------------------------------------------------------------===//
@@ -700,8 +697,9 @@ void Instruction::applyMergedLocation(const DILocation *LocA,
static unsigned map_from_llvmDWARFsourcelanguage(LLVMDWARFSourceLanguage lang) {
switch (lang) {
-#define HANDLE_DW_LANG(ID, NAME, VERSION, VENDOR) \
-case LLVMDWARFSourceLanguage##NAME: return ID;
+#define HANDLE_DW_LANG(ID, NAME, LOWER_BOUND, VERSION, VENDOR) \
+ case LLVMDWARFSourceLanguage##NAME: \
+ return ID;
#include "llvm/BinaryFormat/Dwarf.def"
#undef HANDLE_DW_LANG
}
@@ -720,6 +718,11 @@ static LLVMDIFlags map_to_llvmDIFlags(DINode::DIFlags Flags) {
return static_cast<LLVMDIFlags>(Flags);
}
+static DISubprogram::DISPFlags
+pack_into_DISPFlags(bool IsLocalToUnit, bool IsDefinition, bool IsOptimized) {
+ return DISubprogram::toSPFlags(IsLocalToUnit, IsDefinition, IsOptimized);
+}
+
unsigned LLVMDebugMetadataVersion() {
return DEBUG_METADATA_VERSION;
}
@@ -803,9 +806,10 @@ LLVMMetadataRef LLVMDIBuilderCreateFunction(
unsigned ScopeLine, LLVMDIFlags Flags, LLVMBool IsOptimized) {
return wrap(unwrap(Builder)->createFunction(
unwrapDI<DIScope>(Scope), {Name, NameLen}, {LinkageName, LinkageNameLen},
- unwrapDI<DIFile>(File), LineNo, unwrapDI<DISubroutineType>(Ty),
- IsLocalToUnit, IsDefinition, ScopeLine, map_from_llvmDIFlags(Flags),
- IsOptimized, nullptr, nullptr, nullptr));
+ unwrapDI<DIFile>(File), LineNo, unwrapDI<DISubroutineType>(Ty), ScopeLine,
+ map_from_llvmDIFlags(Flags),
+ pack_into_DISPFlags(IsLocalToUnit, IsDefinition, IsOptimized), nullptr,
+ nullptr, nullptr));
}
@@ -948,9 +952,11 @@ LLVMDIBuilderCreateVectorType(LLVMDIBuilderRef Builder, uint64_t Size,
LLVMMetadataRef
LLVMDIBuilderCreateBasicType(LLVMDIBuilderRef Builder, const char *Name,
size_t NameLen, uint64_t SizeInBits,
- LLVMDWARFTypeEncoding Encoding) {
+ LLVMDWARFTypeEncoding Encoding,
+ LLVMDIFlags Flags) {
return wrap(unwrap(Builder)->createBasicType({Name, NameLen},
- SizeInBits, Encoding));
+ SizeInBits, Encoding,
+ map_from_llvmDIFlags(Flags)));
}
LLVMMetadataRef LLVMDIBuilderCreatePointerType(
@@ -1219,23 +1225,16 @@ LLVMDIBuilderCreateConstantValueExpression(LLVMDIBuilderRef Builder,
return wrap(unwrap(Builder)->createConstantValueExpression(Value));
}
-LLVMMetadataRef
-LLVMDIBuilderCreateGlobalVariableExpression(LLVMDIBuilderRef Builder,
- LLVMMetadataRef Scope,
- const char *Name, size_t NameLen,
- const char *Linkage, size_t LinkLen,
- LLVMMetadataRef File,
- unsigned LineNo,
- LLVMMetadataRef Ty,
- LLVMBool LocalToUnit,
- LLVMMetadataRef Expr,
- LLVMMetadataRef Decl,
- uint32_t AlignInBits) {
+LLVMMetadataRef LLVMDIBuilderCreateGlobalVariableExpression(
+ LLVMDIBuilderRef Builder, LLVMMetadataRef Scope, const char *Name,
+ size_t NameLen, const char *Linkage, size_t LinkLen, LLVMMetadataRef File,
+ unsigned LineNo, LLVMMetadataRef Ty, LLVMBool LocalToUnit,
+ LLVMMetadataRef Expr, LLVMMetadataRef Decl, uint32_t AlignInBits) {
return wrap(unwrap(Builder)->createGlobalVariableExpression(
- unwrapDI<DIScope>(Scope), {Name, NameLen}, {Linkage, LinkLen},
- unwrapDI<DIFile>(File), LineNo, unwrapDI<DIType>(Ty),
- LocalToUnit, unwrap<DIExpression>(Expr),
- unwrapDI<MDNode>(Decl), AlignInBits));
+ unwrapDI<DIScope>(Scope), {Name, NameLen}, {Linkage, LinkLen},
+ unwrapDI<DIFile>(File), LineNo, unwrapDI<DIType>(Ty), LocalToUnit,
+ unwrap<DIExpression>(Expr), unwrapDI<MDNode>(Decl),
+ nullptr, AlignInBits));
}
LLVMMetadataRef LLVMTemporaryMDNode(LLVMContextRef Ctx, LLVMMetadataRef *Data,
@@ -1255,26 +1254,21 @@ void LLVMMetadataReplaceAllUsesWith(LLVMMetadataRef TargetMetadata,
MDNode::deleteTemporary(Node);
}
-LLVMMetadataRef
-LLVMDIBuilderCreateTempGlobalVariableFwdDecl(LLVMDIBuilderRef Builder,
- LLVMMetadataRef Scope,
- const char *Name, size_t NameLen,
- const char *Linkage, size_t LnkLen,
- LLVMMetadataRef File,
- unsigned LineNo,
- LLVMMetadataRef Ty,
- LLVMBool LocalToUnit,
- LLVMMetadataRef Decl,
- uint32_t AlignInBits) {
+LLVMMetadataRef LLVMDIBuilderCreateTempGlobalVariableFwdDecl(
+ LLVMDIBuilderRef Builder, LLVMMetadataRef Scope, const char *Name,
+ size_t NameLen, const char *Linkage, size_t LnkLen, LLVMMetadataRef File,
+ unsigned LineNo, LLVMMetadataRef Ty, LLVMBool LocalToUnit,
+ LLVMMetadataRef Decl, uint32_t AlignInBits) {
return wrap(unwrap(Builder)->createTempGlobalVariableFwdDecl(
- unwrapDI<DIScope>(Scope), {Name, NameLen}, {Linkage, LnkLen},
- unwrapDI<DIFile>(File), LineNo, unwrapDI<DIType>(Ty),
- LocalToUnit, unwrapDI<MDNode>(Decl), AlignInBits));
+ unwrapDI<DIScope>(Scope), {Name, NameLen}, {Linkage, LnkLen},
+ unwrapDI<DIFile>(File), LineNo, unwrapDI<DIType>(Ty), LocalToUnit,
+ unwrapDI<MDNode>(Decl), nullptr, AlignInBits));
}
-LLVMValueRef LLVMDIBuilderInsertDeclareBefore(
- LLVMDIBuilderRef Builder, LLVMValueRef Storage, LLVMMetadataRef VarInfo,
- LLVMMetadataRef Expr, LLVMMetadataRef DL, LLVMValueRef Instr) {
+LLVMValueRef
+LLVMDIBuilderInsertDeclareBefore(LLVMDIBuilderRef Builder, LLVMValueRef Storage,
+ LLVMMetadataRef VarInfo, LLVMMetadataRef Expr,
+ LLVMMetadataRef DL, LLVMValueRef Instr) {
return wrap(unwrap(Builder)->insertDeclare(
unwrap(Storage), unwrap<DILocalVariable>(VarInfo),
unwrap<DIExpression>(Expr), unwrap<DILocation>(DL),
@@ -1329,7 +1323,7 @@ LLVMMetadataRef LLVMDIBuilderCreateParameterVariable(
size_t NameLen, unsigned ArgNo, LLVMMetadataRef File, unsigned LineNo,
LLVMMetadataRef Ty, LLVMBool AlwaysPreserve, LLVMDIFlags Flags) {
return wrap(unwrap(Builder)->createParameterVariable(
- unwrap<DIScope>(Scope), Name, ArgNo, unwrap<DIFile>(File),
+ unwrap<DIScope>(Scope), {Name, NameLen}, ArgNo, unwrap<DIFile>(File),
LineNo, unwrap<DIType>(Ty), AlwaysPreserve,
map_from_llvmDIFlags(Flags)));
}
@@ -1353,3 +1347,14 @@ LLVMMetadataRef LLVMGetSubprogram(LLVMValueRef Func) {
void LLVMSetSubprogram(LLVMValueRef Func, LLVMMetadataRef SP) {
unwrap<Function>(Func)->setSubprogram(unwrap<DISubprogram>(SP));
}
+
+LLVMMetadataKind LLVMGetMetadataKind(LLVMMetadataRef Metadata) {
+ switch(unwrap(Metadata)->getMetadataID()) {
+#define HANDLE_METADATA_LEAF(CLASS) \
+ case Metadata::CLASS##Kind: \
+ return (LLVMMetadataKind)LLVM##CLASS##MetadataKind;
+#include "llvm/IR/Metadata.def"
+ default:
+ return (LLVMMetadataKind)LLVMGenericDINodeMetadataKind;
+ }
+}
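
Sketch of dispatching on the new metadata-kind accessor above (illustrative; the kind constants follow the LLVM<Class>MetadataKind naming generated from Metadata.def):

  #include "llvm-c/DebugInfo.h"

  // Illustrative only.
  bool isDILocationMetadata(LLVMMetadataRef MD) {
    return LLVMGetMetadataKind(MD) == LLVMDILocationMetadataKind;
  }
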
diff --git a/lib/IR/DebugInfoMetadata.cpp b/lib/IR/DebugInfoMetadata.cpp
index 910e8c2fb74f..92f3f21f754c 100644
--- a/lib/IR/DebugInfoMetadata.cpp
+++ b/lib/IR/DebugInfoMetadata.cpp
@@ -14,16 +14,19 @@
#include "llvm/IR/DebugInfoMetadata.h"
#include "LLVMContextImpl.h"
#include "MetadataImpl.h"
-#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
+#include <numeric>
+
using namespace llvm;
DILocation::DILocation(LLVMContext &C, StorageType Storage, unsigned Line,
- unsigned Column, ArrayRef<Metadata *> MDs)
+ unsigned Column, ArrayRef<Metadata *> MDs,
+ bool ImplicitCode)
: MDNode(C, DILocationKind, Storage, MDs) {
assert((MDs.size() == 1 || MDs.size() == 2) &&
"Expected a scope and optional inlined-at");
@@ -33,6 +36,8 @@ DILocation::DILocation(LLVMContext &C, StorageType Storage, unsigned Line,
SubclassData32 = Line;
SubclassData16 = Column;
+
+ setImplicitCode(ImplicitCode);
}
static void adjustColumn(unsigned &Column) {
@@ -43,15 +48,15 @@ static void adjustColumn(unsigned &Column) {
DILocation *DILocation::getImpl(LLVMContext &Context, unsigned Line,
unsigned Column, Metadata *Scope,
- Metadata *InlinedAt, StorageType Storage,
- bool ShouldCreate) {
+ Metadata *InlinedAt, bool ImplicitCode,
+ StorageType Storage, bool ShouldCreate) {
// Fixup column.
adjustColumn(Column);
if (Storage == Uniqued) {
- if (auto *N =
- getUniqued(Context.pImpl->DILocations,
- DILocationInfo::KeyTy(Line, Column, Scope, InlinedAt)))
+ if (auto *N = getUniqued(Context.pImpl->DILocations,
+ DILocationInfo::KeyTy(Line, Column, Scope,
+ InlinedAt, ImplicitCode)))
return N;
if (!ShouldCreate)
return nullptr;
@@ -63,36 +68,94 @@ DILocation *DILocation::getImpl(LLVMContext &Context, unsigned Line,
Ops.push_back(Scope);
if (InlinedAt)
Ops.push_back(InlinedAt);
- return storeImpl(new (Ops.size())
- DILocation(Context, Storage, Line, Column, Ops),
+ return storeImpl(new (Ops.size()) DILocation(Context, Storage, Line, Column,
+ Ops, ImplicitCode),
Storage, Context.pImpl->DILocations);
}
const DILocation *DILocation::getMergedLocation(const DILocation *LocA,
- const DILocation *LocB,
- bool GenerateLocation) {
+ const DILocation *LocB) {
if (!LocA || !LocB)
return nullptr;
- if (LocA == LocB || !LocA->canDiscriminate(*LocB))
+ if (LocA == LocB)
return LocA;
- if (!GenerateLocation)
- return nullptr;
-
SmallPtrSet<DILocation *, 5> InlinedLocationsA;
for (DILocation *L = LocA->getInlinedAt(); L; L = L->getInlinedAt())
InlinedLocationsA.insert(L);
+ SmallSet<std::pair<DIScope *, DILocation *>, 5> Locations;
+ DIScope *S = LocA->getScope();
+ DILocation *L = LocA->getInlinedAt();
+ while (S) {
+ Locations.insert(std::make_pair(S, L));
+ S = S->getScope().resolve();
+ if (!S && L) {
+ S = L->getScope();
+ L = L->getInlinedAt();
+ }
+ }
const DILocation *Result = LocB;
- for (DILocation *L = LocB->getInlinedAt(); L; L = L->getInlinedAt()) {
- Result = L;
- if (InlinedLocationsA.count(L))
+ S = LocB->getScope();
+ L = LocB->getInlinedAt();
+ while (S) {
+ if (Locations.count(std::make_pair(S, L)))
break;
+ S = S->getScope().resolve();
+ if (!S && L) {
+ S = L->getScope();
+ L = L->getInlinedAt();
+ }
}
- return DILocation::get(Result->getContext(), 0, 0, Result->getScope(),
- Result->getInlinedAt());
+
+  // If the two locations are irreconcilable, just pick one. This is misleading,
+ // but on the other hand, it's a "line 0" location.
+ if (!S || !isa<DILocalScope>(S))
+ S = LocA->getScope();
+ return DILocation::get(Result->getContext(), 0, 0, S, L);
}
+Optional<unsigned> DILocation::encodeDiscriminator(unsigned BD, unsigned DF, unsigned CI) {
+ SmallVector<unsigned, 3> Components = {BD, DF, CI};
+ uint64_t RemainingWork = 0U;
+ // We use RemainingWork to figure out if we have no remaining components to
+ // encode. For example: if BD != 0 but DF == 0 && CI == 0, we don't need to
+ // encode anything for the latter 2.
+ // Since any of the input components is at most 32 bits, their sum will be
+ // less than 34 bits, and thus RemainingWork won't overflow.
+ RemainingWork = std::accumulate(Components.begin(), Components.end(), RemainingWork);
+
+ int I = 0;
+ unsigned Ret = 0;
+ unsigned NextBitInsertionIndex = 0;
+ while (RemainingWork > 0) {
+ unsigned C = Components[I++];
+ RemainingWork -= C;
+ unsigned EC = encodeComponent(C);
+ Ret |= (EC << NextBitInsertionIndex);
+ NextBitInsertionIndex += encodingBits(C);
+ }
+
+ // Encoding may be unsuccessful because of overflow. We determine success by
+ // checking equivalence of components before & after encoding. Alternatively,
+ // we could determine Success during encoding, but the current alternative is
+ // simpler.
+  unsigned TBD = 0, TDF = 0, TCI = 0;
+ decodeDiscriminator(Ret, TBD, TDF, TCI);
+ if (TBD == BD && TDF == DF && TCI == CI)
+ return Ret;
+ return None;
+}
+
+void DILocation::decodeDiscriminator(unsigned D, unsigned &BD, unsigned &DF,
+ unsigned &CI) {
+ BD = getUnsignedFromPrefixEncoding(D);
+ DF = getUnsignedFromPrefixEncoding(getNextComponentInDiscriminator(D));
+ CI = getUnsignedFromPrefixEncoding(
+ getNextComponentInDiscriminator(getNextComponentInDiscriminator(D)));
+}
+
+
DINode::DIFlags DINode::getFlag(StringRef Flag) {
return StringSwitch<DIFlags>(Flag)
#define HANDLE_DI_FLAG(ID, NAME) .Case("DIFlag" #NAME, Flag##NAME)
@@ -274,13 +337,14 @@ DIEnumerator *DIEnumerator::getImpl(LLVMContext &Context, int64_t Value,
DIBasicType *DIBasicType::getImpl(LLVMContext &Context, unsigned Tag,
MDString *Name, uint64_t SizeInBits,
uint32_t AlignInBits, unsigned Encoding,
- StorageType Storage, bool ShouldCreate) {
+ DIFlags Flags, StorageType Storage,
+ bool ShouldCreate) {
assert(isCanonical(Name) && "Expected canonical MDString");
DEFINE_GETIMPL_LOOKUP(DIBasicType,
- (Tag, Name, SizeInBits, AlignInBits, Encoding));
+ (Tag, Name, SizeInBits, AlignInBits, Encoding, Flags));
Metadata *Ops[] = {nullptr, nullptr, Name};
- DEFINE_GETIMPL_STORE(DIBasicType, (Tag, SizeInBits, AlignInBits, Encoding),
- Ops);
+ DEFINE_GETIMPL_STORE(DIBasicType, (Tag, SizeInBits, AlignInBits, Encoding,
+ Flags), Ops);
}
Optional<DIBasicType::Signedness> DIBasicType::getSignedness() const {
@@ -449,7 +513,8 @@ DICompileUnit *DICompileUnit::getImpl(
unsigned EmissionKind, Metadata *EnumTypes, Metadata *RetainedTypes,
Metadata *GlobalVariables, Metadata *ImportedEntities, Metadata *Macros,
uint64_t DWOId, bool SplitDebugInlining, bool DebugInfoForProfiling,
- bool GnuPubnames, StorageType Storage, bool ShouldCreate) {
+ unsigned NameTableKind, bool RangesBaseAddress, StorageType Storage,
+ bool ShouldCreate) {
assert(Storage != Uniqued && "Cannot unique DICompileUnit");
assert(isCanonical(Producer) && "Expected canonical MDString");
assert(isCanonical(Flags) && "Expected canonical MDString");
@@ -462,7 +527,8 @@ DICompileUnit *DICompileUnit::getImpl(
return storeImpl(new (array_lengthof(Ops)) DICompileUnit(
Context, Storage, SourceLanguage, IsOptimized,
RuntimeVersion, EmissionKind, DWOId, SplitDebugInlining,
- DebugInfoForProfiling, GnuPubnames, Ops),
+ DebugInfoForProfiling, NameTableKind, RangesBaseAddress,
+ Ops),
Storage);
}
@@ -472,6 +538,16 @@ DICompileUnit::getEmissionKind(StringRef Str) {
.Case("NoDebug", NoDebug)
.Case("FullDebug", FullDebug)
.Case("LineTablesOnly", LineTablesOnly)
+ .Case("DebugDirectivesOnly", DebugDirectivesOnly)
+ .Default(None);
+}
+
+Optional<DICompileUnit::DebugNameTableKind>
+DICompileUnit::getNameTableKind(StringRef Str) {
+ return StringSwitch<Optional<DebugNameTableKind>>(Str)
+ .Case("Default", DebugNameTableKind::Default)
+ .Case("GNU", DebugNameTableKind::GNU)
+ .Case("None", DebugNameTableKind::None)
.Default(None);
}
@@ -480,6 +556,19 @@ const char *DICompileUnit::emissionKindString(DebugEmissionKind EK) {
case NoDebug: return "NoDebug";
case FullDebug: return "FullDebug";
case LineTablesOnly: return "LineTablesOnly";
+ case DebugDirectivesOnly: return "DebugDirectivesOnly";
+ }
+ return nullptr;
+}
+
+const char *DICompileUnit::nameTableKindString(DebugNameTableKind NTK) {
+ switch (NTK) {
+ case DebugNameTableKind::Default:
+ return nullptr;
+ case DebugNameTableKind::GNU:
+ return "GNU";
+ case DebugNameTableKind::None:
+ return "None";
}
return nullptr;
}
@@ -496,21 +585,55 @@ DILocalScope *DILocalScope::getNonLexicalBlockFileScope() const {
return const_cast<DILocalScope *>(this);
}
+DISubprogram::DISPFlags DISubprogram::getFlag(StringRef Flag) {
+ return StringSwitch<DISPFlags>(Flag)
+#define HANDLE_DISP_FLAG(ID, NAME) .Case("DISPFlag" #NAME, SPFlag##NAME)
+#include "llvm/IR/DebugInfoFlags.def"
+ .Default(SPFlagZero);
+}
+
+StringRef DISubprogram::getFlagString(DISPFlags Flag) {
+ switch (Flag) {
+ // Appease a warning.
+ case SPFlagVirtuality:
+ return "";
+#define HANDLE_DISP_FLAG(ID, NAME) \
+ case SPFlag##NAME: \
+ return "DISPFlag" #NAME;
+#include "llvm/IR/DebugInfoFlags.def"
+ }
+ return "";
+}
+
+DISubprogram::DISPFlags
+DISubprogram::splitFlags(DISPFlags Flags,
+ SmallVectorImpl<DISPFlags> &SplitFlags) {
+ // Multi-bit fields can require special handling. In our case, however, the
+ // only multi-bit field is virtuality, and all its values happen to be
+ // single-bit values, so the right behavior just falls out.
+#define HANDLE_DISP_FLAG(ID, NAME) \
+ if (DISPFlags Bit = Flags & SPFlag##NAME) { \
+ SplitFlags.push_back(Bit); \
+ Flags &= ~Bit; \
+ }
+#include "llvm/IR/DebugInfoFlags.def"
+ return Flags;
+}
+
DISubprogram *DISubprogram::getImpl(
LLVMContext &Context, Metadata *Scope, MDString *Name,
MDString *LinkageName, Metadata *File, unsigned Line, Metadata *Type,
- bool IsLocalToUnit, bool IsDefinition, unsigned ScopeLine,
- Metadata *ContainingType, unsigned Virtuality, unsigned VirtualIndex,
- int ThisAdjustment, DIFlags Flags, bool IsOptimized, Metadata *Unit,
+ unsigned ScopeLine, Metadata *ContainingType, unsigned VirtualIndex,
+ int ThisAdjustment, DIFlags Flags, DISPFlags SPFlags, Metadata *Unit,
Metadata *TemplateParams, Metadata *Declaration, Metadata *RetainedNodes,
Metadata *ThrownTypes, StorageType Storage, bool ShouldCreate) {
assert(isCanonical(Name) && "Expected canonical MDString");
assert(isCanonical(LinkageName) && "Expected canonical MDString");
- DEFINE_GETIMPL_LOOKUP(
- DISubprogram, (Scope, Name, LinkageName, File, Line, Type, IsLocalToUnit,
- IsDefinition, ScopeLine, ContainingType, Virtuality,
- VirtualIndex, ThisAdjustment, Flags, IsOptimized, Unit,
- TemplateParams, Declaration, RetainedNodes, ThrownTypes));
+ DEFINE_GETIMPL_LOOKUP(DISubprogram,
+ (Scope, Name, LinkageName, File, Line, Type, ScopeLine,
+ ContainingType, VirtualIndex, ThisAdjustment, Flags,
+ SPFlags, Unit, TemplateParams, Declaration,
+ RetainedNodes, ThrownTypes));
SmallVector<Metadata *, 11> Ops = {
File, Scope, Name, LinkageName, Type, Unit,
Declaration, RetainedNodes, ContainingType, TemplateParams, ThrownTypes};
@@ -522,11 +645,10 @@ DISubprogram *DISubprogram::getImpl(
Ops.pop_back();
}
}
- DEFINE_GETIMPL_STORE_N(DISubprogram,
- (Line, ScopeLine, Virtuality, VirtualIndex,
- ThisAdjustment, Flags, IsLocalToUnit, IsDefinition,
- IsOptimized),
- Ops, Ops.size());
+ DEFINE_GETIMPL_STORE_N(
+ DISubprogram,
+ (Line, ScopeLine, VirtualIndex, ThisAdjustment, Flags, SPFlags), Ops,
+ Ops.size());
}
bool DISubprogram::describes(const Function *F) const {
@@ -609,19 +731,24 @@ DIGlobalVariable::getImpl(LLVMContext &Context, Metadata *Scope, MDString *Name,
MDString *LinkageName, Metadata *File, unsigned Line,
Metadata *Type, bool IsLocalToUnit, bool IsDefinition,
Metadata *StaticDataMemberDeclaration,
- uint32_t AlignInBits, StorageType Storage,
- bool ShouldCreate) {
+ Metadata *TemplateParams, uint32_t AlignInBits,
+ StorageType Storage, bool ShouldCreate) {
assert(isCanonical(Name) && "Expected canonical MDString");
assert(isCanonical(LinkageName) && "Expected canonical MDString");
- DEFINE_GETIMPL_LOOKUP(DIGlobalVariable,
- (Scope, Name, LinkageName, File, Line, Type,
- IsLocalToUnit, IsDefinition,
- StaticDataMemberDeclaration, AlignInBits));
- Metadata *Ops[] = {
- Scope, Name, File, Type, Name, LinkageName, StaticDataMemberDeclaration};
+ DEFINE_GETIMPL_LOOKUP(DIGlobalVariable, (Scope, Name, LinkageName, File, Line,
+ Type, IsLocalToUnit, IsDefinition,
+ StaticDataMemberDeclaration,
+ TemplateParams, AlignInBits));
+ Metadata *Ops[] = {Scope,
+ Name,
+ File,
+ Type,
+ Name,
+ LinkageName,
+ StaticDataMemberDeclaration,
+ TemplateParams};
DEFINE_GETIMPL_STORE(DIGlobalVariable,
- (Line, IsLocalToUnit, IsDefinition, AlignInBits),
- Ops);
+ (Line, IsLocalToUnit, IsDefinition, AlignInBits), Ops);
}
DILocalVariable *DILocalVariable::getImpl(LLVMContext &Context, Metadata *Scope,
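A minimal sketch of the discriminator helpers introduced in this file, assuming only the DebugInfoMetadata.h include; BD, DF and CI stand for the base discriminator, duplication factor and copy identifier, mirroring the comments above:

#include "llvm/IR/DebugInfoMetadata.h"
using namespace llvm;

// Pack three components and verify the round trip; encodeDiscriminator
// returns None when a component overflows the prefix encoding.
bool roundTrips(unsigned BD, unsigned DF, unsigned CI) {
  Optional<unsigned> D = DILocation::encodeDiscriminator(BD, DF, CI);
  if (!D)
    return false;
  unsigned OutBD, OutDF, OutCI;
  DILocation::decodeDiscriminator(*D, OutBD, OutDF, OutCI);
  return OutBD == BD && OutDF == DF && OutCI == CI;
}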
diff --git a/lib/IR/DebugLoc.cpp b/lib/IR/DebugLoc.cpp
index 36f3e179a2c0..10ec98ac7e6c 100644
--- a/lib/IR/DebugLoc.cpp
+++ b/lib/IR/DebugLoc.cpp
@@ -56,15 +56,28 @@ DebugLoc DebugLoc::getFnDebugLoc() const {
return DebugLoc();
}
+bool DebugLoc::isImplicitCode() const {
+ if (DILocation *Loc = get()) {
+ return Loc->isImplicitCode();
+ }
+ return true;
+}
+
+void DebugLoc::setImplicitCode(bool ImplicitCode) {
+ if (DILocation *Loc = get()) {
+ Loc->setImplicitCode(ImplicitCode);
+ }
+}
+
DebugLoc DebugLoc::get(unsigned Line, unsigned Col, const MDNode *Scope,
- const MDNode *InlinedAt) {
+ const MDNode *InlinedAt, bool ImplicitCode) {
// If no scope is available, this is an unknown location.
if (!Scope)
return DebugLoc();
return DILocation::get(Scope->getContext(), Line, Col,
const_cast<MDNode *>(Scope),
- const_cast<MDNode *>(InlinedAt));
+ const_cast<MDNode *>(InlinedAt), ImplicitCode);
}
DebugLoc DebugLoc::appendInlinedAt(DebugLoc DL, DILocation *InlinedAt,
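A minimal sketch of the extended DebugLoc::get, assuming Scope is an existing DIScope and I is an instruction the front end synthesized (for example an implicit return); the extra flag simply forwards to the underlying DILocation:

// Mark a compiler-generated location as "implicit code" so that
// profile-based tooling can distinguish it from user-written lines.
DebugLoc DL = DebugLoc::get(/*Line=*/0, /*Col=*/0, Scope,
                            /*InlinedAt=*/nullptr, /*ImplicitCode=*/true);
I->setDebugLoc(DL);
assert(DL.isImplicitCode());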
diff --git a/lib/IR/DiagnosticInfo.cpp b/lib/IR/DiagnosticInfo.cpp
index 5ddb1196b072..dc957ab7dad9 100644
--- a/lib/IR/DiagnosticInfo.cpp
+++ b/lib/IR/DiagnosticInfo.cpp
@@ -33,9 +33,10 @@
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Path.h"
#include "llvm/Support/Regex.h"
-#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/ScopedPrinter.h"
+#include "llvm/Support/raw_ostream.h"
#include <atomic>
#include <cassert>
#include <memory>
@@ -103,10 +104,15 @@ void DiagnosticInfoPGOProfile::print(DiagnosticPrinter &DP) const {
DP << getMsg();
}
+void DiagnosticInfo::anchor() {}
+void DiagnosticInfoStackSize::anchor() {}
+void DiagnosticInfoWithLocationBase::anchor() {}
+void DiagnosticInfoIROptimization::anchor() {}
+
DiagnosticLocation::DiagnosticLocation(const DebugLoc &DL) {
if (!DL)
return;
- Filename = DL->getFilename();
+ File = DL->getFile();
Line = DL->getLine();
Column = DL->getColumn();
}
@@ -114,17 +120,36 @@ DiagnosticLocation::DiagnosticLocation(const DebugLoc &DL) {
DiagnosticLocation::DiagnosticLocation(const DISubprogram *SP) {
if (!SP)
return;
- Filename = SP->getFilename();
+
+ File = SP->getFile();
Line = SP->getScopeLine();
Column = 0;
}
-void DiagnosticInfoWithLocationBase::getLocation(StringRef *Filename,
- unsigned *Line,
- unsigned *Column) const {
- *Filename = Loc.getFilename();
- *Line = Loc.getLine();
- *Column = Loc.getColumn();
+StringRef DiagnosticLocation::getRelativePath() const {
+ return File->getFilename();
+}
+
+std::string DiagnosticLocation::getAbsolutePath() const {
+ StringRef Name = File->getFilename();
+ if (sys::path::is_absolute(Name))
+ return Name;
+
+ SmallString<128> Path;
+ sys::path::append(Path, File->getDirectory(), Name);
+ return sys::path::remove_leading_dotslash(Path).str();
+}
+
+std::string DiagnosticInfoWithLocationBase::getAbsolutePath() const {
+ return Loc.getAbsolutePath();
+}
+
+void DiagnosticInfoWithLocationBase::getLocation(StringRef &RelativePath,
+ unsigned &Line,
+ unsigned &Column) const {
+ RelativePath = Loc.getRelativePath();
+ Line = Loc.getLine();
+ Column = Loc.getColumn();
}
const std::string DiagnosticInfoWithLocationBase::getLocationStr() const {
@@ -132,7 +157,7 @@ const std::string DiagnosticInfoWithLocationBase::getLocationStr() const {
unsigned Line = 0;
unsigned Column = 0;
if (isLocationAvailable())
- getLocation(&Filename, &Line, &Column);
+ getLocation(Filename, Line, Column);
return (Filename + ":" + Twine(Line) + ":" + Twine(Column)).str();
}
@@ -346,6 +371,9 @@ std::string DiagnosticInfoOptimizationBase::getMsg() const {
return OS.str();
}
+void OptimizationRemarkAnalysisFPCommute::anchor() {}
+void OptimizationRemarkAnalysisAliasing::anchor() {}
+
namespace llvm {
namespace yaml {
@@ -399,7 +427,7 @@ template <> struct MappingTraits<DiagnosticLocation> {
static void mapping(IO &io, DiagnosticLocation &DL) {
assert(io.outputting() && "input not yet implemented");
- StringRef File = DL.getFilename();
+ StringRef File = DL.getRelativePath();
unsigned Line = DL.getLine();
unsigned Col = DL.getColumn();
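A minimal sketch of the reworked location accessors, assuming DI is a diagnostic derived from DiagnosticInfoWithLocationBase (for example an optimization remark) and the usual raw_ostream helpers are available:

// Relative path, line and column now come back through references; the
// absolute path is assembled from the DIFile's directory when needed.
if (DI.isLocationAvailable()) {
  StringRef RelPath;
  unsigned Line = 0, Col = 0;
  DI.getLocation(RelPath, Line, Col);
  errs() << DI.getAbsolutePath() << ":" << Line << ":" << Col << "\n";
}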
diff --git a/lib/IR/DomTreeUpdater.cpp b/lib/IR/DomTreeUpdater.cpp
index f035a86eddae..b72c1b77c2ce 100644
--- a/lib/IR/DomTreeUpdater.cpp
+++ b/lib/IR/DomTreeUpdater.cpp
@@ -152,39 +152,34 @@ bool DomTreeUpdater::forceFlushDeletedBB() {
return true;
}
-bool DomTreeUpdater::recalculate(Function &F) {
- if (!DT && !PDT)
- return false;
+void DomTreeUpdater::recalculate(Function &F) {
if (Strategy == UpdateStrategy::Eager) {
if (DT)
DT->recalculate(F);
if (PDT)
PDT->recalculate(F);
- return true;
+ return;
}
+  // There is little performance gain from deferring the recalculation under
+  // the Lazy UpdateStrategy, so recalculate the available trees immediately.
+
// Prevent forceFlushDeletedBB() from erasing DomTree or PostDomTree nodes.
IsRecalculatingDomTree = IsRecalculatingPostDomTree = true;
// Because all trees are going to be up-to-date after recalculation,
// flush awaiting deleted BasicBlocks.
- if (forceFlushDeletedBB() || hasPendingUpdates()) {
- if (DT)
- DT->recalculate(F);
- if (PDT)
- PDT->recalculate(F);
-
- // Resume forceFlushDeletedBB() to erase DomTree or PostDomTree nodes.
- IsRecalculatingDomTree = IsRecalculatingPostDomTree = false;
- PendDTUpdateIndex = PendPDTUpdateIndex = PendUpdates.size();
- dropOutOfDateUpdates();
- return true;
- }
+ forceFlushDeletedBB();
+ if (DT)
+ DT->recalculate(F);
+ if (PDT)
+ PDT->recalculate(F);
// Resume forceFlushDeletedBB() to erase DomTree or PostDomTree nodes.
IsRecalculatingDomTree = IsRecalculatingPostDomTree = false;
- return false;
+ PendDTUpdateIndex = PendPDTUpdateIndex = PendUpdates.size();
+ dropOutOfDateUpdates();
}
bool DomTreeUpdater::hasPendingUpdates() const {
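A minimal sketch of the revised contract, assuming DT is a live DominatorTree and Pred, Dead and F come from the function being transformed; under the Lazy strategy recalculate() now flushes pending work itself and no longer returns a bool:

DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
DTU.applyUpdates({{DominatorTree::Delete, Pred, Dead}});
DTU.deleteBB(Dead);   // queued until a flush point under the Lazy strategy
DTU.recalculate(F);   // flushes the queue and rebuilds the available trees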
diff --git a/lib/IR/Dominators.cpp b/lib/IR/Dominators.cpp
index d8971e05f476..cf9f5759ba53 100644
--- a/lib/IR/Dominators.cpp
+++ b/lib/IR/Dominators.cpp
@@ -41,7 +41,7 @@ static constexpr bool ExpensiveChecksEnabled = false;
#endif
bool BasicBlockEdge::isSingleEdge() const {
- const TerminatorInst *TI = Start->getTerminator();
+ const Instruction *TI = Start->getTerminator();
unsigned NumEdgesToEnd = 0;
for (unsigned int i = 0, n = TI->getNumSuccessors(); i < n; ++i) {
if (TI->getSuccessor(i) == End)
@@ -67,12 +67,17 @@ template class llvm::DomTreeNodeBase<BasicBlock>;
template class llvm::DominatorTreeBase<BasicBlock, false>; // DomTreeBase
template class llvm::DominatorTreeBase<BasicBlock, true>; // PostDomTreeBase
-template struct llvm::DomTreeBuilder::Update<BasicBlock *>;
+template class llvm::cfg::Update<BasicBlock *>;
template void llvm::DomTreeBuilder::Calculate<DomTreeBuilder::BBDomTree>(
DomTreeBuilder::BBDomTree &DT);
+template void
+llvm::DomTreeBuilder::CalculateWithUpdates<DomTreeBuilder::BBDomTree>(
+ DomTreeBuilder::BBDomTree &DT, BBUpdates U);
+
template void llvm::DomTreeBuilder::Calculate<DomTreeBuilder::BBPostDomTree>(
DomTreeBuilder::BBPostDomTree &DT);
+// No CalculateWithUpdates<PostDomTree> instantiation, unless a use case arises.
template void llvm::DomTreeBuilder::InsertEdge<DomTreeBuilder::BBDomTree>(
DomTreeBuilder::BBDomTree &DT, BasicBlock *From, BasicBlock *To);
@@ -372,193 +377,3 @@ void DominatorTreeWrapperPass::print(raw_ostream &OS, const Module *) const {
DT.print(OS);
}
-//===----------------------------------------------------------------------===//
-// DeferredDominance Implementation
-//===----------------------------------------------------------------------===//
-//
-// The implementation details of the DeferredDominance class which allows
-// one to queue updates to a DominatorTree.
-//
-//===----------------------------------------------------------------------===//
-
-/// Queues multiple updates and discards duplicates.
-void DeferredDominance::applyUpdates(
- ArrayRef<DominatorTree::UpdateType> Updates) {
- SmallVector<DominatorTree::UpdateType, 8> Seen;
- for (auto U : Updates)
- // Avoid duplicates to applyUpdate() to save on analysis.
- if (std::none_of(Seen.begin(), Seen.end(),
- [U](DominatorTree::UpdateType S) { return S == U; })) {
- Seen.push_back(U);
- applyUpdate(U.getKind(), U.getFrom(), U.getTo());
- }
-}
-
-/// Helper method for a single edge insertion. It's almost always better
-/// to batch updates and call applyUpdates to quickly remove duplicate edges.
-/// This is best used when there is only a single insertion needed to update
-/// Dominators.
-void DeferredDominance::insertEdge(BasicBlock *From, BasicBlock *To) {
- applyUpdate(DominatorTree::Insert, From, To);
-}
-
-/// Helper method for a single edge deletion. It's almost always better
-/// to batch updates and call applyUpdates to quickly remove duplicate edges.
-/// This is best used when there is only a single deletion needed to update
-/// Dominators.
-void DeferredDominance::deleteEdge(BasicBlock *From, BasicBlock *To) {
- applyUpdate(DominatorTree::Delete, From, To);
-}
-
-/// Delays the deletion of a basic block until a flush() event.
-void DeferredDominance::deleteBB(BasicBlock *DelBB) {
- assert(DelBB && "Invalid push_back of nullptr DelBB.");
- assert(pred_empty(DelBB) && "DelBB has one or more predecessors.");
- // DelBB is unreachable and all its instructions are dead.
- while (!DelBB->empty()) {
- Instruction &I = DelBB->back();
- // Replace used instructions with an arbitrary value (undef).
- if (!I.use_empty())
- I.replaceAllUsesWith(llvm::UndefValue::get(I.getType()));
- DelBB->getInstList().pop_back();
- }
- // Make sure DelBB has a valid terminator instruction. As long as DelBB is a
- // Child of Function F it must contain valid IR.
- new UnreachableInst(DelBB->getContext(), DelBB);
- DeletedBBs.insert(DelBB);
-}
-
-/// Returns true if DelBB is awaiting deletion at a flush() event.
-bool DeferredDominance::pendingDeletedBB(BasicBlock *DelBB) {
- if (DeletedBBs.empty())
- return false;
- return DeletedBBs.count(DelBB) != 0;
-}
-
-/// Returns true if pending DT updates are queued for a flush() event.
-bool DeferredDominance::pending() { return !PendUpdates.empty(); }
-
-/// Flushes all pending updates and block deletions. Returns a
-/// correct DominatorTree reference to be used by the caller for analysis.
-DominatorTree &DeferredDominance::flush() {
- // Updates to DT must happen before blocks are deleted below. Otherwise the
- // DT traversal will encounter badref blocks and assert.
- if (!PendUpdates.empty()) {
- DT.applyUpdates(PendUpdates);
- PendUpdates.clear();
- }
- flushDelBB();
- return DT;
-}
-
-/// Drops all internal state and forces a (slow) recalculation of the
-/// DominatorTree based on the current state of the LLVM IR in F. This should
-/// only be used in corner cases such as the Entry block of F being deleted.
-void DeferredDominance::recalculate(Function &F) {
- // flushDelBB must be flushed before the recalculation. The state of the IR
- // must be consistent before the DT traversal algorithm determines the
- // actual DT.
- if (flushDelBB() || !PendUpdates.empty()) {
- DT.recalculate(F);
- PendUpdates.clear();
- }
-}
-
-/// Debug method to help view the state of pending updates.
-#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
-LLVM_DUMP_METHOD void DeferredDominance::dump() const {
- raw_ostream &OS = llvm::dbgs();
- OS << "PendUpdates:\n";
- int I = 0;
- for (auto U : PendUpdates) {
- OS << " " << I << " : ";
- ++I;
- if (U.getKind() == DominatorTree::Insert)
- OS << "Insert, ";
- else
- OS << "Delete, ";
- BasicBlock *From = U.getFrom();
- if (From) {
- auto S = From->getName();
- if (!From->hasName())
- S = "(no name)";
- OS << S << "(" << From << "), ";
- } else {
- OS << "(badref), ";
- }
- BasicBlock *To = U.getTo();
- if (To) {
- auto S = To->getName();
- if (!To->hasName())
- S = "(no_name)";
- OS << S << "(" << To << ")\n";
- } else {
- OS << "(badref)\n";
- }
- }
- OS << "DeletedBBs:\n";
- I = 0;
- for (auto BB : DeletedBBs) {
- OS << " " << I << " : ";
- ++I;
- if (BB->hasName())
- OS << BB->getName() << "(";
- else
- OS << "(no_name)(";
- OS << BB << ")\n";
- }
-}
-#endif
-
-/// Apply an update (Kind, From, To) to the internal queued updates. The
-/// update is only added when determined to be necessary. Checks for
-/// self-domination, unnecessary updates, duplicate requests, and balanced
-/// pairs of requests are all performed. Returns true if the update is
-/// queued and false if it is discarded.
-bool DeferredDominance::applyUpdate(DominatorTree::UpdateKind Kind,
- BasicBlock *From, BasicBlock *To) {
- if (From == To)
- return false; // Cannot dominate self; discard update.
-
- // Discard updates by inspecting the current state of successors of From.
- // Since applyUpdate() must be called *after* the Terminator of From is
- // altered we can determine if the update is unnecessary.
- bool HasEdge = std::any_of(succ_begin(From), succ_end(From),
- [To](BasicBlock *B) { return B == To; });
- if (Kind == DominatorTree::Insert && !HasEdge)
- return false; // Unnecessary Insert: edge does not exist in IR.
- if (Kind == DominatorTree::Delete && HasEdge)
- return false; // Unnecessary Delete: edge still exists in IR.
-
- // Analyze pending updates to determine if the update is unnecessary.
- DominatorTree::UpdateType Update = {Kind, From, To};
- DominatorTree::UpdateType Invert = {Kind != DominatorTree::Insert
- ? DominatorTree::Insert
- : DominatorTree::Delete,
- From, To};
- for (auto I = PendUpdates.begin(), E = PendUpdates.end(); I != E; ++I) {
- if (Update == *I)
- return false; // Discard duplicate updates.
- if (Invert == *I) {
- // Update and Invert are both valid (equivalent to a no-op). Remove
- // Invert from PendUpdates and discard the Update.
- PendUpdates.erase(I);
- return false;
- }
- }
- PendUpdates.push_back(Update); // Save the valid update.
- return true;
-}
-
-/// Performs all pending basic block deletions. We have to defer the deletion
-/// of these blocks until after the DominatorTree updates are applied. The
-/// internal workings of the DominatorTree code expect every update's From
-/// and To blocks to exist and to be a member of the same Function.
-bool DeferredDominance::flushDelBB() {
- if (DeletedBBs.empty())
- return false;
- for (auto *BB : DeletedBBs)
- BB->eraseFromParent();
- DeletedBBs.clear();
- return true;
-}
diff --git a/lib/IR/Function.cpp b/lib/IR/Function.cpp
index 72090f5bac3e..a88478b89bfc 100644
--- a/lib/IR/Function.cpp
+++ b/lib/IR/Function.cpp
@@ -24,7 +24,6 @@
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
-#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
@@ -195,14 +194,19 @@ LLVMContext &Function::getContext() const {
return getType()->getContext();
}
-unsigned Function::getInstructionCount() {
+unsigned Function::getInstructionCount() const {
unsigned NumInstrs = 0;
- for (BasicBlock &BB : BasicBlocks)
+ for (const BasicBlock &BB : BasicBlocks)
NumInstrs += std::distance(BB.instructionsWithoutDebug().begin(),
BB.instructionsWithoutDebug().end());
return NumInstrs;
}
+Function *Function::Create(FunctionType *Ty, LinkageTypes Linkage,
+ const Twine &N, Module &M) {
+ return Create(Ty, Linkage, M.getDataLayout().getProgramAddressSpace(), N, &M);
+}
+
void Function::removeFromParent() {
getParent()->getFunctionList().remove(getIterator());
}
@@ -215,10 +219,19 @@ void Function::eraseFromParent() {
// Function Implementation
//===----------------------------------------------------------------------===//
-Function::Function(FunctionType *Ty, LinkageTypes Linkage, const Twine &name,
- Module *ParentModule)
+static unsigned computeAddrSpace(unsigned AddrSpace, Module *M) {
+ // If AS == -1 and we are passed a valid module pointer we place the function
+ // in the program address space. Otherwise we default to AS0.
+ if (AddrSpace == static_cast<unsigned>(-1))
+ return M ? M->getDataLayout().getProgramAddressSpace() : 0;
+ return AddrSpace;
+}
+
+Function::Function(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace,
+ const Twine &name, Module *ParentModule)
: GlobalObject(Ty, Value::FunctionVal,
- OperandTraits<Function>::op_begin(this), 0, Linkage, name),
+ OperandTraits<Function>::op_begin(this), 0, Linkage, name,
+ computeAddrSpace(AddrSpace, ParentModule)),
NumArgs(Ty->getNumParams()) {
assert(FunctionType::isValidReturnType(getReturnType()) &&
"invalid return type");
@@ -1243,13 +1256,13 @@ bool Function::hasAddressTaken(const User* *PutOffender) const {
const User *FU = U.getUser();
if (isa<BlockAddress>(FU))
continue;
- if (!isa<CallInst>(FU) && !isa<InvokeInst>(FU)) {
+ const auto *Call = dyn_cast<CallBase>(FU);
+ if (!Call) {
if (PutOffender)
*PutOffender = FU;
return true;
}
- ImmutableCallSite CS(cast<Instruction>(FU));
- if (!CS.isCallee(&U)) {
+ if (!Call->isCallee(&U)) {
if (PutOffender)
*PutOffender = FU;
return true;
@@ -1275,12 +1288,10 @@ bool Function::isDefTriviallyDead() const {
/// callsFunctionThatReturnsTwice - Return true if the function has a call to
/// setjmp or other function that gcc recognizes as "returning twice".
bool Function::callsFunctionThatReturnsTwice() const {
- for (const_inst_iterator
- I = inst_begin(this), E = inst_end(this); I != E; ++I) {
- ImmutableCallSite CS(&*I);
- if (CS && CS.hasFnAttr(Attribute::ReturnsTwice))
- return true;
- }
+ for (const Instruction &I : instructions(this))
+ if (const auto *Call = dyn_cast<CallBase>(&I))
+ if (Call->hasFnAttr(Attribute::ReturnsTwice))
+ return true;
return false;
}
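A minimal sketch of the new Function::Create convenience overload, assuming FT is a FunctionType and M a Module whose data layout declares a non-zero program address space:

// The function is placed in M's program address space automatically,
// instead of defaulting to address space 0.
Function *Callee =
    Function::Create(FT, GlobalValue::ExternalLinkage, "callee", M);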
diff --git a/lib/IR/Globals.cpp b/lib/IR/Globals.cpp
index 20b2334a626f..cbd6450a20c9 100644
--- a/lib/IR/Globals.cpp
+++ b/lib/IR/Globals.cpp
@@ -108,6 +108,11 @@ unsigned GlobalValue::getAlignment() const {
return cast<GlobalObject>(this)->getAlignment();
}
+unsigned GlobalValue::getAddressSpace() const {
+ PointerType *PtrTy = getType();
+ return PtrTy->getAddressSpace();
+}
+
void GlobalObject::setAlignment(unsigned Align) {
assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
assert(Align <= MaximumAlignment &&
@@ -247,7 +252,7 @@ bool GlobalValue::canIncreaseAlignment() const {
// Conservatively assume ELF if there's no parent pointer.
bool isELF =
(!Parent || Triple(Parent->getTargetTriple()).isOSBinFormatELF());
- if (isELF && hasDefaultVisibility() && !hasLocalLinkage())
+ if (isELF && !isDSOLocal())
return false;
return true;
diff --git a/lib/IR/IRBuilder.cpp b/lib/IR/IRBuilder.cpp
index 405a56bfb31d..a98189956770 100644
--- a/lib/IR/IRBuilder.cpp
+++ b/lib/IR/IRBuilder.cpp
@@ -50,6 +50,7 @@ GlobalVariable *IRBuilderBase::CreateGlobalString(StringRef Str,
nullptr, GlobalVariable::NotThreadLocal,
AddressSpace);
GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
+ GV->setAlignment(1);
return GV;
}
@@ -730,28 +731,29 @@ CallInst *IRBuilderBase::CreateGCRelocate(Instruction *Statepoint,
return createCallHelper(FnGCRelocate, Args, this, Name);
}
-CallInst *IRBuilderBase::CreateBinaryIntrinsic(Intrinsic::ID ID,
- Value *LHS, Value *RHS,
- const Twine &Name) {
+CallInst *IRBuilderBase::CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
+ Instruction *FMFSource,
+ const Twine &Name) {
Module *M = BB->getModule();
- Function *Fn = Intrinsic::getDeclaration(M, ID, { LHS->getType() });
- return createCallHelper(Fn, { LHS, RHS }, this, Name);
+ Function *Fn = Intrinsic::getDeclaration(M, ID, {V->getType()});
+ return createCallHelper(Fn, {V}, this, Name, FMFSource);
}
-CallInst *IRBuilderBase::CreateIntrinsic(Intrinsic::ID ID,
- Instruction *FMFSource,
- const Twine &Name) {
+CallInst *IRBuilderBase::CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS,
+ Value *RHS,
+ Instruction *FMFSource,
+ const Twine &Name) {
Module *M = BB->getModule();
- Function *Fn = Intrinsic::getDeclaration(M, ID);
- return createCallHelper(Fn, {}, this, Name);
+ Function *Fn = Intrinsic::getDeclaration(M, ID, { LHS->getType() });
+ return createCallHelper(Fn, {LHS, RHS}, this, Name, FMFSource);
}
CallInst *IRBuilderBase::CreateIntrinsic(Intrinsic::ID ID,
+ ArrayRef<Type *> Types,
ArrayRef<Value *> Args,
Instruction *FMFSource,
const Twine &Name) {
- assert(!Args.empty() && "Expected at least one argument to intrinsic");
Module *M = BB->getModule();
- Function *Fn = Intrinsic::getDeclaration(M, ID, { Args.front()->getType() });
+ Function *Fn = Intrinsic::getDeclaration(M, ID, Types);
return createCallHelper(Fn, Args, this, Name, FMFSource);
}
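A minimal sketch of the reshuffled intrinsic helpers, assuming Builder is an IRBuilder<>, F and G are floating-point values, and A and B share an integer type; the FMFSource parameter (omitted here) copies fast-math flags from an existing instruction:

Value *Abs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, F);
Value *Max = Builder.CreateBinaryIntrinsic(Intrinsic::maxnum, F, G);
// The general form now takes the overload types explicitly rather than
// deriving them from the first argument.
Value *Rot = Builder.CreateIntrinsic(Intrinsic::fshl, {A->getType()},
                                     {A, A, B});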
diff --git a/lib/IR/IRPrintingPasses.cpp b/lib/IR/IRPrintingPasses.cpp
index befe1d9ffb1c..43010220b9f3 100644
--- a/lib/IR/IRPrintingPasses.cpp
+++ b/lib/IR/IRPrintingPasses.cpp
@@ -27,7 +27,8 @@ PrintModulePass::PrintModulePass(raw_ostream &OS, const std::string &Banner,
ShouldPreserveUseListOrder(ShouldPreserveUseListOrder) {}
PreservedAnalyses PrintModulePass::run(Module &M, ModuleAnalysisManager &) {
- OS << Banner;
+ if (!Banner.empty())
+ OS << Banner << "\n";
if (llvm::isFunctionInPrintList("*"))
M.print(OS, nullptr, ShouldPreserveUseListOrder);
else {
diff --git a/lib/IR/Instruction.cpp b/lib/IR/Instruction.cpp
index 508db9bcaf19..d861b5288592 100644
--- a/lib/IR/Instruction.cpp
+++ b/lib/IR/Instruction.cpp
@@ -303,6 +303,9 @@ const char *Instruction::getOpcodeName(unsigned OpCode) {
case CatchPad: return "catchpad";
case CatchSwitch: return "catchswitch";
+ // Standard unary operators...
+ case FNeg: return "fneg";
+
// Standard binary operators...
case Add: return "add";
case FAdd: return "fadd";
@@ -592,7 +595,15 @@ bool Instruction::mayThrow() const {
bool Instruction::isSafeToRemove() const {
return (!isa<CallInst>(this) || !this->mayHaveSideEffects()) &&
- !isa<TerminatorInst>(this);
+ !this->isTerminator();
+}
+
+bool Instruction::isLifetimeStartOrEnd() const {
+ auto II = dyn_cast<IntrinsicInst>(this);
+ if (!II)
+ return false;
+ Intrinsic::ID ID = II->getIntrinsicID();
+ return ID == Intrinsic::lifetime_start || ID == Intrinsic::lifetime_end;
}
const Instruction *Instruction::getNextNonDebugInstruction() const {
@@ -602,6 +613,13 @@ const Instruction *Instruction::getNextNonDebugInstruction() const {
return nullptr;
}
+const Instruction *Instruction::getPrevNonDebugInstruction() const {
+ for (const Instruction *I = getPrevNode(); I; I = I->getPrevNode())
+ if (!isa<DbgInfoIntrinsic>(I))
+ return I;
+ return nullptr;
+}
+
bool Instruction::isAssociative() const {
unsigned Opcode = getOpcode();
if (isAssociative(Opcode))
@@ -617,6 +635,42 @@ bool Instruction::isAssociative() const {
}
}
+unsigned Instruction::getNumSuccessors() const {
+ switch (getOpcode()) {
+#define HANDLE_TERM_INST(N, OPC, CLASS) \
+ case Instruction::OPC: \
+ return static_cast<const CLASS *>(this)->getNumSuccessors();
+#include "llvm/IR/Instruction.def"
+ default:
+ break;
+ }
+ llvm_unreachable("not a terminator");
+}
+
+BasicBlock *Instruction::getSuccessor(unsigned idx) const {
+ switch (getOpcode()) {
+#define HANDLE_TERM_INST(N, OPC, CLASS) \
+ case Instruction::OPC: \
+ return static_cast<const CLASS *>(this)->getSuccessor(idx);
+#include "llvm/IR/Instruction.def"
+ default:
+ break;
+ }
+ llvm_unreachable("not a terminator");
+}
+
+void Instruction::setSuccessor(unsigned idx, BasicBlock *B) {
+ switch (getOpcode()) {
+#define HANDLE_TERM_INST(N, OPC, CLASS) \
+ case Instruction::OPC: \
+ return static_cast<CLASS *>(this)->setSuccessor(idx, B);
+#include "llvm/IR/Instruction.def"
+ default:
+ break;
+ }
+ llvm_unreachable("not a terminator");
+}
+
Instruction *Instruction::cloneImpl() const {
llvm_unreachable("Subclass of Instruction failed to implement cloneImpl");
}
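A minimal sketch of the effect on callers, assuming BB is a BasicBlock in well-formed IR: the terminator is now just an Instruction, and the successor accessors moved here from TerminatorInst still assert when called on a non-terminator:

Instruction *TI = BB->getTerminator();
for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) {
  BasicBlock *Succ = TI->getSuccessor(i);
  // ... use Succ ...
}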
diff --git a/lib/IR/Instructions.cpp b/lib/IR/Instructions.cpp
index 32db918dab97..06b46724a87f 100644
--- a/lib/IR/Instructions.cpp
+++ b/lib/IR/Instructions.cpp
@@ -27,6 +27,7 @@
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
@@ -65,50 +66,7 @@ AllocaInst::getAllocationSizeInBits(const DataLayout &DL) const {
//===----------------------------------------------------------------------===//
User::op_iterator CallSite::getCallee() const {
- Instruction *II(getInstruction());
- return isCall()
- ? cast<CallInst>(II)->op_end() - 1 // Skip Callee
- : cast<InvokeInst>(II)->op_end() - 3; // Skip BB, BB, Callee
-}
-
-//===----------------------------------------------------------------------===//
-// TerminatorInst Class
-//===----------------------------------------------------------------------===//
-
-unsigned TerminatorInst::getNumSuccessors() const {
- switch (getOpcode()) {
-#define HANDLE_TERM_INST(N, OPC, CLASS) \
- case Instruction::OPC: \
- return static_cast<const CLASS *>(this)->getNumSuccessors();
-#include "llvm/IR/Instruction.def"
- default:
- break;
- }
- llvm_unreachable("not a terminator");
-}
-
-BasicBlock *TerminatorInst::getSuccessor(unsigned idx) const {
- switch (getOpcode()) {
-#define HANDLE_TERM_INST(N, OPC, CLASS) \
- case Instruction::OPC: \
- return static_cast<const CLASS *>(this)->getSuccessor(idx);
-#include "llvm/IR/Instruction.def"
- default:
- break;
- }
- llvm_unreachable("not a terminator");
-}
-
-void TerminatorInst::setSuccessor(unsigned idx, BasicBlock *B) {
- switch (getOpcode()) {
-#define HANDLE_TERM_INST(N, OPC, CLASS) \
- case Instruction::OPC: \
- return static_cast<CLASS *>(this)->setSuccessor(idx, B);
-#include "llvm/IR/Instruction.def"
- default:
- break;
- }
- llvm_unreachable("not a terminator");
+ return cast<CallBase>(getInstruction())->op_end() - 1;
}
//===----------------------------------------------------------------------===//
@@ -294,6 +252,112 @@ void LandingPadInst::addClause(Constant *Val) {
}
//===----------------------------------------------------------------------===//
+// CallBase Implementation
+//===----------------------------------------------------------------------===//
+
+Function *CallBase::getCaller() { return getParent()->getParent(); }
+
+bool CallBase::isIndirectCall() const {
+ const Value *V = getCalledValue();
+ if (isa<Function>(V) || isa<Constant>(V))
+ return false;
+ if (const CallInst *CI = dyn_cast<CallInst>(this))
+ if (CI->isInlineAsm())
+ return false;
+ return true;
+}
+
+Intrinsic::ID CallBase::getIntrinsicID() const {
+ if (auto *F = getCalledFunction())
+ return F->getIntrinsicID();
+ return Intrinsic::not_intrinsic;
+}
+
+bool CallBase::isReturnNonNull() const {
+ if (hasRetAttr(Attribute::NonNull))
+ return true;
+
+ if (getDereferenceableBytes(AttributeList::ReturnIndex) > 0 &&
+ !NullPointerIsDefined(getCaller(),
+ getType()->getPointerAddressSpace()))
+ return true;
+
+ return false;
+}
+
+Value *CallBase::getReturnedArgOperand() const {
+ unsigned Index;
+
+ if (Attrs.hasAttrSomewhere(Attribute::Returned, &Index) && Index)
+ return getArgOperand(Index - AttributeList::FirstArgIndex);
+ if (const Function *F = getCalledFunction())
+ if (F->getAttributes().hasAttrSomewhere(Attribute::Returned, &Index) &&
+ Index)
+ return getArgOperand(Index - AttributeList::FirstArgIndex);
+
+ return nullptr;
+}
+
+bool CallBase::hasRetAttr(Attribute::AttrKind Kind) const {
+ if (Attrs.hasAttribute(AttributeList::ReturnIndex, Kind))
+ return true;
+
+ // Look at the callee, if available.
+ if (const Function *F = getCalledFunction())
+ return F->getAttributes().hasAttribute(AttributeList::ReturnIndex, Kind);
+ return false;
+}
+
+/// Determine whether the argument or parameter has the given attribute.
+bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
+ assert(ArgNo < getNumArgOperands() && "Param index out of bounds!");
+
+ if (Attrs.hasParamAttribute(ArgNo, Kind))
+ return true;
+ if (const Function *F = getCalledFunction())
+ return F->getAttributes().hasParamAttribute(ArgNo, Kind);
+ return false;
+}
+
+bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
+ if (const Function *F = getCalledFunction())
+ return F->getAttributes().hasAttribute(AttributeList::FunctionIndex, Kind);
+ return false;
+}
+
+bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
+ if (const Function *F = getCalledFunction())
+ return F->getAttributes().hasAttribute(AttributeList::FunctionIndex, Kind);
+ return false;
+}
+
+CallBase::op_iterator
+CallBase::populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
+ const unsigned BeginIndex) {
+ auto It = op_begin() + BeginIndex;
+ for (auto &B : Bundles)
+ It = std::copy(B.input_begin(), B.input_end(), It);
+
+ auto *ContextImpl = getContext().pImpl;
+ auto BI = Bundles.begin();
+ unsigned CurrentIndex = BeginIndex;
+
+ for (auto &BOI : bundle_op_infos()) {
+ assert(BI != Bundles.end() && "Incorrect allocation?");
+
+ BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
+ BOI.Begin = CurrentIndex;
+ BOI.End = CurrentIndex + BI->input_size();
+ CurrentIndex = BOI.End;
+ BI++;
+ }
+
+ assert(BI == Bundles.end() && "Incorrect allocation?");
+
+ return It;
+}
+
+//===----------------------------------------------------------------------===//
// CallInst Implementation
//===----------------------------------------------------------------------===//
@@ -302,7 +366,7 @@ void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
this->FTy = FTy;
assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
"NumOperands not set up?");
- Op<-1>() = Func;
+ setCalledOperand(Func);
#ifndef NDEBUG
assert((Args.size() == FTy->getNumParams() ||
@@ -315,7 +379,7 @@ void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
"Calling a function with a bad signature!");
#endif
- std::copy(Args.begin(), Args.end(), op_begin());
+ llvm::copy(Args, op_begin());
auto It = populateBundleOperandInfos(Bundles, Args.size());
(void)It;
@@ -324,43 +388,34 @@ void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
setName(NameStr);
}
-void CallInst::init(Value *Func, const Twine &NameStr) {
- FTy =
- cast<FunctionType>(cast<PointerType>(Func->getType())->getElementType());
+void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
+ this->FTy = FTy;
assert(getNumOperands() == 1 && "NumOperands not set up?");
- Op<-1>() = Func;
+ setCalledOperand(Func);
assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");
setName(NameStr);
}
-CallInst::CallInst(Value *Func, const Twine &Name, Instruction *InsertBefore)
- : CallBase<CallInst>(
- cast<FunctionType>(
- cast<PointerType>(Func->getType())->getElementType())
- ->getReturnType(),
- Instruction::Call,
- OperandTraits<CallBase<CallInst>>::op_end(this) - 1, 1,
- InsertBefore) {
- init(Func, Name);
+CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
+ Instruction *InsertBefore)
+ : CallBase(Ty->getReturnType(), Instruction::Call,
+ OperandTraits<CallBase>::op_end(this) - 1, 1, InsertBefore) {
+ init(Ty, Func, Name);
}
-CallInst::CallInst(Value *Func, const Twine &Name, BasicBlock *InsertAtEnd)
- : CallBase<CallInst>(
- cast<FunctionType>(
- cast<PointerType>(Func->getType())->getElementType())
- ->getReturnType(),
- Instruction::Call,
- OperandTraits<CallBase<CallInst>>::op_end(this) - 1, 1, InsertAtEnd) {
- init(Func, Name);
+CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
+ BasicBlock *InsertAtEnd)
+ : CallBase(Ty->getReturnType(), Instruction::Call,
+ OperandTraits<CallBase>::op_end(this) - 1, 1, InsertAtEnd) {
+ init(Ty, Func, Name);
}
CallInst::CallInst(const CallInst &CI)
- : CallBase<CallInst>(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call,
- OperandTraits<CallBase<CallInst>>::op_end(this) -
- CI.getNumOperands(),
- CI.getNumOperands()) {
+ : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call,
+ OperandTraits<CallBase>::op_end(this) - CI.getNumOperands(),
+ CI.getNumOperands()) {
setTailCallKind(CI.getTailCallKind());
setCallingConv(CI.getCallingConv());
@@ -600,11 +655,12 @@ void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
const Twine &NameStr) {
this->FTy = FTy;
- assert(getNumOperands() == 3 + Args.size() + CountBundleInputs(Bundles) &&
+ assert((int)getNumOperands() ==
+ ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
"NumOperands not set up?");
- Op<-3>() = Fn;
- Op<-2>() = IfNormal;
- Op<-1>() = IfException;
+ setNormalDest(IfNormal);
+ setUnwindDest(IfException);
+ setCalledOperand(Fn);
#ifndef NDEBUG
assert(((Args.size() == FTy->getNumParams()) ||
@@ -617,7 +673,7 @@ void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
"Invoking a function with a bad signature!");
#endif
- std::copy(Args.begin(), Args.end(), op_begin());
+ llvm::copy(Args, op_begin());
auto It = populateBundleOperandInfos(Bundles, Args.size());
(void)It;
@@ -627,10 +683,9 @@ void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
}
InvokeInst::InvokeInst(const InvokeInst &II)
- : CallBase<InvokeInst>(II.Attrs, II.FTy, II.getType(), Instruction::Invoke,
- OperandTraits<CallBase<InvokeInst>>::op_end(this) -
- II.getNumOperands(),
- II.getNumOperands()) {
+ : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke,
+ OperandTraits<CallBase>::op_end(this) - II.getNumOperands(),
+ II.getNumOperands()) {
setCallingConv(II.getCallingConv());
std::copy(II.op_begin(), II.op_end(), op_begin());
std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
@@ -662,55 +717,53 @@ LandingPadInst *InvokeInst::getLandingPadInst() const {
//===----------------------------------------------------------------------===//
ReturnInst::ReturnInst(const ReturnInst &RI)
- : TerminatorInst(Type::getVoidTy(RI.getContext()), Instruction::Ret,
- OperandTraits<ReturnInst>::op_end(this) -
- RI.getNumOperands(),
- RI.getNumOperands()) {
+ : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
+ OperandTraits<ReturnInst>::op_end(this) - RI.getNumOperands(),
+ RI.getNumOperands()) {
if (RI.getNumOperands())
Op<0>() = RI.Op<0>();
SubclassOptionalData = RI.SubclassOptionalData;
}
ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, Instruction *InsertBefore)
- : TerminatorInst(Type::getVoidTy(C), Instruction::Ret,
- OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
- InsertBefore) {
+ : Instruction(Type::getVoidTy(C), Instruction::Ret,
+ OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
+ InsertBefore) {
if (retVal)
Op<0>() = retVal;
}
ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd)
- : TerminatorInst(Type::getVoidTy(C), Instruction::Ret,
- OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
- InsertAtEnd) {
+ : Instruction(Type::getVoidTy(C), Instruction::Ret,
+ OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
+ InsertAtEnd) {
if (retVal)
Op<0>() = retVal;
}
ReturnInst::ReturnInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
- : TerminatorInst(Type::getVoidTy(Context), Instruction::Ret,
- OperandTraits<ReturnInst>::op_end(this), 0, InsertAtEnd) {
-}
+ : Instruction(Type::getVoidTy(Context), Instruction::Ret,
+ OperandTraits<ReturnInst>::op_end(this), 0, InsertAtEnd) {}
//===----------------------------------------------------------------------===//
// ResumeInst Implementation
//===----------------------------------------------------------------------===//
ResumeInst::ResumeInst(const ResumeInst &RI)
- : TerminatorInst(Type::getVoidTy(RI.getContext()), Instruction::Resume,
- OperandTraits<ResumeInst>::op_begin(this), 1) {
+ : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
+ OperandTraits<ResumeInst>::op_begin(this), 1) {
Op<0>() = RI.Op<0>();
}
ResumeInst::ResumeInst(Value *Exn, Instruction *InsertBefore)
- : TerminatorInst(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
- OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
+ : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
+ OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
Op<0>() = Exn;
}
ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd)
- : TerminatorInst(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
- OperandTraits<ResumeInst>::op_begin(this), 1, InsertAtEnd) {
+ : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
+ OperandTraits<ResumeInst>::op_begin(this), 1, InsertAtEnd) {
Op<0>() = Exn;
}
@@ -719,10 +772,10 @@ ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd)
//===----------------------------------------------------------------------===//
CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI)
- : TerminatorInst(CRI.getType(), Instruction::CleanupRet,
- OperandTraits<CleanupReturnInst>::op_end(this) -
- CRI.getNumOperands(),
- CRI.getNumOperands()) {
+ : Instruction(CRI.getType(), Instruction::CleanupRet,
+ OperandTraits<CleanupReturnInst>::op_end(this) -
+ CRI.getNumOperands(),
+ CRI.getNumOperands()) {
setInstructionSubclassData(CRI.getSubclassDataFromInstruction());
Op<0>() = CRI.Op<0>();
if (CRI.hasUnwindDest())
@@ -740,19 +793,19 @@ void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
unsigned Values, Instruction *InsertBefore)
- : TerminatorInst(Type::getVoidTy(CleanupPad->getContext()),
- Instruction::CleanupRet,
- OperandTraits<CleanupReturnInst>::op_end(this) - Values,
- Values, InsertBefore) {
+ : Instruction(Type::getVoidTy(CleanupPad->getContext()),
+ Instruction::CleanupRet,
+ OperandTraits<CleanupReturnInst>::op_end(this) - Values,
+ Values, InsertBefore) {
init(CleanupPad, UnwindBB);
}
CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
unsigned Values, BasicBlock *InsertAtEnd)
- : TerminatorInst(Type::getVoidTy(CleanupPad->getContext()),
- Instruction::CleanupRet,
- OperandTraits<CleanupReturnInst>::op_end(this) - Values,
- Values, InsertAtEnd) {
+ : Instruction(Type::getVoidTy(CleanupPad->getContext()),
+ Instruction::CleanupRet,
+ OperandTraits<CleanupReturnInst>::op_end(this) - Values,
+ Values, InsertAtEnd) {
init(CleanupPad, UnwindBB);
}
@@ -765,25 +818,25 @@ void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
}
CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
- : TerminatorInst(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
- OperandTraits<CatchReturnInst>::op_begin(this), 2) {
+ : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
+ OperandTraits<CatchReturnInst>::op_begin(this), 2) {
Op<0>() = CRI.Op<0>();
Op<1>() = CRI.Op<1>();
}
CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
Instruction *InsertBefore)
- : TerminatorInst(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
- OperandTraits<CatchReturnInst>::op_begin(this), 2,
- InsertBefore) {
+ : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
+ OperandTraits<CatchReturnInst>::op_begin(this), 2,
+ InsertBefore) {
init(CatchPad, BB);
}
CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
BasicBlock *InsertAtEnd)
- : TerminatorInst(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
- OperandTraits<CatchReturnInst>::op_begin(this), 2,
- InsertAtEnd) {
+ : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
+ OperandTraits<CatchReturnInst>::op_begin(this), 2,
+ InsertAtEnd) {
init(CatchPad, BB);
}
@@ -795,8 +848,8 @@ CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
unsigned NumReservedValues,
const Twine &NameStr,
Instruction *InsertBefore)
- : TerminatorInst(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
- InsertBefore) {
+ : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
+ InsertBefore) {
if (UnwindDest)
++NumReservedValues;
init(ParentPad, UnwindDest, NumReservedValues + 1);
@@ -806,8 +859,8 @@ CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
unsigned NumReservedValues,
const Twine &NameStr, BasicBlock *InsertAtEnd)
- : TerminatorInst(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
- InsertAtEnd) {
+ : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
+ InsertAtEnd) {
if (UnwindDest)
++NumReservedValues;
init(ParentPad, UnwindDest, NumReservedValues + 1);
@@ -815,8 +868,8 @@ CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
}
CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
- : TerminatorInst(CSI.getType(), Instruction::CatchSwitch, nullptr,
- CSI.getNumOperands()) {
+ : Instruction(CSI.getType(), Instruction::CatchSwitch, nullptr,
+ CSI.getNumOperands()) {
init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
setNumHungOffUseOperands(ReservedSpace);
Use *OL = getOperandList();
@@ -876,7 +929,7 @@ void CatchSwitchInst::removeHandler(handler_iterator HI) {
void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
const Twine &NameStr) {
assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
- std::copy(Args.begin(), Args.end(), op_begin());
+ llvm::copy(Args, op_begin());
setParentPad(ParentPad);
setName(NameStr);
}
@@ -914,13 +967,11 @@ FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
UnreachableInst::UnreachableInst(LLVMContext &Context,
Instruction *InsertBefore)
- : TerminatorInst(Type::getVoidTy(Context), Instruction::Unreachable,
- nullptr, 0, InsertBefore) {
-}
+ : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
+ 0, InsertBefore) {}
UnreachableInst::UnreachableInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
- : TerminatorInst(Type::getVoidTy(Context), Instruction::Unreachable,
- nullptr, 0, InsertAtEnd) {
-}
+ : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
+ 0, InsertAtEnd) {}
//===----------------------------------------------------------------------===//
// BranchInst Implementation
@@ -933,18 +984,18 @@ void BranchInst::AssertOK() {
}
BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore)
- : TerminatorInst(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
- OperandTraits<BranchInst>::op_end(this) - 1,
- 1, InsertBefore) {
+ : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
+ OperandTraits<BranchInst>::op_end(this) - 1, 1,
+ InsertBefore) {
assert(IfTrue && "Branch destination may not be null!");
Op<-1>() = IfTrue;
}
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
Instruction *InsertBefore)
- : TerminatorInst(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
- OperandTraits<BranchInst>::op_end(this) - 3,
- 3, InsertBefore) {
+ : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
+ OperandTraits<BranchInst>::op_end(this) - 3, 3,
+ InsertBefore) {
Op<-1>() = IfTrue;
Op<-2>() = IfFalse;
Op<-3>() = Cond;
@@ -954,18 +1005,16 @@ BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
}
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd)
- : TerminatorInst(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
- OperandTraits<BranchInst>::op_end(this) - 1,
- 1, InsertAtEnd) {
+ : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
+ OperandTraits<BranchInst>::op_end(this) - 1, 1, InsertAtEnd) {
assert(IfTrue && "Branch destination may not be null!");
Op<-1>() = IfTrue;
}
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
- BasicBlock *InsertAtEnd)
- : TerminatorInst(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
- OperandTraits<BranchInst>::op_end(this) - 3,
- 3, InsertAtEnd) {
+ BasicBlock *InsertAtEnd)
+ : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
+ OperandTraits<BranchInst>::op_end(this) - 3, 3, InsertAtEnd) {
Op<-1>() = IfTrue;
Op<-2>() = IfFalse;
Op<-3>() = Cond;
@@ -974,10 +1023,10 @@ BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
#endif
}
-BranchInst::BranchInst(const BranchInst &BI) :
- TerminatorInst(Type::getVoidTy(BI.getContext()), Instruction::Br,
- OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(),
- BI.getNumOperands()) {
+BranchInst::BranchInst(const BranchInst &BI)
+ : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
+ OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(),
+ BI.getNumOperands()) {
Op<-1>() = BI.Op<-1>();
if (BI.getNumOperands() != 1) {
assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
@@ -1089,28 +1138,30 @@ void LoadInst::AssertOK() {
"Alignment required for atomic load");
}
-LoadInst::LoadInst(Value *Ptr, const Twine &Name, Instruction *InsertBef)
- : LoadInst(Ptr, Name, /*isVolatile=*/false, InsertBef) {}
+LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
+ Instruction *InsertBef)
+ : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}
-LoadInst::LoadInst(Value *Ptr, const Twine &Name, BasicBlock *InsertAE)
- : LoadInst(Ptr, Name, /*isVolatile=*/false, InsertAE) {}
+LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
+ BasicBlock *InsertAE)
+ : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertAE) {}
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
Instruction *InsertBef)
: LoadInst(Ty, Ptr, Name, isVolatile, /*Align=*/0, InsertBef) {}
-LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
+LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
BasicBlock *InsertAE)
- : LoadInst(Ptr, Name, isVolatile, /*Align=*/0, InsertAE) {}
+ : LoadInst(Ty, Ptr, Name, isVolatile, /*Align=*/0, InsertAE) {}
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
unsigned Align, Instruction *InsertBef)
: LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
SyncScope::System, InsertBef) {}
-LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
+LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
unsigned Align, BasicBlock *InsertAE)
- : LoadInst(Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
+ : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
SyncScope::System, InsertAE) {}
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
@@ -1125,12 +1176,11 @@ LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
setName(Name);
}
-LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
- unsigned Align, AtomicOrdering Order,
- SyncScope::ID SSID,
+LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
+ unsigned Align, AtomicOrdering Order, SyncScope::ID SSID,
BasicBlock *InsertAE)
- : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
- Load, Ptr, InsertAE) {
+ : UnaryInstruction(Ty, Load, Ptr, InsertAE) {
+ assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
setVolatile(isVolatile);
setAlignment(Align);
setAtomic(Order, SSID);
@@ -1138,48 +1188,6 @@ LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
setName(Name);
}
-LoadInst::LoadInst(Value *Ptr, const char *Name, Instruction *InsertBef)
- : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
- Load, Ptr, InsertBef) {
- setVolatile(false);
- setAlignment(0);
- setAtomic(AtomicOrdering::NotAtomic);
- AssertOK();
- if (Name && Name[0]) setName(Name);
-}
-
-LoadInst::LoadInst(Value *Ptr, const char *Name, BasicBlock *InsertAE)
- : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
- Load, Ptr, InsertAE) {
- setVolatile(false);
- setAlignment(0);
- setAtomic(AtomicOrdering::NotAtomic);
- AssertOK();
- if (Name && Name[0]) setName(Name);
-}
-
-LoadInst::LoadInst(Type *Ty, Value *Ptr, const char *Name, bool isVolatile,
- Instruction *InsertBef)
- : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
- assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
- setVolatile(isVolatile);
- setAlignment(0);
- setAtomic(AtomicOrdering::NotAtomic);
- AssertOK();
- if (Name && Name[0]) setName(Name);
-}
-
-LoadInst::LoadInst(Value *Ptr, const char *Name, bool isVolatile,
- BasicBlock *InsertAE)
- : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
- Load, Ptr, InsertAE) {
- setVolatile(isVolatile);
- setAlignment(0);
- setAtomic(AtomicOrdering::NotAtomic);
- AssertOK();
- if (Name && Name[0]) setName(Name);
-}
-
void LoadInst::setAlignment(unsigned Align) {
assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
assert(Align <= MaximumAlignment &&
@@ -1376,6 +1384,37 @@ AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
Init(Operation, Ptr, Val, Ordering, SSID);
}
+StringRef AtomicRMWInst::getOperationName(BinOp Op) {
+ switch (Op) {
+ case AtomicRMWInst::Xchg:
+ return "xchg";
+ case AtomicRMWInst::Add:
+ return "add";
+ case AtomicRMWInst::Sub:
+ return "sub";
+ case AtomicRMWInst::And:
+ return "and";
+ case AtomicRMWInst::Nand:
+ return "nand";
+ case AtomicRMWInst::Or:
+ return "or";
+ case AtomicRMWInst::Xor:
+ return "xor";
+ case AtomicRMWInst::Max:
+ return "max";
+ case AtomicRMWInst::Min:
+ return "min";
+ case AtomicRMWInst::UMax:
+ return "umax";
+ case AtomicRMWInst::UMin:
+ return "umin";
+ case AtomicRMWInst::BAD_BINOP:
+ return "<invalid operation>";
+ }
+
+ llvm_unreachable("invalid atomicrmw operation");
+}
+
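Not part of the patch: a minimal usage sketch of the new helper, e.g. when printing a diagnostic for an atomicrmw instruction (RMW is assumed to be an existing AtomicRMWInst *).

    // Illustrative only; maps the opcode back to its textual keyword.
    StringRef OpName = AtomicRMWInst::getOperationName(RMW->getOperation());
    errs() << "unsupported atomicrmw operation: " << OpName << "\n";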
//===----------------------------------------------------------------------===//
// FenceInst Implementation
//===----------------------------------------------------------------------===//
@@ -1405,7 +1444,7 @@ void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
assert(getNumOperands() == 1 + IdxList.size() &&
"NumOperands not initialized?");
Op<0>() = Ptr;
- std::copy(IdxList.begin(), IdxList.end(), op_begin() + 1);
+ llvm::copy(IdxList, op_begin() + 1);
setName(Name);
}
@@ -1700,17 +1739,17 @@ void ShuffleVectorInst::getShuffleMask(const Constant *Mask,
}
}
-bool ShuffleVectorInst::isSingleSourceMask(ArrayRef<int> Mask) {
+static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
assert(!Mask.empty() && "Shuffle mask must contain elements");
bool UsesLHS = false;
bool UsesRHS = false;
- for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) {
+ for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
if (Mask[i] == -1)
continue;
- assert(Mask[i] >= 0 && Mask[i] < (NumElts * 2) &&
+ assert(Mask[i] >= 0 && Mask[i] < (NumOpElts * 2) &&
"Out-of-bounds shuffle mask element");
- UsesLHS |= (Mask[i] < NumElts);
- UsesRHS |= (Mask[i] >= NumElts);
+ UsesLHS |= (Mask[i] < NumOpElts);
+ UsesRHS |= (Mask[i] >= NumOpElts);
if (UsesLHS && UsesRHS)
return false;
}
@@ -1718,18 +1757,30 @@ bool ShuffleVectorInst::isSingleSourceMask(ArrayRef<int> Mask) {
return true;
}
-bool ShuffleVectorInst::isIdentityMask(ArrayRef<int> Mask) {
- if (!isSingleSourceMask(Mask))
+bool ShuffleVectorInst::isSingleSourceMask(ArrayRef<int> Mask) {
+ // We don't have vector operand size information, so assume operands are the
+ // same size as the mask.
+ return isSingleSourceMaskImpl(Mask, Mask.size());
+}
+
+static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
+ if (!isSingleSourceMaskImpl(Mask, NumOpElts))
return false;
- for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) {
+ for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
if (Mask[i] == -1)
continue;
- if (Mask[i] != i && Mask[i] != (NumElts + i))
+ if (Mask[i] != i && Mask[i] != (NumOpElts + i))
return false;
}
return true;
}
+bool ShuffleVectorInst::isIdentityMask(ArrayRef<int> Mask) {
+ // We don't have vector operand size information, so assume operands are the
+ // same size as the mask.
+ return isIdentityMaskImpl(Mask, Mask.size());
+}
+
bool ShuffleVectorInst::isReverseMask(ArrayRef<int> Mask) {
if (!isSingleSourceMask(Mask))
return false;
@@ -1801,6 +1852,79 @@ bool ShuffleVectorInst::isTransposeMask(ArrayRef<int> Mask) {
return true;
}
+bool ShuffleVectorInst::isExtractSubvectorMask(ArrayRef<int> Mask,
+ int NumSrcElts, int &Index) {
+ // Must extract from a single source.
+ if (!isSingleSourceMaskImpl(Mask, NumSrcElts))
+ return false;
+
+ // Must be smaller (else this is an Identity shuffle).
+ if (NumSrcElts <= (int)Mask.size())
+ return false;
+
+ // Find start of extraction, accounting that we may start with an UNDEF.
+ int SubIndex = -1;
+ for (int i = 0, e = Mask.size(); i != e; ++i) {
+ int M = Mask[i];
+ if (M < 0)
+ continue;
+ int Offset = (M % NumSrcElts) - i;
+ if (0 <= SubIndex && SubIndex != Offset)
+ return false;
+ SubIndex = Offset;
+ }
+
+ if (0 <= SubIndex) {
+ Index = SubIndex;
+ return true;
+ }
+ return false;
+}
+
+bool ShuffleVectorInst::isIdentityWithPadding() const {
+ int NumOpElts = Op<0>()->getType()->getVectorNumElements();
+ int NumMaskElts = getType()->getVectorNumElements();
+ if (NumMaskElts <= NumOpElts)
+ return false;
+
+ // The first part of the mask must choose elements from exactly 1 source op.
+ SmallVector<int, 16> Mask = getShuffleMask();
+ if (!isIdentityMaskImpl(Mask, NumOpElts))
+ return false;
+
+ // All extending must be with undef elements.
+ for (int i = NumOpElts; i < NumMaskElts; ++i)
+ if (Mask[i] != -1)
+ return false;
+
+ return true;
+}
+
+bool ShuffleVectorInst::isIdentityWithExtract() const {
+ int NumOpElts = Op<0>()->getType()->getVectorNumElements();
+ int NumMaskElts = getType()->getVectorNumElements();
+ if (NumMaskElts >= NumOpElts)
+ return false;
+
+ return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
+}
+
+bool ShuffleVectorInst::isConcat() const {
+ // Vector concatenation is differentiated from identity with padding.
+ if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>()))
+ return false;
+
+ int NumOpElts = Op<0>()->getType()->getVectorNumElements();
+ int NumMaskElts = getType()->getVectorNumElements();
+ if (NumMaskElts != NumOpElts * 2)
+ return false;
+
+ // Use the mask length rather than the operands' vector lengths here. We
+ // already know that the shuffle returns a vector twice as long as the inputs,
+  // and neither of the inputs is an undef vector. If the mask picks consecutive
+ // elements from both inputs, then this is a concatenation of the inputs.
+ return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
+}
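Not part of the patch: a small sketch showing how the new static mask classifier can be queried directly on an integer mask (the values here are hypothetical).

    SmallVector<int, 4> Mask = {4, 5, 6, 7}; // read elements 4..7 of an 8-wide source
    int Index;
    if (ShuffleVectorInst::isExtractSubvectorMask(Mask, /*NumSrcElts=*/8, Index)) {
      // Index == 4: the shuffle extracts a contiguous subvector starting at element 4.
    }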
//===----------------------------------------------------------------------===//
// InsertValueInst Class
@@ -1887,6 +2011,59 @@ Type *ExtractValueInst::getIndexedType(Type *Agg,
}
//===----------------------------------------------------------------------===//
+// UnaryOperator Class
+//===----------------------------------------------------------------------===//
+
+UnaryOperator::UnaryOperator(UnaryOps iType, Value *S,
+ Type *Ty, const Twine &Name,
+ Instruction *InsertBefore)
+ : UnaryInstruction(Ty, iType, S, InsertBefore) {
+ Op<0>() = S;
+ setName(Name);
+ AssertOK();
+}
+
+UnaryOperator::UnaryOperator(UnaryOps iType, Value *S,
+ Type *Ty, const Twine &Name,
+ BasicBlock *InsertAtEnd)
+ : UnaryInstruction(Ty, iType, S, InsertAtEnd) {
+ Op<0>() = S;
+ setName(Name);
+ AssertOK();
+}
+
+UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S,
+ const Twine &Name,
+ Instruction *InsertBefore) {
+ return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
+}
+
+UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S,
+ const Twine &Name,
+ BasicBlock *InsertAtEnd) {
+ UnaryOperator *Res = Create(Op, S, Name);
+ InsertAtEnd->getInstList().push_back(Res);
+ return Res;
+}
+
+void UnaryOperator::AssertOK() {
+ Value *LHS = getOperand(0);
+ (void)LHS; // Silence warnings.
+#ifndef NDEBUG
+ switch (getOpcode()) {
+ case FNeg:
+ assert(getType() == LHS->getType() &&
+ "Unary operation should return same type as operand!");
+ assert(getType()->isFPOrFPVectorTy() &&
+ "Tried to create a floating-point operation on a "
+ "non-floating-point type!");
+ break;
+ default: llvm_unreachable("Invalid opcode provided");
+ }
+#endif
+}
+
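Not part of the patch: a hedged sketch of creating the new unary operator. It assumes FNeg is the UnaryOps opcode introduced by the wider change (the enum itself is declared elsewhere); X and InsertPt are assumed to be an existing floating-point Value * and insertion point.

    UnaryOperator *Neg =
        UnaryOperator::Create(Instruction::FNeg, X, "neg", /*InsertBefore=*/InsertPt);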
+//===----------------------------------------------------------------------===//
// BinaryOperator Class
//===----------------------------------------------------------------------===//
@@ -2068,71 +2245,6 @@ BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
Op->getType(), Name, InsertAtEnd);
}
-// isConstantAllOnes - Helper function for several functions below
-static inline bool isConstantAllOnes(const Value *V) {
- if (const Constant *C = dyn_cast<Constant>(V))
- return C->isAllOnesValue();
- return false;
-}
-
-bool BinaryOperator::isNeg(const Value *V) {
- if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(V))
- if (Bop->getOpcode() == Instruction::Sub)
- if (Constant *C = dyn_cast<Constant>(Bop->getOperand(0)))
- return C->isNegativeZeroValue();
- return false;
-}
-
-bool BinaryOperator::isFNeg(const Value *V, bool IgnoreZeroSign) {
- if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(V))
- if (Bop->getOpcode() == Instruction::FSub)
- if (Constant *C = dyn_cast<Constant>(Bop->getOperand(0))) {
- if (!IgnoreZeroSign)
- IgnoreZeroSign = cast<Instruction>(V)->hasNoSignedZeros();
- return !IgnoreZeroSign ? C->isNegativeZeroValue() : C->isZeroValue();
- }
- return false;
-}
-
-bool BinaryOperator::isNot(const Value *V) {
- if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(V))
- return (Bop->getOpcode() == Instruction::Xor &&
- (isConstantAllOnes(Bop->getOperand(1)) ||
- isConstantAllOnes(Bop->getOperand(0))));
- return false;
-}
-
-Value *BinaryOperator::getNegArgument(Value *BinOp) {
- return cast<BinaryOperator>(BinOp)->getOperand(1);
-}
-
-const Value *BinaryOperator::getNegArgument(const Value *BinOp) {
- return getNegArgument(const_cast<Value*>(BinOp));
-}
-
-Value *BinaryOperator::getFNegArgument(Value *BinOp) {
- return cast<BinaryOperator>(BinOp)->getOperand(1);
-}
-
-const Value *BinaryOperator::getFNegArgument(const Value *BinOp) {
- return getFNegArgument(const_cast<Value*>(BinOp));
-}
-
-Value *BinaryOperator::getNotArgument(Value *BinOp) {
- assert(isNot(BinOp) && "getNotArgument on non-'not' instruction!");
- BinaryOperator *BO = cast<BinaryOperator>(BinOp);
- Value *Op0 = BO->getOperand(0);
- Value *Op1 = BO->getOperand(1);
- if (isConstantAllOnes(Op0)) return Op1;
-
- assert(isConstantAllOnes(Op1));
- return Op0;
-}
-
-const Value *BinaryOperator::getNotArgument(const Value *BinOp) {
- return getNotArgument(const_cast<Value*>(BinOp));
-}
-
// Exchange the two operands to this instruction. This instruction is safe to
// use on any binary instruction and does not modify the semantics of the
// instruction. If the instruction is order-dependent (SetLT f.e.), the opcode
@@ -2978,12 +3090,14 @@ CastInst::castIsValid(Instruction::CastOps op, Value *S, Type *DstTy) {
return false;
// A vector of pointers must have the same number of elements.
- if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
- if (VectorType *DstVecTy = dyn_cast<VectorType>(DstTy))
- return (SrcVecTy->getNumElements() == DstVecTy->getNumElements());
-
- return false;
- }
+ VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy);
+ VectorType *DstVecTy = dyn_cast<VectorType>(DstTy);
+ if (SrcVecTy && DstVecTy)
+ return (SrcVecTy->getNumElements() == DstVecTy->getNumElements());
+ if (SrcVecTy)
+ return SrcVecTy->getNumElements() == 1;
+ if (DstVecTy)
+ return DstVecTy->getNumElements() == 1;
return true;
}
@@ -3171,15 +3285,18 @@ AddrSpaceCastInst::AddrSpaceCastInst(
//===----------------------------------------------------------------------===//
CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
- Value *RHS, const Twine &Name, Instruction *InsertBefore)
+ Value *RHS, const Twine &Name, Instruction *InsertBefore,
+ Instruction *FlagsSource)
: Instruction(ty, op,
OperandTraits<CmpInst>::op_begin(this),
OperandTraits<CmpInst>::operands(this),
InsertBefore) {
- Op<0>() = LHS;
- Op<1>() = RHS;
+ Op<0>() = LHS;
+ Op<1>() = RHS;
setPredicate((Predicate)predicate);
setName(Name);
+ if (FlagsSource)
+ copyIRFlags(FlagsSource);
}
CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
@@ -3518,8 +3635,8 @@ void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
/// constructor can also autoinsert before another instruction.
SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
Instruction *InsertBefore)
- : TerminatorInst(Type::getVoidTy(Value->getContext()), Instruction::Switch,
- nullptr, 0, InsertBefore) {
+ : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
+ nullptr, 0, InsertBefore) {
init(Value, Default, 2+NumCases*2);
}
@@ -3529,13 +3646,13 @@ SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
/// constructor also autoinserts at the end of the specified BasicBlock.
SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
BasicBlock *InsertAtEnd)
- : TerminatorInst(Type::getVoidTy(Value->getContext()), Instruction::Switch,
- nullptr, 0, InsertAtEnd) {
+ : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
+ nullptr, 0, InsertAtEnd) {
init(Value, Default, 2+NumCases*2);
}
SwitchInst::SwitchInst(const SwitchInst &SI)
- : TerminatorInst(SI.getType(), Instruction::Switch, nullptr, 0) {
+ : Instruction(SI.getType(), Instruction::Switch, nullptr, 0) {
init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
setNumHungOffUseOperands(SI.getNumOperands());
Use *OL = getOperandList();
@@ -3547,7 +3664,6 @@ SwitchInst::SwitchInst(const SwitchInst &SI)
SubclassOptionalData = SI.SubclassOptionalData;
}
-
/// addCase - Add an entry to the switch instruction...
///
void SwitchInst::addCase(ConstantInt *OnVal, BasicBlock *Dest) {
@@ -3626,21 +3742,21 @@ void IndirectBrInst::growOperands() {
IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
Instruction *InsertBefore)
-: TerminatorInst(Type::getVoidTy(Address->getContext()),Instruction::IndirectBr,
- nullptr, 0, InsertBefore) {
+ : Instruction(Type::getVoidTy(Address->getContext()),
+ Instruction::IndirectBr, nullptr, 0, InsertBefore) {
init(Address, NumCases);
}
IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
BasicBlock *InsertAtEnd)
-: TerminatorInst(Type::getVoidTy(Address->getContext()),Instruction::IndirectBr,
- nullptr, 0, InsertAtEnd) {
+ : Instruction(Type::getVoidTy(Address->getContext()),
+ Instruction::IndirectBr, nullptr, 0, InsertAtEnd) {
init(Address, NumCases);
}
IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
- : TerminatorInst(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
- nullptr, IBI.getNumOperands()) {
+ : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
+ nullptr, IBI.getNumOperands()) {
allocHungoffUses(IBI.getNumOperands());
Use *OL = getOperandList();
const Use *InOL = IBI.getOperandList();
@@ -3688,6 +3804,10 @@ GetElementPtrInst *GetElementPtrInst::cloneImpl() const {
return new (getNumOperands()) GetElementPtrInst(*this);
}
+UnaryOperator *UnaryOperator::cloneImpl() const {
+ return Create(getOpcode(), Op<0>());
+}
+
BinaryOperator *BinaryOperator::cloneImpl() const {
return Create(getOpcode(), Op<0>(), Op<1>());
}
@@ -3718,7 +3838,7 @@ AllocaInst *AllocaInst::cloneImpl() const {
}
LoadInst *LoadInst::cloneImpl() const {
- return new LoadInst(getOperand(0), Twine(), isVolatile(),
+ return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(),
getAlignment(), getOrdering(), getSyncScopeID());
}
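Not part of the patch: the load constructors now take the result type explicitly instead of peeling it off the pointer operand. A minimal sketch, assuming Ptr and InsertPt already exist:

    Type *ElemTy = cast<PointerType>(Ptr->getType())->getElementType();
    LoadInst *LI = new LoadInst(ElemTy, Ptr, "val", /*isVolatile=*/false, InsertPt);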
diff --git a/lib/IR/IntrinsicInst.cpp b/lib/IR/IntrinsicInst.cpp
index 787889934d82..df3a38ac147f 100644
--- a/lib/IR/IntrinsicInst.cpp
+++ b/lib/IR/IntrinsicInst.cpp
@@ -32,10 +32,11 @@
using namespace llvm;
//===----------------------------------------------------------------------===//
-/// DbgInfoIntrinsic - This is the common base class for debug info intrinsics
+/// DbgVariableIntrinsic - This is the common base class for debug info
+/// intrinsics for variables.
///
-Value *DbgInfoIntrinsic::getVariableLocation(bool AllowNullOp) const {
+Value *DbgVariableIntrinsic::getVariableLocation(bool AllowNullOp) const {
Value *Op = getArgOperand(0);
if (AllowNullOp && !Op)
return nullptr;
@@ -45,14 +46,11 @@ Value *DbgInfoIntrinsic::getVariableLocation(bool AllowNullOp) const {
return V->getValue();
// When the value goes to null, it gets replaced by an empty MDNode.
- assert((isa<DbgLabelInst>(this)
- || !cast<MDNode>(MD)->getNumOperands())
- && "DbgValueInst Expected an empty MDNode");
-
+ assert(!cast<MDNode>(MD)->getNumOperands() && "Expected an empty MDNode");
return nullptr;
}
-Optional<uint64_t> DbgInfoIntrinsic::getFragmentSizeInBits() const {
+Optional<uint64_t> DbgVariableIntrinsic::getFragmentSizeInBits() const {
if (auto Fragment = getExpression()->getFragmentInfo())
return Fragment->SizeInBits;
return getVariable()->getSizeInBits();
@@ -154,6 +152,10 @@ bool ConstrainedFPIntrinsic::isUnaryOp() const {
case Intrinsic::experimental_constrained_log2:
case Intrinsic::experimental_constrained_rint:
case Intrinsic::experimental_constrained_nearbyint:
+ case Intrinsic::experimental_constrained_ceil:
+ case Intrinsic::experimental_constrained_floor:
+ case Intrinsic::experimental_constrained_round:
+ case Intrinsic::experimental_constrained_trunc:
return true;
}
}
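Not part of the patch: a small sketch of the renamed base class in use when walking debug intrinsics (I is assumed to be an existing Instruction reference).

    if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&I)) {
      // Location of the described variable (may be null for a killed value).
      Value *Loc = DVI->getVariableLocation();
      // Size in bits of the described fragment, if known.
      Optional<uint64_t> Bits = DVI->getFragmentSizeInBits();
      (void)Loc; (void)Bits;
    }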
diff --git a/lib/IR/LLVMContext.cpp b/lib/IR/LLVMContext.cpp
index 62d9e387162e..944d8265151d 100644
--- a/lib/IR/LLVMContext.cpp
+++ b/lib/IR/LLVMContext.cpp
@@ -61,6 +61,7 @@ LLVMContext::LLVMContext() : pImpl(new LLVMContextImpl(*this)) {
{MD_associated, "associated"},
{MD_callees, "callees"},
{MD_irr_loop, "irr_loop"},
+ {MD_access_group, "llvm.access.group"},
};
for (auto &MDKind : MDKinds) {
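Not part of the patch: the new kind is attached to memory accesses roughly like this (Ctx and LI are assumed to be an existing LLVMContext and load instruction).

    MDNode *AccessGroup = MDNode::getDistinct(Ctx, {});
    LI->setMetadata(LLVMContext::MD_access_group, AccessGroup);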
diff --git a/lib/IR/LLVMContextImpl.h b/lib/IR/LLVMContextImpl.h
index 3b2e1e81b1c1..2d120869860a 100644
--- a/lib/IR/LLVMContextImpl.h
+++ b/lib/IR/LLVMContextImpl.h
@@ -280,21 +280,24 @@ template <> struct MDNodeKeyImpl<DILocation> {
unsigned Column;
Metadata *Scope;
Metadata *InlinedAt;
+ bool ImplicitCode;
MDNodeKeyImpl(unsigned Line, unsigned Column, Metadata *Scope,
- Metadata *InlinedAt)
- : Line(Line), Column(Column), Scope(Scope), InlinedAt(InlinedAt) {}
+ Metadata *InlinedAt, bool ImplicitCode)
+ : Line(Line), Column(Column), Scope(Scope), InlinedAt(InlinedAt),
+ ImplicitCode(ImplicitCode) {}
MDNodeKeyImpl(const DILocation *L)
: Line(L->getLine()), Column(L->getColumn()), Scope(L->getRawScope()),
- InlinedAt(L->getRawInlinedAt()) {}
+ InlinedAt(L->getRawInlinedAt()), ImplicitCode(L->isImplicitCode()) {}
bool isKeyOf(const DILocation *RHS) const {
return Line == RHS->getLine() && Column == RHS->getColumn() &&
- Scope == RHS->getRawScope() && InlinedAt == RHS->getRawInlinedAt();
+ Scope == RHS->getRawScope() && InlinedAt == RHS->getRawInlinedAt() &&
+ ImplicitCode == RHS->isImplicitCode();
}
unsigned getHashValue() const {
- return hash_combine(Line, Column, Scope, InlinedAt);
+ return hash_combine(Line, Column, Scope, InlinedAt, ImplicitCode);
}
};
@@ -376,20 +379,22 @@ template <> struct MDNodeKeyImpl<DIBasicType> {
uint64_t SizeInBits;
uint32_t AlignInBits;
unsigned Encoding;
+ unsigned Flags;
MDNodeKeyImpl(unsigned Tag, MDString *Name, uint64_t SizeInBits,
- uint32_t AlignInBits, unsigned Encoding)
+ uint32_t AlignInBits, unsigned Encoding, unsigned Flags)
: Tag(Tag), Name(Name), SizeInBits(SizeInBits), AlignInBits(AlignInBits),
- Encoding(Encoding) {}
+ Encoding(Encoding), Flags(Flags) {}
MDNodeKeyImpl(const DIBasicType *N)
: Tag(N->getTag()), Name(N->getRawName()), SizeInBits(N->getSizeInBits()),
- AlignInBits(N->getAlignInBits()), Encoding(N->getEncoding()) {}
+ AlignInBits(N->getAlignInBits()), Encoding(N->getEncoding()), Flags(N->getFlags()) {}
bool isKeyOf(const DIBasicType *RHS) const {
return Tag == RHS->getTag() && Name == RHS->getRawName() &&
SizeInBits == RHS->getSizeInBits() &&
AlignInBits == RHS->getAlignInBits() &&
- Encoding == RHS->getEncoding();
+ Encoding == RHS->getEncoding() &&
+ Flags == RHS->getFlags();
}
unsigned getHashValue() const {
@@ -607,15 +612,12 @@ template <> struct MDNodeKeyImpl<DISubprogram> {
Metadata *File;
unsigned Line;
Metadata *Type;
- bool IsLocalToUnit;
- bool IsDefinition;
unsigned ScopeLine;
Metadata *ContainingType;
- unsigned Virtuality;
unsigned VirtualIndex;
int ThisAdjustment;
unsigned Flags;
- bool IsOptimized;
+ unsigned SPFlags;
Metadata *Unit;
Metadata *TemplateParams;
Metadata *Declaration;
@@ -624,45 +626,39 @@ template <> struct MDNodeKeyImpl<DISubprogram> {
MDNodeKeyImpl(Metadata *Scope, MDString *Name, MDString *LinkageName,
Metadata *File, unsigned Line, Metadata *Type,
- bool IsLocalToUnit, bool IsDefinition, unsigned ScopeLine,
- Metadata *ContainingType, unsigned Virtuality,
+ unsigned ScopeLine, Metadata *ContainingType,
unsigned VirtualIndex, int ThisAdjustment, unsigned Flags,
- bool IsOptimized, Metadata *Unit, Metadata *TemplateParams,
+ unsigned SPFlags, Metadata *Unit, Metadata *TemplateParams,
Metadata *Declaration, Metadata *RetainedNodes,
Metadata *ThrownTypes)
: Scope(Scope), Name(Name), LinkageName(LinkageName), File(File),
- Line(Line), Type(Type), IsLocalToUnit(IsLocalToUnit),
- IsDefinition(IsDefinition), ScopeLine(ScopeLine),
- ContainingType(ContainingType), Virtuality(Virtuality),
- VirtualIndex(VirtualIndex), ThisAdjustment(ThisAdjustment),
- Flags(Flags), IsOptimized(IsOptimized), Unit(Unit),
- TemplateParams(TemplateParams), Declaration(Declaration),
+ Line(Line), Type(Type), ScopeLine(ScopeLine),
+ ContainingType(ContainingType), VirtualIndex(VirtualIndex),
+ ThisAdjustment(ThisAdjustment), Flags(Flags), SPFlags(SPFlags),
+ Unit(Unit), TemplateParams(TemplateParams), Declaration(Declaration),
RetainedNodes(RetainedNodes), ThrownTypes(ThrownTypes) {}
MDNodeKeyImpl(const DISubprogram *N)
: Scope(N->getRawScope()), Name(N->getRawName()),
LinkageName(N->getRawLinkageName()), File(N->getRawFile()),
- Line(N->getLine()), Type(N->getRawType()),
- IsLocalToUnit(N->isLocalToUnit()), IsDefinition(N->isDefinition()),
- ScopeLine(N->getScopeLine()), ContainingType(N->getRawContainingType()),
- Virtuality(N->getVirtuality()), VirtualIndex(N->getVirtualIndex()),
+ Line(N->getLine()), Type(N->getRawType()), ScopeLine(N->getScopeLine()),
+ ContainingType(N->getRawContainingType()),
+ VirtualIndex(N->getVirtualIndex()),
ThisAdjustment(N->getThisAdjustment()), Flags(N->getFlags()),
- IsOptimized(N->isOptimized()), Unit(N->getRawUnit()),
+ SPFlags(N->getSPFlags()), Unit(N->getRawUnit()),
TemplateParams(N->getRawTemplateParams()),
- Declaration(N->getRawDeclaration()), RetainedNodes(N->getRawRetainedNodes()),
+ Declaration(N->getRawDeclaration()),
+ RetainedNodes(N->getRawRetainedNodes()),
ThrownTypes(N->getRawThrownTypes()) {}
bool isKeyOf(const DISubprogram *RHS) const {
return Scope == RHS->getRawScope() && Name == RHS->getRawName() &&
LinkageName == RHS->getRawLinkageName() &&
File == RHS->getRawFile() && Line == RHS->getLine() &&
- Type == RHS->getRawType() && IsLocalToUnit == RHS->isLocalToUnit() &&
- IsDefinition == RHS->isDefinition() &&
- ScopeLine == RHS->getScopeLine() &&
+ Type == RHS->getRawType() && ScopeLine == RHS->getScopeLine() &&
ContainingType == RHS->getRawContainingType() &&
- Virtuality == RHS->getVirtuality() &&
VirtualIndex == RHS->getVirtualIndex() &&
ThisAdjustment == RHS->getThisAdjustment() &&
- Flags == RHS->getFlags() && IsOptimized == RHS->isOptimized() &&
+ Flags == RHS->getFlags() && SPFlags == RHS->getSPFlags() &&
Unit == RHS->getUnit() &&
TemplateParams == RHS->getRawTemplateParams() &&
Declaration == RHS->getRawDeclaration() &&
@@ -670,11 +666,13 @@ template <> struct MDNodeKeyImpl<DISubprogram> {
ThrownTypes == RHS->getRawThrownTypes();
}
+ bool isDefinition() const { return SPFlags & DISubprogram::SPFlagDefinition; }
+
unsigned getHashValue() const {
// If this is a declaration inside an ODR type, only hash the type and the
// name. Otherwise the hash will be stronger than
// MDNodeSubsetEqualImpl::isDeclarationOfODRMember().
- if (!IsDefinition && LinkageName)
+ if (!isDefinition() && LinkageName)
if (auto *CT = dyn_cast_or_null<DICompositeType>(Scope))
if (CT->getRawIdentifier())
return hash_combine(LinkageName, Scope);
@@ -691,7 +689,7 @@ template <> struct MDNodeSubsetEqualImpl<DISubprogram> {
using KeyTy = MDNodeKeyImpl<DISubprogram>;
static bool isSubsetEqual(const KeyTy &LHS, const DISubprogram *RHS) {
- return isDeclarationOfODRMember(LHS.IsDefinition, LHS.Scope,
+ return isDeclarationOfODRMember(LHS.isDefinition(), LHS.Scope,
LHS.LinkageName, LHS.TemplateParams, RHS);
}
@@ -865,23 +863,26 @@ template <> struct MDNodeKeyImpl<DIGlobalVariable> {
bool IsLocalToUnit;
bool IsDefinition;
Metadata *StaticDataMemberDeclaration;
+ Metadata *TemplateParams;
uint32_t AlignInBits;
MDNodeKeyImpl(Metadata *Scope, MDString *Name, MDString *LinkageName,
Metadata *File, unsigned Line, Metadata *Type,
bool IsLocalToUnit, bool IsDefinition,
- Metadata *StaticDataMemberDeclaration, uint32_t AlignInBits)
+ Metadata *StaticDataMemberDeclaration, Metadata *TemplateParams,
+ uint32_t AlignInBits)
: Scope(Scope), Name(Name), LinkageName(LinkageName), File(File),
Line(Line), Type(Type), IsLocalToUnit(IsLocalToUnit),
IsDefinition(IsDefinition),
StaticDataMemberDeclaration(StaticDataMemberDeclaration),
- AlignInBits(AlignInBits) {}
+ TemplateParams(TemplateParams), AlignInBits(AlignInBits) {}
MDNodeKeyImpl(const DIGlobalVariable *N)
: Scope(N->getRawScope()), Name(N->getRawName()),
LinkageName(N->getRawLinkageName()), File(N->getRawFile()),
Line(N->getLine()), Type(N->getRawType()),
IsLocalToUnit(N->isLocalToUnit()), IsDefinition(N->isDefinition()),
StaticDataMemberDeclaration(N->getRawStaticDataMemberDeclaration()),
+ TemplateParams(N->getRawTemplateParams()),
AlignInBits(N->getAlignInBits()) {}
bool isKeyOf(const DIGlobalVariable *RHS) const {
@@ -892,6 +893,7 @@ template <> struct MDNodeKeyImpl<DIGlobalVariable> {
IsDefinition == RHS->isDefinition() &&
StaticDataMemberDeclaration ==
RHS->getRawStaticDataMemberDeclaration() &&
+ TemplateParams == RHS->getRawTemplateParams() &&
AlignInBits == RHS->getAlignInBits();
}
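Not part of the patch: the separate DISubprogram booleans are now folded into one subprogram-flags bitfield. A hedged sketch of how the bits compose (enumerator names assumed from the wider change):

    unsigned SPFlags =
        DISubprogram::SPFlagDefinition | DISubprogram::SPFlagLocalToUnit;
    bool IsDefinition = SPFlags & DISubprogram::SPFlagDefinition; // mirrors isDefinition() above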
diff --git a/lib/IR/LegacyPassManager.cpp b/lib/IR/LegacyPassManager.cpp
index 54d602d926e5..01d14f17bba5 100644
--- a/lib/IR/LegacyPassManager.cpp
+++ b/lib/IR/LegacyPassManager.cpp
@@ -20,6 +20,7 @@
#include "llvm/IR/LegacyPassManagers.h"
#include "llvm/IR/LegacyPassNameParser.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/PassTimingInfo.h"
#include "llvm/Support/Chrono.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
@@ -99,27 +100,31 @@ static cl::list<std::string>
/// This is a helper to determine whether to print IR before or
/// after a pass.
-static bool ShouldPrintBeforeOrAfterPass(const PassInfo *PI,
+bool llvm::shouldPrintBeforePass() {
+ return PrintBeforeAll || !PrintBefore.empty();
+}
+
+bool llvm::shouldPrintAfterPass() {
+ return PrintAfterAll || !PrintAfter.empty();
+}
+
+static bool ShouldPrintBeforeOrAfterPass(StringRef PassID,
PassOptionList &PassesToPrint) {
for (auto *PassInf : PassesToPrint) {
if (PassInf)
- if (PassInf->getPassArgument() == PI->getPassArgument()) {
+ if (PassInf->getPassArgument() == PassID) {
return true;
}
}
return false;
}
-/// This is a utility to check whether a pass should have IR dumped
-/// before it.
-static bool ShouldPrintBeforePass(const PassInfo *PI) {
- return PrintBeforeAll || ShouldPrintBeforeOrAfterPass(PI, PrintBefore);
+bool llvm::shouldPrintBeforePass(StringRef PassID) {
+ return PrintBeforeAll || ShouldPrintBeforeOrAfterPass(PassID, PrintBefore);
}
-/// This is a utility to check whether a pass should have IR dumped
-/// after it.
-static bool ShouldPrintAfterPass(const PassInfo *PI) {
- return PrintAfterAll || ShouldPrintBeforeOrAfterPass(PI, PrintAfter);
+bool llvm::shouldPrintAfterPass(StringRef PassID) {
+ return PrintAfterAll || ShouldPrintBeforeOrAfterPass(PassID, PrintAfter);
}
bool llvm::forcePrintModuleIR() { return PrintModuleScope; }
@@ -135,34 +140,32 @@ bool PMDataManager::isPassDebuggingExecutionsOrMore() const {
return PassDebugging >= Executions;
}
-unsigned PMDataManager::initSizeRemarkInfo(Module &M) {
+unsigned PMDataManager::initSizeRemarkInfo(
+ Module &M, StringMap<std::pair<unsigned, unsigned>> &FunctionToInstrCount) {
// Only calculate getInstructionCount if the size-info remark is requested.
- return M.getInstructionCount();
-}
-
-void PMDataManager::emitInstrCountChangedRemark(Pass *P, Module &M,
- unsigned CountBefore) {
- // We need a function containing at least one basic block in order to output
- // remarks. Since it's possible that the first function in the module doesn't
- // actually contain a basic block, we have to go and find one that's suitable
- // for emitting remarks.
- auto It = std::find_if(M.begin(), M.end(),
- [](const Function &Fn) { return !Fn.empty(); });
-
- // Didn't find a function. Quit.
- if (It == M.end())
- return;
-
- // We found a function containing at least one basic block.
- Function *F = &*It;
+ unsigned InstrCount = 0;
- // How many instructions are in the module now?
- unsigned CountAfter = M.getInstructionCount();
+ // Collect instruction counts for every function. We'll use this to emit
+ // per-function size remarks later.
+ for (Function &F : M) {
+ unsigned FCount = F.getInstructionCount();
- // If there was no change, don't emit a remark.
- if (CountBefore == CountAfter)
- return;
+ // Insert a record into FunctionToInstrCount keeping track of the current
+ // size of the function as the first member of a pair. Set the second
+ // member to 0; if the function is deleted by the pass, then when we get
+ // here, we'll be able to let the user know that F no longer contributes to
+ // the module.
+ FunctionToInstrCount[F.getName().str()] =
+ std::pair<unsigned, unsigned>(FCount, 0);
+ InstrCount += FCount;
+ }
+ return InstrCount;
+}
+void PMDataManager::emitInstrCountChangedRemark(
+ Pass *P, Module &M, int64_t Delta, unsigned CountBefore,
+ StringMap<std::pair<unsigned, unsigned>> &FunctionToInstrCount,
+ Function *F) {
// If it's a pass manager, don't emit a remark. (This hinges on the assumption
// that the only passes that return non-null with getAsPMDataManager are pass
// managers.) The reason we have to do this is to avoid emitting remarks for
@@ -170,11 +173,53 @@ void PMDataManager::emitInstrCountChangedRemark(Pass *P, Module &M,
if (P->getAsPMDataManager())
return;
- // Compute a possibly negative delta between the instruction count before
- // running P, and after running P.
- int64_t Delta =
- static_cast<int64_t>(CountAfter) - static_cast<int64_t>(CountBefore);
+ // Set to true if this isn't a module pass or CGSCC pass.
+ bool CouldOnlyImpactOneFunction = (F != nullptr);
+
+ // Helper lambda that updates the changes to the size of some function.
+ auto UpdateFunctionChanges =
+ [&FunctionToInstrCount](Function &MaybeChangedFn) {
+ // Update the total module count.
+ unsigned FnSize = MaybeChangedFn.getInstructionCount();
+ auto It = FunctionToInstrCount.find(MaybeChangedFn.getName());
+
+ // If we created a new function, then we need to add it to the map and
+ // say that it changed from 0 instructions to FnSize.
+ if (It == FunctionToInstrCount.end()) {
+ FunctionToInstrCount[MaybeChangedFn.getName()] =
+ std::pair<unsigned, unsigned>(0, FnSize);
+ return;
+ }
+ // Insert the new function size into the second member of the pair. This
+ // tells us whether or not this function changed in size.
+ It->second.second = FnSize;
+ };
+
+ // We need to initially update all of the function sizes.
+  // If no function was passed in, then we're either a module pass or a
+  // CGSCC pass.
+ if (!CouldOnlyImpactOneFunction)
+ std::for_each(M.begin(), M.end(), UpdateFunctionChanges);
+ else
+ UpdateFunctionChanges(*F);
+
+ // Do we have a function we can use to emit a remark?
+ if (!CouldOnlyImpactOneFunction) {
+ // We need a function containing at least one basic block in order to output
+ // remarks. Since it's possible that the first function in the module
+ // doesn't actually contain a basic block, we have to go and find one that's
+ // suitable for emitting remarks.
+ auto It = std::find_if(M.begin(), M.end(),
+ [](const Function &Fn) { return !Fn.empty(); });
+ // Didn't find a function. Quit.
+ if (It == M.end())
+ return;
+
+ // We found a function containing at least one basic block.
+ F = &*It;
+ }
+ int64_t CountAfter = static_cast<int64_t>(CountBefore) + Delta;
BasicBlock &BB = *F->begin();
OptimizationRemarkAnalysis R("size-info", "IRSizeChange",
DiagnosticLocation(), &BB);
@@ -188,6 +233,55 @@ void PMDataManager::emitInstrCountChangedRemark(Pass *P, Module &M,
<< "; Delta: "
<< DiagnosticInfoOptimizationBase::Argument("DeltaInstrCount", Delta);
F->getContext().diagnose(R); // Not using ORE for layering reasons.
+
+ // Emit per-function size change remarks separately.
+ std::string PassName = P->getPassName().str();
+
+ // Helper lambda that emits a remark when the size of a function has changed.
+ auto EmitFunctionSizeChangedRemark = [&FunctionToInstrCount, &F, &BB,
+ &PassName](const std::string &Fname) {
+ unsigned FnCountBefore, FnCountAfter;
+ std::pair<unsigned, unsigned> &Change = FunctionToInstrCount[Fname];
+ std::tie(FnCountBefore, FnCountAfter) = Change;
+ int64_t FnDelta = static_cast<int64_t>(FnCountAfter) -
+ static_cast<int64_t>(FnCountBefore);
+
+ if (FnDelta == 0)
+ return;
+
+ // FIXME: We shouldn't use BB for the location here. Unfortunately, because
+ // the function that we're looking at could have been deleted, we can't use
+ // it for the source location. We *want* remarks when a function is deleted
+ // though, so we're kind of stuck here as is. (This remark, along with the
+ // whole-module size change remarks really ought not to have source
+ // locations at all.)
+ OptimizationRemarkAnalysis FR("size-info", "FunctionIRSizeChange",
+ DiagnosticLocation(), &BB);
+ FR << DiagnosticInfoOptimizationBase::Argument("Pass", PassName)
+ << ": Function: "
+ << DiagnosticInfoOptimizationBase::Argument("Function", Fname)
+ << ": IR instruction count changed from "
+ << DiagnosticInfoOptimizationBase::Argument("IRInstrsBefore",
+ FnCountBefore)
+ << " to "
+ << DiagnosticInfoOptimizationBase::Argument("IRInstrsAfter",
+ FnCountAfter)
+ << "; Delta: "
+ << DiagnosticInfoOptimizationBase::Argument("DeltaInstrCount", FnDelta);
+ F->getContext().diagnose(FR);
+
+ // Update the function size.
+ Change.first = FnCountAfter;
+ };
+
+ // Are we looking at more than one function? If so, emit remarks for all of
+ // the functions in the module. Otherwise, only emit one remark.
+ if (!CouldOnlyImpactOneFunction)
+ std::for_each(FunctionToInstrCount.keys().begin(),
+ FunctionToInstrCount.keys().end(),
+ EmitFunctionSizeChangedRemark);
+ else
+ EmitFunctionSizeChangedRemark(F->getName().str());
}
void PassManagerPrettyStackEntry::print(raw_ostream &OS) const {
@@ -494,65 +588,6 @@ char PassManagerImpl::ID = 0;
} // End of legacy namespace
} // End of llvm namespace
-namespace {
-
-//===----------------------------------------------------------------------===//
-/// TimingInfo Class - This class is used to calculate information about the
-/// amount of time each pass takes to execute. This only happens when
-/// -time-passes is enabled on the command line.
-///
-
-static ManagedStatic<sys::SmartMutex<true> > TimingInfoMutex;
-
-class TimingInfo {
- DenseMap<Pass*, Timer*> TimingData;
- TimerGroup TG;
-public:
- // Use 'create' member to get this.
- TimingInfo() : TG("pass", "... Pass execution timing report ...") {}
-
- // TimingDtor - Print out information about timing information
- ~TimingInfo() {
- // Delete all of the timers, which accumulate their info into the
- // TimerGroup.
- for (auto &I : TimingData)
- delete I.second;
- // TimerGroup is deleted next, printing the report.
- }
-
- // createTheTimeInfo - This method either initializes the TheTimeInfo pointer
- // to a non-null value (if the -time-passes option is enabled) or it leaves it
- // null. It may be called multiple times.
- static void createTheTimeInfo();
-
- // print - Prints out timing information and then resets the timers.
- void print() {
- TG.print(*CreateInfoOutputFile());
- }
-
- /// getPassTimer - Return the timer for the specified pass if it exists.
- Timer *getPassTimer(Pass *P) {
- if (P->getAsPMDataManager())
- return nullptr;
-
- sys::SmartScopedLock<true> Lock(*TimingInfoMutex);
- Timer *&T = TimingData[P];
- if (!T) {
- StringRef PassName = P->getPassName();
- StringRef PassArgument;
- if (const PassInfo *PI = Pass::lookupPassInfo(P->getPassID()))
- PassArgument = PI->getPassArgument();
- T = new Timer(PassArgument.empty() ? PassName : PassArgument, PassName,
- TG);
- }
- return T;
- }
-};
-
-} // End of anon namespace
-
-static TimingInfo *TheTimeInfo;
-
//===----------------------------------------------------------------------===//
// PMTopLevelManager implementation
@@ -677,6 +712,8 @@ void PMTopLevelManager::schedulePass(Pass *P) {
// available at this point.
const PassInfo *PI = findAnalysisPassInfo(P->getPassID());
if (PI && PI->isAnalysis() && findAnalysisPass(P->getPassID())) {
+ // Remove any cached AnalysisUsage information.
+ AnUsageMap.erase(P);
delete P;
return;
}
@@ -747,7 +784,7 @@ void PMTopLevelManager::schedulePass(Pass *P) {
return;
}
- if (PI && !PI->isAnalysis() && ShouldPrintBeforePass(PI)) {
+ if (PI && !PI->isAnalysis() && shouldPrintBeforePass(PI->getPassArgument())) {
Pass *PP = P->createPrinterPass(
dbgs(), ("*** IR Dump Before " + P->getPassName() + " ***").str());
PP->assignPassManager(activeStack, getTopLevelPassManagerType());
@@ -756,7 +793,7 @@ void PMTopLevelManager::schedulePass(Pass *P) {
// Add the requested pass to the best available pass manager.
P->assignPassManager(activeStack, getTopLevelPassManagerType());
- if (PI && !PI->isAnalysis() && ShouldPrintAfterPass(PI)) {
+ if (PI && !PI->isAnalysis() && shouldPrintAfterPass(PI->getPassArgument())) {
Pass *PP = P->createPrinterPass(
dbgs(), ("*** IR Dump After " + P->getPassName() + " ***").str());
PP->assignPassManager(activeStack, getTopLevelPassManagerType());
@@ -1343,9 +1380,16 @@ bool BBPassManager::runOnFunction(Function &F) {
bool Changed = doInitialization(F);
Module &M = *F.getParent();
- unsigned InstrCount = 0;
+ unsigned InstrCount, BBSize = 0;
+ StringMap<std::pair<unsigned, unsigned>> FunctionToInstrCount;
bool EmitICRemark = M.shouldEmitInstrCountChangedRemark();
- for (BasicBlock &BB : F)
+ if (EmitICRemark)
+ InstrCount = initSizeRemarkInfo(M, FunctionToInstrCount);
+
+ for (BasicBlock &BB : F) {
+ // Collect the initial size of the basic block.
+ if (EmitICRemark)
+ BBSize = BB.size();
for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
BasicBlockPass *BP = getContainedPass(Index);
bool LocalChanged = false;
@@ -1359,11 +1403,20 @@ bool BBPassManager::runOnFunction(Function &F) {
// If the pass crashes, remember this.
PassManagerPrettyStackEntry X(BP, BB);
TimeRegion PassTimer(getPassTimer(BP));
- if (EmitICRemark)
- InstrCount = initSizeRemarkInfo(M);
LocalChanged |= BP->runOnBasicBlock(BB);
- if (EmitICRemark)
- emitInstrCountChangedRemark(BP, M, InstrCount);
+ if (EmitICRemark) {
+ unsigned NewSize = BB.size();
+ // Update the size of the basic block, emit a remark, and update the
+ // size of the module.
+ if (NewSize != BBSize) {
+ int64_t Delta =
+ static_cast<int64_t>(NewSize) - static_cast<int64_t>(BBSize);
+ emitInstrCountChangedRemark(BP, M, Delta, InstrCount,
+ FunctionToInstrCount, &F);
+ InstrCount = static_cast<int64_t>(InstrCount) + Delta;
+ BBSize = NewSize;
+ }
+ }
}
Changed |= LocalChanged;
@@ -1378,6 +1431,7 @@ bool BBPassManager::runOnFunction(Function &F) {
recordAvailableAnalysis(BP);
removeDeadPasses(BP, BB.getName(), ON_BASICBLOCK_MSG);
}
+ }
return doFinalization(F) || Changed;
}
@@ -1525,7 +1579,6 @@ void FunctionPassManagerImpl::releaseMemoryOnTheFly() {
// Return true if any function is modified by a pass.
bool FunctionPassManagerImpl::run(Function &F) {
bool Changed = false;
- TimingInfo::createTheTimeInfo();
initializeAllAnalysisInfo();
for (unsigned Index = 0; Index < getNumContainedManagers(); ++Index) {
@@ -1567,8 +1620,15 @@ bool FPPassManager::runOnFunction(Function &F) {
// Collect inherited analysis from Module level pass manager.
populateInheritedAnalysis(TPM->activeStack);
- unsigned InstrCount = 0;
+ unsigned InstrCount, FunctionSize = 0;
+ StringMap<std::pair<unsigned, unsigned>> FunctionToInstrCount;
bool EmitICRemark = M.shouldEmitInstrCountChangedRemark();
+ // Collect the initial size of the module.
+ if (EmitICRemark) {
+ InstrCount = initSizeRemarkInfo(M, FunctionToInstrCount);
+ FunctionSize = F.getInstructionCount();
+ }
+
for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
FunctionPass *FP = getContainedPass(Index);
bool LocalChanged = false;
@@ -1581,11 +1641,21 @@ bool FPPassManager::runOnFunction(Function &F) {
{
PassManagerPrettyStackEntry X(FP, F);
TimeRegion PassTimer(getPassTimer(FP));
- if (EmitICRemark)
- InstrCount = initSizeRemarkInfo(M);
LocalChanged |= FP->runOnFunction(F);
- if (EmitICRemark)
- emitInstrCountChangedRemark(FP, M, InstrCount);
+ if (EmitICRemark) {
+ unsigned NewSize = F.getInstructionCount();
+
+ // Update the size of the function, emit a remark, and update the size
+ // of the module.
+ if (NewSize != FunctionSize) {
+ int64_t Delta = static_cast<int64_t>(NewSize) -
+ static_cast<int64_t>(FunctionSize);
+ emitInstrCountChangedRemark(FP, M, Delta, InstrCount,
+ FunctionToInstrCount, &F);
+ InstrCount = static_cast<int64_t>(InstrCount) + Delta;
+ FunctionSize = NewSize;
+ }
+ }
}
Changed |= LocalChanged;
@@ -1649,8 +1719,15 @@ MPPassManager::runOnModule(Module &M) {
for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index)
Changed |= getContainedPass(Index)->doInitialization(M);
- unsigned InstrCount = 0;
+ unsigned InstrCount, ModuleCount = 0;
+ StringMap<std::pair<unsigned, unsigned>> FunctionToInstrCount;
bool EmitICRemark = M.shouldEmitInstrCountChangedRemark();
+ // Collect the initial size of the module.
+ if (EmitICRemark) {
+ InstrCount = initSizeRemarkInfo(M, FunctionToInstrCount);
+ ModuleCount = InstrCount;
+ }
+
for (unsigned Index = 0; Index < getNumContainedPasses(); ++Index) {
ModulePass *MP = getContainedPass(Index);
bool LocalChanged = false;
@@ -1664,11 +1741,18 @@ MPPassManager::runOnModule(Module &M) {
PassManagerPrettyStackEntry X(MP, M);
TimeRegion PassTimer(getPassTimer(MP));
- if (EmitICRemark)
- InstrCount = initSizeRemarkInfo(M);
LocalChanged |= MP->runOnModule(M);
- if (EmitICRemark)
- emitInstrCountChangedRemark(MP, M, InstrCount);
+ if (EmitICRemark) {
+ // Update the size of the module.
+ ModuleCount = M.getInstructionCount();
+ if (ModuleCount != InstrCount) {
+ int64_t Delta = static_cast<int64_t>(ModuleCount) -
+ static_cast<int64_t>(InstrCount);
+ emitInstrCountChangedRemark(MP, M, Delta, InstrCount,
+ FunctionToInstrCount);
+ InstrCount = ModuleCount;
+ }
+ }
}
Changed |= LocalChanged;
@@ -1761,7 +1845,6 @@ Pass* MPPassManager::getOnTheFlyPass(Pass *MP, AnalysisID PI, Function &F){
/// whether any of the passes modifies the module, and if so, return true.
bool PassManagerImpl::run(Module &M) {
bool Changed = false;
- TimingInfo::createTheTimeInfo();
dumpArguments();
dumpPasses();
@@ -1806,41 +1889,6 @@ bool PassManager::run(Module &M) {
}
//===----------------------------------------------------------------------===//
-// TimingInfo implementation
-
-bool llvm::TimePassesIsEnabled = false;
-static cl::opt<bool, true> EnableTiming(
- "time-passes", cl::location(TimePassesIsEnabled), cl::Hidden,
- cl::desc("Time each pass, printing elapsed time for each on exit"));
-
-// createTheTimeInfo - This method either initializes the TheTimeInfo pointer to
-// a non-null value (if the -time-passes option is enabled) or it leaves it
-// null. It may be called multiple times.
-void TimingInfo::createTheTimeInfo() {
- if (!TimePassesIsEnabled || TheTimeInfo) return;
-
- // Constructed the first time this is called, iff -time-passes is enabled.
- // This guarantees that the object will be constructed before static globals,
- // thus it will be destroyed before them.
- static ManagedStatic<TimingInfo> TTI;
- TheTimeInfo = &*TTI;
-}
-
-/// If TimingInfo is enabled then start pass timer.
-Timer *llvm::getPassTimer(Pass *P) {
- if (TheTimeInfo)
- return TheTimeInfo->getPassTimer(P);
- return nullptr;
-}
-
-/// If timing is enabled, report the times collected up to now and then reset
-/// them.
-void llvm::reportAndResetTimings() {
- if (TheTimeInfo)
- TheTimeInfo->print();
-}
-
-//===----------------------------------------------------------------------===//
// PMStack implementation
//
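Not part of the patch: a simplified sketch of the per-function bookkeeping the pass managers now perform for size remarks (the real logic lives in initSizeRemarkInfo / emitInstrCountChangedRemark above); M is an assumed Module reference.

    StringMap<std::pair<unsigned, unsigned>> Sizes; // function name -> (before, after)
    for (Function &F : M)
      Sizes[F.getName()] = {F.getInstructionCount(), 0};
    // ... run one pass ...
    for (Function &F : M)
      Sizes[F.getName()].second = F.getInstructionCount();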
diff --git a/lib/IR/MDBuilder.cpp b/lib/IR/MDBuilder.cpp
index 1bb23c0330f3..3fa541f1b535 100644
--- a/lib/IR/MDBuilder.cpp
+++ b/lib/IR/MDBuilder.cpp
@@ -260,8 +260,9 @@ MDNode *MDBuilder::createMutableTBAAAccessTag(MDNode *Tag) {
}
MDNode *MDBuilder::createIrrLoopHeaderWeight(uint64_t Weight) {
- SmallVector<Metadata *, 2> Vals(2);
- Vals[0] = createString("loop_header_weight");
- Vals[1] = createConstant(ConstantInt::get(Type::getInt64Ty(Context), Weight));
+ Metadata *Vals[] = {
+ createString("loop_header_weight"),
+ createConstant(ConstantInt::get(Type::getInt64Ty(Context), Weight)),
+ };
return MDNode::get(Context, Vals);
}
diff --git a/lib/IR/Metadata.cpp b/lib/IR/Metadata.cpp
index 83a22d95bd81..5536c2497f1e 100644
--- a/lib/IR/Metadata.cpp
+++ b/lib/IR/Metadata.cpp
@@ -237,7 +237,7 @@ void ReplaceableMetadataImpl::replaceAllUsesWith(Metadata *MD) {
// Copy out uses since UseMap will get touched below.
using UseTy = std::pair<void *, std::pair<OwnerTy, uint64_t>>;
SmallVector<UseTy, 8> Uses(UseMap.begin(), UseMap.end());
- llvm::sort(Uses.begin(), Uses.end(), [](const UseTy &L, const UseTy &R) {
+ llvm::sort(Uses, [](const UseTy &L, const UseTy &R) {
return L.second.second < R.second.second;
});
for (const auto &Pair : Uses) {
@@ -290,7 +290,7 @@ void ReplaceableMetadataImpl::resolveAllUses(bool ResolveUsers) {
// Copy out uses since UseMap could get touched below.
using UseTy = std::pair<void *, std::pair<OwnerTy, uint64_t>>;
SmallVector<UseTy, 8> Uses(UseMap.begin(), UseMap.end());
- llvm::sort(Uses.begin(), Uses.end(), [](const UseTy &L, const UseTy &R) {
+ llvm::sort(Uses, [](const UseTy &L, const UseTy &R) {
return L.second.second < R.second.second;
});
UseMap.clear();
@@ -1484,7 +1484,7 @@ void GlobalObject::copyMetadata(const GlobalObject *Other, unsigned Offset) {
std::vector<uint64_t> Elements(OrigElements.size() + 2);
Elements[0] = dwarf::DW_OP_plus_uconst;
Elements[1] = Offset;
- std::copy(OrigElements.begin(), OrigElements.end(), Elements.begin() + 2);
+ llvm::copy(OrigElements, Elements.begin() + 2);
E = DIExpression::get(getContext(), Elements);
Attachment = DIGlobalVariableExpression::get(getContext(), GV, E);
}
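Not part of the patch: the range forms used above are thin wrappers from STLExtras; a minimal self-contained sketch of the equivalence.

    SmallVector<int, 8> V = {3, 1, 2};
    llvm::sort(V, [](int L, int R) { return L < R; }); // range form of llvm::sort
    std::vector<int> Out;
    llvm::copy(V, std::back_inserter(Out));            // range form of std::copy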
diff --git a/lib/IR/Module.cpp b/lib/IR/Module.cpp
index f18024063533..93f27304424f 100644
--- a/lib/IR/Module.cpp
+++ b/lib/IR/Module.cpp
@@ -13,6 +13,7 @@
#include "llvm/IR/Module.h"
#include "SymbolTableListTraitsImpl.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
@@ -45,6 +46,7 @@
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/RandomNumberGenerator.h"
+#include "llvm/Support/VersionTuple.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
@@ -145,7 +147,8 @@ Constant *Module::getOrInsertFunction(StringRef Name, FunctionType *Ty,
GlobalValue *F = getNamedValue(Name);
if (!F) {
// Nope, add it
- Function *New = Function::Create(Ty, GlobalVariable::ExternalLinkage, Name);
+ Function *New = Function::Create(Ty, GlobalVariable::ExternalLinkage,
+ DL.getProgramAddressSpace(), Name);
if (!New->isIntrinsic()) // Intrinsics get attrs set on construction
New->setAttributes(AttributeList);
FunctionList.push_back(New);
@@ -154,8 +157,9 @@ Constant *Module::getOrInsertFunction(StringRef Name, FunctionType *Ty,
// If the function exists but has the wrong type, return a bitcast to the
// right type.
- if (F->getType() != PointerType::getUnqual(Ty))
- return ConstantExpr::getBitCast(F, PointerType::getUnqual(Ty));
+ auto *PTy = PointerType::get(Ty, F->getAddressSpace());
+ if (F->getType() != PTy)
+ return ConstantExpr::getBitCast(F, PTy);
// Otherwise, we just found the existing function or a prototype.
return F;
@@ -199,16 +203,14 @@ GlobalVariable *Module::getGlobalVariable(StringRef Name,
/// with a constantexpr cast to the right type.
/// 3. Finally, if the existing global is the correct declaration, return the
/// existing global.
-Constant *Module::getOrInsertGlobal(StringRef Name, Type *Ty) {
+Constant *Module::getOrInsertGlobal(
+ StringRef Name, Type *Ty,
+ function_ref<GlobalVariable *()> CreateGlobalCallback) {
// See if we have a definition for the specified global already.
GlobalVariable *GV = dyn_cast_or_null<GlobalVariable>(getNamedValue(Name));
- if (!GV) {
- // Nope, add it
- GlobalVariable *New =
- new GlobalVariable(*this, Ty, false, GlobalVariable::ExternalLinkage,
- nullptr, Name);
- return New; // Return the new declaration.
- }
+ if (!GV)
+ GV = CreateGlobalCallback();
+ assert(GV && "The CreateGlobalCallback is expected to create a global");
// If the variable exists but has the wrong type, return a bitcast to the
// right type.
@@ -221,6 +223,14 @@ Constant *Module::getOrInsertGlobal(StringRef Name, Type *Ty) {
return GV;
}
+// Overload to construct a global variable using its constructor's defaults.
+Constant *Module::getOrInsertGlobal(StringRef Name, Type *Ty) {
+ return getOrInsertGlobal(Name, Ty, [&] {
+ return new GlobalVariable(*this, Ty, false, GlobalVariable::ExternalLinkage,
+ nullptr, Name);
+ });
+}
+
//===----------------------------------------------------------------------===//
// Methods for easy access to the global variables in the module.
//
@@ -505,6 +515,24 @@ void Module::setPIELevel(PIELevel::Level PL) {
addModuleFlag(ModFlagBehavior::Max, "PIE Level", PL);
}
+Optional<CodeModel::Model> Module::getCodeModel() const {
+ auto *Val = cast_or_null<ConstantAsMetadata>(getModuleFlag("Code Model"));
+
+ if (!Val)
+ return None;
+
+ return static_cast<CodeModel::Model>(
+ cast<ConstantInt>(Val->getValue())->getZExtValue());
+}
+
+void Module::setCodeModel(CodeModel::Model CL) {
+ // Linking object files with different code models is undefined behavior
+ // because the compiler would have to generate additional code (to span
+ // longer jumps) if a larger code model is used with a smaller one.
+ // Therefore we will treat attempts to mix code models as an error.
+ addModuleFlag(ModFlagBehavior::Error, "Code Model", CL);
+}
+
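Not part of the patch: a hedged usage sketch of the new code-model module-flag accessors (M is an assumed Module reference).

    M.setCodeModel(CodeModel::Small);
    if (Optional<CodeModel::Model> CM = M.getCodeModel())
      assert(*CM == CodeModel::Small && "code model flag round-trips");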
void Module::setProfileSummary(Metadata *M) {
addModuleFlag(ModFlagBehavior::Error, "ProfileSummary", M);
}
@@ -526,6 +554,45 @@ void Module::setRtLibUseGOT() {
addModuleFlag(ModFlagBehavior::Max, "RtLibUseGOT", 1);
}
+void Module::setSDKVersion(const VersionTuple &V) {
+ SmallVector<unsigned, 3> Entries;
+ Entries.push_back(V.getMajor());
+ if (auto Minor = V.getMinor()) {
+ Entries.push_back(*Minor);
+ if (auto Subminor = V.getSubminor())
+ Entries.push_back(*Subminor);
+ // Ignore the 'build' component as it can't be represented in the object
+ // file.
+ }
+ addModuleFlag(ModFlagBehavior::Warning, "SDK Version",
+ ConstantDataArray::get(Context, Entries));
+}
+
+VersionTuple Module::getSDKVersion() const {
+ auto *CM = dyn_cast_or_null<ConstantAsMetadata>(getModuleFlag("SDK Version"));
+ if (!CM)
+ return {};
+ auto *Arr = dyn_cast_or_null<ConstantDataArray>(CM->getValue());
+ if (!Arr)
+ return {};
+ auto getVersionComponent = [&](unsigned Index) -> Optional<unsigned> {
+ if (Index >= Arr->getNumElements())
+ return None;
+ return (unsigned)Arr->getElementAsInteger(Index);
+ };
+ auto Major = getVersionComponent(0);
+ if (!Major)
+ return {};
+ VersionTuple Result = VersionTuple(*Major);
+ if (auto Minor = getVersionComponent(1)) {
+ Result = VersionTuple(*Major, *Minor);
+ if (auto Subminor = getVersionComponent(2)) {
+ Result = VersionTuple(*Major, *Minor, *Subminor);
+ }
+ }
+ return Result;
+}
+
GlobalVariable *llvm::collectUsedGlobalVariables(
const Module &M, SmallPtrSetImpl<GlobalValue *> &Set, bool CompilerUsed) {
const char *Name = CompilerUsed ? "llvm.compiler.used" : "llvm.used";
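Not part of the patch: the callback overload of getOrInsertGlobal lets callers choose linkage and initializer when the global does not exist yet. A hypothetical sketch (M is an assumed Module reference):

    Type *I64 = Type::getInt64Ty(M.getContext());
    Constant *Counter = M.getOrInsertGlobal("counter", I64, [&] {
      return new GlobalVariable(M, I64, /*isConstant=*/false,
                                GlobalValue::InternalLinkage,
                                ConstantInt::get(I64, 0), "counter");
    });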
diff --git a/lib/IR/ModuleSummaryIndex.cpp b/lib/IR/ModuleSummaryIndex.cpp
index 4c4466f9a902..46b88cd31779 100644
--- a/lib/IR/ModuleSummaryIndex.cpp
+++ b/lib/IR/ModuleSummaryIndex.cpp
@@ -14,11 +14,17 @@
#include "llvm/IR/ModuleSummaryIndex.h"
#include "llvm/ADT/SCCIterator.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+#define DEBUG_TYPE "module-summary-index"
+
+STATISTIC(ReadOnlyLiveGVars,
+ "Number of live global variables marked read only");
+
FunctionSummary FunctionSummary::ExternalNode =
FunctionSummary::makeDummyFunctionSummary({});
bool ValueInfo::isDSOLocal() const {
@@ -30,6 +36,17 @@ bool ValueInfo::isDSOLocal() const {
});
}
+// Gets the number of immutable refs in RefEdgeList
+unsigned FunctionSummary::immutableRefCount() const {
+ // Here we take advantage of having all readonly references
+  // located at the end of the RefEdgeList.
+ auto Refs = refs();
+ unsigned ImmutableRefCnt = 0;
+ for (int I = Refs.size() - 1; I >= 0 && Refs[I].isReadOnly(); --I)
+ ImmutableRefCnt++;
+ return ImmutableRefCnt;
+}
+
// Collect for the given module the list of function it defines
// (GUID -> Summary).
void ModuleSummaryIndex::collectDefinedFunctionsForModule(
@@ -84,6 +101,80 @@ bool ModuleSummaryIndex::isGUIDLive(GlobalValue::GUID GUID) const {
return false;
}
+static void propagateConstantsToRefs(GlobalValueSummary *S) {
+  // If a reference is not readonly then the referenced summary is not
+ // readonly either. Note that:
+ // - All references from GlobalVarSummary are conservatively considered as
+ // not readonly. Tracking them properly requires more complex analysis
+  //   than we have now.
+ //
+ // - AliasSummary objects have no refs at all so this function is a no-op
+ // for them.
+ for (auto &VI : S->refs()) {
+ if (VI.isReadOnly()) {
+      // We only mark refs as readonly when computing function summaries during
+      // the analysis phase.
+ assert(isa<FunctionSummary>(S));
+ continue;
+ }
+ for (auto &Ref : VI.getSummaryList())
+      // If a reference to an alias is not readonly then the aliasee is not readonly
+ if (auto *GVS = dyn_cast<GlobalVarSummary>(Ref->getBaseObject()))
+ GVS->setReadOnly(false);
+ }
+}
+
+// Do the constant propagation in combined index.
+// The goal of constant propagation is internalization of readonly
+// variables. To determine which variables are readonly and which
+// are not we take following steps:
+// - During analysis we speculatively assign readonly attribute to
+// all variables which can be internalized. When computing function
+// summary we also assign readonly attribute to a reference if
+// function doesn't modify referenced variable.
+//
+// - After computing dead symbols in combined index we do the constant
+// propagation. During this step we clear readonly attribute from
+// all variables which:
+// a. are preserved or can't be imported
+// b. referenced by any global variable initializer
+// c. referenced by a function and reference is not readonly
+//
+// Internalization itself happens in the backend after import is finished.
+// See internalizeImmutableGVs.
+void ModuleSummaryIndex::propagateConstants(
+ const DenseSet<GlobalValue::GUID> &GUIDPreservedSymbols) {
+ for (auto &P : *this)
+ for (auto &S : P.second.SummaryList) {
+ if (!isGlobalValueLive(S.get()))
+ // We don't examine references from dead objects
+ continue;
+
+ // A global variable can't be marked read only if it is not eligible
+ // for import, since we need to ensure that all external references
+ // get a local (imported) copy. It also can't be marked read only
+ // if it or any alias (an alias points to the same memory) is
+ // preserved or notEligibleToImport, since either of those means
+ // there could be writes that are not visible (preserved means it
+ // could have external-to-DSO writes, and notEligibleToImport means
+ // it could have writes via inline assembly, which leads to it being
+ // in @llvm.*used).
+ if (auto *GVS = dyn_cast<GlobalVarSummary>(S->getBaseObject()))
+ // Here we intentionally pass S.get() not GVS, because S could be
+ // an alias.
+ if (!canImportGlobalVar(S.get()) || GUIDPreservedSymbols.count(P.first))
+ GVS->setReadOnly(false);
+ propagateConstantsToRefs(S.get());
+ }
+ if (llvm::AreStatisticsEnabled())
+ for (auto &P : *this)
+ if (P.second.SummaryList.size())
+ if (auto *GVS = dyn_cast<GlobalVarSummary>(
+ P.second.SummaryList[0]->getBaseObject()))
+ if (isGlobalValueLive(GVS) && GVS->isReadOnly())
+ ReadOnlyLiveGVars++;
+}
+
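The rules above boil down to this: every internalizable variable starts out readonly, and the flag is cleared whenever the variable is preserved, not importable, or reached through a reference that is not known to be readonly. A toy sketch of that clearing rule with plain standard-library types (the types and names are illustrative, not the real index classes):

#include <string>
#include <unordered_map>
#include <vector>

struct ToyVar { bool ReadOnly = true; };            // speculative initial state
struct ToyRef { std::string Target; bool ReadOnly; };

// Clear the readonly flag from anything preserved or referenced through a
// non-readonly edge; whatever is still flagged afterwards may be internalized.
void clearNonReadOnly(std::unordered_map<std::string, ToyVar> &Vars,
                      const std::vector<ToyRef> &Refs,
                      const std::vector<std::string> &Preserved) {
  for (const std::string &Name : Preserved)
    Vars[Name].ReadOnly = false;       // may be written from outside the DSO
  for (const ToyRef &R : Refs)
    if (!R.ReadOnly)
      Vars[R.Target].ReadOnly = false; // a non-readonly use kills the flag
}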
// TODO: write a graphviz dumper for SCCs (see ModuleSummaryIndex::exportToDot)
// then delete this function and update its tests
LLVM_DUMP_METHOD
@@ -108,6 +199,7 @@ namespace {
struct Attributes {
void add(const Twine &Name, const Twine &Value,
const Twine &Comment = Twine());
+ void addComment(const Twine &Comment);
std::string getAsString() const;
std::vector<std::string> Attrs;
@@ -129,6 +221,10 @@ void Attributes::add(const Twine &Name, const Twine &Value,
A += Value.str();
A += "\"";
Attrs.push_back(A);
+ addComment(Comment);
+}
+
+void Attributes::addComment(const Twine &Comment) {
if (!Comment.isTriviallyEmpty()) {
if (Comments.empty())
Comments = " // ";
@@ -182,8 +278,9 @@ static std::string linkageToString(GlobalValue::LinkageTypes LT) {
static std::string fflagsToString(FunctionSummary::FFlags F) {
auto FlagValue = [](unsigned V) { return V ? '1' : '0'; };
- char FlagRep[] = {FlagValue(F.ReadNone), FlagValue(F.ReadOnly),
- FlagValue(F.NoRecurse), FlagValue(F.ReturnDoesNotAlias), 0};
+ char FlagRep[] = {FlagValue(F.ReadNone), FlagValue(F.ReadOnly),
+ FlagValue(F.NoRecurse), FlagValue(F.ReturnDoesNotAlias),
+ FlagValue(F.NoInline), 0};
return FlagRep;
}
@@ -198,9 +295,12 @@ static std::string getSummaryAttributes(GlobalValueSummary* GVS) {
", ffl: " + fflagsToString(FS->fflags());
}
+static std::string getNodeVisualName(GlobalValue::GUID Id) {
+ return std::string("@") + std::to_string(Id);
+}
+
static std::string getNodeVisualName(const ValueInfo &VI) {
- return VI.name().empty() ? std::string("@") + std::to_string(VI.getGUID())
- : VI.name().str();
+ return VI.name().empty() ? getNodeVisualName(VI.getGUID()) : VI.name().str();
}
static std::string getNodeLabel(const ValueInfo &VI, GlobalValueSummary *GVS) {
@@ -221,13 +321,25 @@ static std::string getNodeLabel(const ValueInfo &VI, GlobalValueSummary *GVS) {
// specific module associated with it. Typically this is function
// or variable defined in native object or library.
static void defineExternalNode(raw_ostream &OS, const char *Pfx,
- const ValueInfo &VI) {
- auto StrId = std::to_string(VI.getGUID());
- OS << " " << StrId << " [label=\"" << getNodeVisualName(VI)
- << "\"]; // defined externally\n";
+ const ValueInfo &VI, GlobalValue::GUID Id) {
+ auto StrId = std::to_string(Id);
+ OS << " " << StrId << " [label=\"";
+
+ if (VI) {
+ OS << getNodeVisualName(VI);
+ } else {
+ OS << getNodeVisualName(Id);
+ }
+ OS << "\"]; // defined externally\n";
+}
+
+static bool hasReadOnlyFlag(const GlobalValueSummary *S) {
+ if (auto *GVS = dyn_cast<GlobalVarSummary>(S))
+ return GVS->isReadOnly();
+ return false;
}
-void ModuleSummaryIndex::exportToDot(raw_ostream& OS) const {
+void ModuleSummaryIndex::exportToDot(raw_ostream &OS) const {
std::vector<Edge> CrossModuleEdges;
DenseMap<GlobalValue::GUID, std::vector<uint64_t>> NodeMap;
StringMap<GVSummaryMapTy> ModuleToDefinedGVS;
@@ -241,14 +353,18 @@ void ModuleSummaryIndex::exportToDot(raw_ostream& OS) const {
"_" + std::to_string(Id);
};
- auto DrawEdge = [&](const char *Pfx, int SrcMod, GlobalValue::GUID SrcId,
- int DstMod, GlobalValue::GUID DstId, int TypeOrHotness) {
- // 0 corresponds to alias edge, 1 to ref edge, 2 to call with unknown
- // hotness, ...
- TypeOrHotness += 2;
+ auto DrawEdge = [&](const char *Pfx, uint64_t SrcMod, GlobalValue::GUID SrcId,
+ uint64_t DstMod, GlobalValue::GUID DstId,
+ int TypeOrHotness) {
+ // 0 - alias
+ // 1 - reference
+ // 2 - constant reference
+ // Other value: (hotness - 3).
+ TypeOrHotness += 3;
static const char *EdgeAttrs[] = {
" [style=dotted]; // alias",
" [style=dashed]; // ref",
+ " [style=dashed,color=forestgreen]; // const-ref",
" // call (hotness : Unknown)",
" [color=blue]; // call (hotness : Cold)",
" // call (hotness : None)",
@@ -291,6 +407,8 @@ void ModuleSummaryIndex::exportToDot(raw_ostream& OS) const {
A.add("shape", "box");
} else {
A.add("shape", "Mrecord", "variable");
+ if (Flags.Live && hasReadOnlyFlag(SummaryIt.second))
+ A.addComment("immutable");
}
auto VI = getValueInfo(SummaryIt.first);
@@ -308,13 +426,20 @@ void ModuleSummaryIndex::exportToDot(raw_ostream& OS) const {
for (auto &SummaryIt : GVSMap) {
auto *GVS = SummaryIt.second;
for (auto &R : GVS->refs())
- Draw(SummaryIt.first, R.getGUID(), -1);
+ Draw(SummaryIt.first, R.getGUID(), R.isReadOnly() ? -1 : -2);
if (auto *AS = dyn_cast_or_null<AliasSummary>(SummaryIt.second)) {
- auto AliaseeOrigId = AS->getAliasee().getOriginalName();
- auto AliaseeId = getGUIDFromOriginalID(AliaseeOrigId);
-
- Draw(SummaryIt.first, AliaseeId ? AliaseeId : AliaseeOrigId, -2);
+ GlobalValue::GUID AliaseeId;
+ if (AS->hasAliaseeGUID())
+ AliaseeId = AS->getAliaseeGUID();
+ else {
+ auto AliaseeOrigId = AS->getAliasee().getOriginalName();
+ AliaseeId = getGUIDFromOriginalID(AliaseeOrigId);
+ if (!AliaseeId)
+ AliaseeId = AliaseeOrigId;
+ }
+
+ Draw(SummaryIt.first, AliaseeId, -3);
continue;
}
@@ -330,7 +455,7 @@ void ModuleSummaryIndex::exportToDot(raw_ostream& OS) const {
for (auto &E : CrossModuleEdges) {
auto &ModList = NodeMap[E.Dst];
if (ModList.empty()) {
- defineExternalNode(OS, " ", getValueInfo(E.Dst));
+ defineExternalNode(OS, " ", getValueInfo(E.Dst), E.Dst);
// Add fake module to the list to draw an edge to an external node
// in the loop below.
ModList.push_back(-1);
diff --git a/lib/IR/PassInstrumentation.cpp b/lib/IR/PassInstrumentation.cpp
new file mode 100644
index 000000000000..5aa2bc6d895e
--- /dev/null
+++ b/lib/IR/PassInstrumentation.cpp
@@ -0,0 +1,22 @@
+//===- PassInstrumentation.cpp - Pass Instrumentation interface -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file provides the implementation of PassInstrumentation class.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/PassInstrumentation.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+AnalysisKey PassInstrumentationAnalysis::Key;
+
+} // namespace llvm
diff --git a/lib/IR/PassTimingInfo.cpp b/lib/IR/PassTimingInfo.cpp
new file mode 100644
index 000000000000..40b3977ecbd9
--- /dev/null
+++ b/lib/IR/PassTimingInfo.cpp
@@ -0,0 +1,268 @@
+//===- PassTimingInfo.cpp - LLVM Pass Timing Implementation ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the LLVM Pass Timing infrastructure for both
+// new and legacy pass managers.
+//
+// PassTimingInfo Class - This class is used to calculate information about the
+// amount of time each pass takes to execute. This only happens when
+// -time-passes is enabled on the command line.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/PassTimingInfo.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/PassInstrumentation.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Support/Timer.h"
+#include "llvm/Support/raw_ostream.h"
+#include <memory>
+#include <string>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "time-passes"
+
+namespace llvm {
+
+bool TimePassesIsEnabled = false;
+
+static cl::opt<bool, true> EnableTiming(
+ "time-passes", cl::location(TimePassesIsEnabled), cl::Hidden,
+ cl::desc("Time each pass, printing elapsed time for each on exit"));
+
+namespace {
+namespace legacy {
+
+//===----------------------------------------------------------------------===//
+// Legacy pass manager's PassTimingInfo implementation
+
+/// Provides an interface for collecting pass timing information.
+///
+/// It was originally intended to be generic, but the interfaces have since
+/// been split completely; this class is exclusively for legacy-pass-manager use.
+class PassTimingInfo {
+public:
+ using PassInstanceID = void *;
+
+private:
+ StringMap<unsigned> PassIDCountMap; ///< Map that counts instances of passes
+ DenseMap<PassInstanceID, std::unique_ptr<Timer>> TimingData; ///< timers for pass instances
+ TimerGroup TG;
+
+public:
+ /// Default constructor for yet-inactive timeinfo.
+ /// Use \p init() to activate it.
+ PassTimingInfo();
+
+ /// Print out timing information and release timers.
+ ~PassTimingInfo();
+
+ /// Initializes the static \p TheTimeInfo member to a non-null value when
+ /// -time-passes is enabled. Leaves it null otherwise.
+ ///
+ /// This method may be called multiple times.
+ static void init();
+
+ /// Prints out timing information and then resets the timers.
+ void print();
+
+ /// Returns the timer for the specified pass if it exists.
+ Timer *getPassTimer(Pass *, PassInstanceID);
+
+ static PassTimingInfo *TheTimeInfo;
+
+private:
+ Timer *newPassTimer(StringRef PassID, StringRef PassDesc);
+};
+
+static ManagedStatic<sys::SmartMutex<true>> TimingInfoMutex;
+
+PassTimingInfo::PassTimingInfo()
+ : TG("pass", "... Pass execution timing report ...") {}
+
+PassTimingInfo::~PassTimingInfo() {
+ // Deleting the timers accumulates their info into the TG member.
+ // Then TG member is (implicitly) deleted, actually printing the report.
+ TimingData.clear();
+}
+
+void PassTimingInfo::init() {
+ if (!TimePassesIsEnabled || TheTimeInfo)
+ return;
+
+ // Constructed the first time this is called, iff -time-passes is enabled.
+ // This guarantees that the object will be constructed after static globals,
+ // thus it will be destroyed before them.
+ static ManagedStatic<PassTimingInfo> TTI;
+ TheTimeInfo = &*TTI;
+}
+
+/// Prints out timing information and then resets the timers.
+void PassTimingInfo::print() { TG.print(*CreateInfoOutputFile()); }
+
+Timer *PassTimingInfo::newPassTimer(StringRef PassID, StringRef PassDesc) {
+ unsigned &num = PassIDCountMap[PassID];
+ num++;
+ // Append a pass-instance number to the description, except for the first instance.
+ std::string PassDescNumbered =
+ num <= 1 ? PassDesc.str() : formatv("{0} #{1}", PassDesc, num).str();
+ return new Timer(PassID, PassDescNumbered, TG);
+}
+
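newPassTimer gives the first instance of a pass its plain description and appends " #N" to later instances so the timing report can tell them apart. A small sketch of that naming scheme using standard containers (the pass names in the comments are made up):

#include <map>
#include <string>

// First instance keeps its plain description, later instances get " #N".
std::string numberedDesc(std::map<std::string, unsigned> &Counts,
                         const std::string &PassID,
                         const std::string &PassDesc) {
  unsigned &Num = Counts[PassID];
  ++Num;
  return Num <= 1 ? PassDesc : PassDesc + " #" + std::to_string(Num);
}

// numberedDesc(C, "licm", "Loop Invariant Code Motion") -> "Loop Invariant Code Motion"
// a second call with the same PassID                    -> "Loop Invariant Code Motion #2"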
+Timer *PassTimingInfo::getPassTimer(Pass *P, PassInstanceID Pass) {
+ if (P->getAsPMDataManager())
+ return nullptr;
+
+ init();
+ sys::SmartScopedLock<true> Lock(*TimingInfoMutex);
+ std::unique_ptr<Timer> &T = TimingData[Pass];
+
+ if (!T) {
+ StringRef PassName = P->getPassName();
+ StringRef PassArgument;
+ if (const PassInfo *PI = Pass::lookupPassInfo(P->getPassID()))
+ PassArgument = PI->getPassArgument();
+ T.reset(newPassTimer(PassArgument.empty() ? PassName : PassArgument, PassName));
+ }
+ return T.get();
+}
+
+PassTimingInfo *PassTimingInfo::TheTimeInfo;
+} // namespace legacy
+} // namespace
+
+Timer *getPassTimer(Pass *P) {
+ legacy::PassTimingInfo::init();
+ if (legacy::PassTimingInfo::TheTimeInfo)
+ return legacy::PassTimingInfo::TheTimeInfo->getPassTimer(P, P);
+ return nullptr;
+}
+
+/// If timing is enabled, report the times collected up to now and then reset
+/// them.
+void reportAndResetTimings() {
+ if (legacy::PassTimingInfo::TheTimeInfo)
+ legacy::PassTimingInfo::TheTimeInfo->print();
+}
+
+//===----------------------------------------------------------------------===//
+// Pass timing handling for the New Pass Manager
+//===----------------------------------------------------------------------===//
+
+/// Returns the timer for the specified pass invocation of \p PassID.
+/// Each call creates a new timer.
+Timer &TimePassesHandler::getPassTimer(StringRef PassID) {
+ // Bump counts for each request of the timer.
+ unsigned Count = nextPassID(PassID);
+
+ // Unconditionally append a pass-invocation number to the description.
+ std::string FullDesc = formatv("{0} #{1}", PassID, Count).str();
+
+ PassInvocationID UID{PassID, Count};
+ Timer *T = new Timer(PassID, FullDesc, TG);
+ auto Pair = TimingData.try_emplace(UID, T);
+ assert(Pair.second && "should always create a new timer");
+ return *(Pair.first->second.get());
+}
+
+TimePassesHandler::TimePassesHandler(bool Enabled)
+ : TG("pass", "... Pass execution timing report ..."), Enabled(Enabled) {}
+
+void TimePassesHandler::print() { TG.print(*CreateInfoOutputFile()); }
+
+LLVM_DUMP_METHOD void TimePassesHandler::dump() const {
+ dbgs() << "Dumping timers for " << getTypeName<TimePassesHandler>()
+ << ":\n\tRunning:\n";
+ for (auto &I : TimingData) {
+ const Timer *MyTimer = I.second.get();
+ if (!MyTimer || MyTimer->isRunning())
+ dbgs() << "\tTimer " << MyTimer << " for pass " << I.first.first << "("
+ << I.first.second << ")\n";
+ }
+ dbgs() << "\tTriggered:\n";
+ for (auto &I : TimingData) {
+ const Timer *MyTimer = I.second.get();
+ if (!MyTimer || (MyTimer->hasTriggered() && !MyTimer->isRunning()))
+ dbgs() << "\tTimer " << MyTimer << " for pass " << I.first.first << "("
+ << I.first.second << ")\n";
+ }
+}
+
+void TimePassesHandler::startTimer(StringRef PassID) {
+ Timer &MyTimer = getPassTimer(PassID);
+ TimerStack.push_back(&MyTimer);
+ if (!MyTimer.isRunning())
+ MyTimer.startTimer();
+}
+
+void TimePassesHandler::stopTimer(StringRef PassID) {
+ assert(TimerStack.size() > 0 && "empty stack in stopTimer");
+ Timer *MyTimer = TimerStack.pop_back_val();
+ assert(MyTimer && "timer should be present");
+ if (MyTimer->isRunning())
+ MyTimer->stopTimer();
+}
+
+static bool matchPassManager(StringRef PassID) {
+ size_t prefix_pos = PassID.find('<');
+ if (prefix_pos == StringRef::npos)
+ return false;
+ StringRef Prefix = PassID.substr(0, prefix_pos);
+ return Prefix.endswith("PassManager") || Prefix.endswith("PassAdaptor") ||
+ Prefix.endswith("AnalysisManagerProxy");
+}
+
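matchPassManager filters out structural passes (pass managers, adaptors, analysis-manager proxies) by looking only at the prefix before the '<' of the templated pass name. A self-contained sketch of the same check with std::string, where the pass IDs in the comments are illustrative:

#include <string>

static bool endsWith(const std::string &S, const std::string &Suffix) {
  return S.size() >= Suffix.size() &&
         S.compare(S.size() - Suffix.size(), Suffix.size(), Suffix) == 0;
}

// True for names like "ModuleToFunctionPassAdaptor<...>"; false for plain
// passes such as "InstCombinePass", which have no '<' at all.
static bool looksLikePassManager(const std::string &PassID) {
  std::string::size_type Pos = PassID.find('<');
  if (Pos == std::string::npos)
    return false;
  std::string Prefix = PassID.substr(0, Pos);
  return endsWith(Prefix, "PassManager") || endsWith(Prefix, "PassAdaptor") ||
         endsWith(Prefix, "AnalysisManagerProxy");
}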
+bool TimePassesHandler::runBeforePass(StringRef PassID) {
+ if (matchPassManager(PassID))
+ return true;
+
+ startTimer(PassID);
+
+ LLVM_DEBUG(dbgs() << "after runBeforePass(" << PassID << ")\n");
+ LLVM_DEBUG(dump());
+
+ // We are not going to skip this pass; return true.
+ return true;
+}
+
+void TimePassesHandler::runAfterPass(StringRef PassID) {
+ if (matchPassManager(PassID))
+ return;
+
+ stopTimer(PassID);
+
+ LLVM_DEBUG(dbgs() << "after runAfterPass(" << PassID << ")\n");
+ LLVM_DEBUG(dump());
+}
+
+void TimePassesHandler::registerCallbacks(PassInstrumentationCallbacks &PIC) {
+ if (!Enabled)
+ return;
+
+ PIC.registerBeforePassCallback(
+ [this](StringRef P, Any) { return this->runBeforePass(P); });
+ PIC.registerAfterPassCallback(
+ [this](StringRef P, Any) { this->runAfterPass(P); });
+ PIC.registerAfterPassInvalidatedCallback(
+ [this](StringRef P) { this->runAfterPass(P); });
+ PIC.registerBeforeAnalysisCallback(
+ [this](StringRef P, Any) { this->runBeforePass(P); });
+ PIC.registerAfterAnalysisCallback(
+ [this](StringRef P, Any) { this->runAfterPass(P); });
+}
+
+} // namespace llvm
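registerCallbacks is how the handler gets attached to the new pass manager's instrumentation. A hedged sketch of wiring it up, assuming the PassInstrumentationCallbacks object is the one later handed to the analysis managers by the driver (that plumbing is outside this patch):

#include "llvm/IR/PassInstrumentation.h"
#include "llvm/IR/PassTimingInfo.h"

using namespace llvm;

void setUpPassTiming(PassInstrumentationCallbacks &PIC) {
  // The handler must outlive compilation; a function-local static is the
  // simplest way to show that in a sketch.
  static TimePassesHandler TPH(/*Enabled=*/true);
  TPH.registerCallbacks(PIC);
}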
diff --git a/lib/IR/SafepointIRVerifier.cpp b/lib/IR/SafepointIRVerifier.cpp
index 6f73126be738..12ada1320225 100644
--- a/lib/IR/SafepointIRVerifier.cpp
+++ b/lib/IR/SafepointIRVerifier.cpp
@@ -92,6 +92,7 @@ public:
Listed = true;
}
}
+ (void)Listed;
assert(Listed && "basic block is not found among incoming blocks");
return false;
}
@@ -133,7 +134,7 @@ public:
// Top-down walk of the dominator tree
ReversePostOrderTraversal<const Function *> RPOT(&F);
for (const BasicBlock *BB : RPOT) {
- const TerminatorInst *TI = BB->getTerminator();
+ const Instruction *TI = BB->getTerminator();
assert(TI && "blocks must be well formed");
// For conditional branches, we can perform simple conditional propagation on
@@ -256,8 +257,7 @@ static bool containsGCPtrType(Type *Ty) {
if (ArrayType *AT = dyn_cast<ArrayType>(Ty))
return containsGCPtrType(AT->getElementType());
if (StructType *ST = dyn_cast<StructType>(Ty))
- return std::any_of(ST->subtypes().begin(), ST->subtypes().end(),
- containsGCPtrType);
+ return llvm::any_of(ST->elements(), containsGCPtrType);
return false;
}
diff --git a/lib/IR/Type.cpp b/lib/IR/Type.cpp
index 83016496ff7e..0fb079c5ab73 100644
--- a/lib/IR/Type.cpp
+++ b/lib/IR/Type.cpp
@@ -297,20 +297,26 @@ FunctionType::FunctionType(Type *Result, ArrayRef<Type*> Params,
FunctionType *FunctionType::get(Type *ReturnType,
ArrayRef<Type*> Params, bool isVarArg) {
LLVMContextImpl *pImpl = ReturnType->getContext().pImpl;
- FunctionTypeKeyInfo::KeyTy Key(ReturnType, Params, isVarArg);
- auto I = pImpl->FunctionTypes.find_as(Key);
+ const FunctionTypeKeyInfo::KeyTy Key(ReturnType, Params, isVarArg);
FunctionType *FT;
-
- if (I == pImpl->FunctionTypes.end()) {
+ // Since we only want to allocate a fresh function type when none is found
+ // and we don't want to perform two lookups (one to check for existence and
+ // one to insert the newly allocated one), we instead look up based on the
+ // Key and update the reference to the function type in-place to a newly
+ // allocated one if not found.
+ auto Insertion = pImpl->FunctionTypes.insert_as(nullptr, Key);
+ if (Insertion.second) {
+ // The function type was not found. Allocate one and update FunctionTypes
+ // in-place.
FT = (FunctionType *)pImpl->TypeAllocator.Allocate(
sizeof(FunctionType) + sizeof(Type *) * (Params.size() + 1),
alignof(FunctionType));
new (FT) FunctionType(ReturnType, Params, isVarArg);
- pImpl->FunctionTypes.insert(FT);
+ *Insertion.first = FT;
} else {
- FT = *I;
+ // The function type was found. Just return it.
+ FT = *Insertion.first;
}
-
return FT;
}
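The rewritten lookup inserts a null placeholder keyed on the profile and only allocates when the insertion actually created a new slot, so the hash walk happens once instead of a find followed by an insert. The same idiom with a standard container looks roughly like this (a sketch of the pattern, not the DenseSet insert_as API itself):

#include <memory>
#include <string>
#include <unordered_map>

struct Widget {
  explicit Widget(std::string K) : Key(std::move(K)) {}
  std::string Key;
};

// Single-lookup get-or-create: try_emplace does one hash lookup and reports
// via .second whether the slot was newly created.
Widget *getOrCreate(
    std::unordered_map<std::string, std::unique_ptr<Widget>> &Cache,
    const std::string &Key) {
  auto Insertion = Cache.try_emplace(Key, nullptr);
  if (Insertion.second)
    Insertion.first->second = std::make_unique<Widget>(Key); // fill in-place
  return Insertion.first->second.get();
}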
@@ -336,18 +342,25 @@ bool FunctionType::isValidArgumentType(Type *ArgTy) {
StructType *StructType::get(LLVMContext &Context, ArrayRef<Type*> ETypes,
bool isPacked) {
LLVMContextImpl *pImpl = Context.pImpl;
- AnonStructTypeKeyInfo::KeyTy Key(ETypes, isPacked);
- auto I = pImpl->AnonStructTypes.find_as(Key);
- StructType *ST;
+ const AnonStructTypeKeyInfo::KeyTy Key(ETypes, isPacked);
- if (I == pImpl->AnonStructTypes.end()) {
- // Value not found. Create a new type!
+ StructType *ST;
+ // Since we only want to allocate a fresh struct type when none is found
+ // and we don't want to perform two lookups (one to check for existence and
+ // one to insert the newly allocated one), we instead look up based on the
+ // Key and update the reference to the struct type in-place to a newly
+ // allocated one if not found.
+ auto Insertion = pImpl->AnonStructTypes.insert_as(nullptr, Key);
+ if (Insertion.second) {
+ // The struct type was not found. Allocate one and update AnonStructTypes
+ // in-place.
ST = new (Context.pImpl->TypeAllocator) StructType(Context);
ST->setSubclassData(SCDB_IsLiteral); // Literal struct.
ST->setBody(ETypes, isPacked);
- Context.pImpl->AnonStructTypes.insert(ST);
+ *Insertion.first = ST;
} else {
- ST = *I;
+ // The struct type was found. Just return it.
+ ST = *Insertion.first;
}
return ST;
diff --git a/lib/IR/Value.cpp b/lib/IR/Value.cpp
index 295d6ecf0db0..80b993c89f7f 100644
--- a/lib/IR/Value.cpp
+++ b/lib/IR/Value.cpp
@@ -16,7 +16,6 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SetVector.h"
-#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
@@ -130,20 +129,11 @@ void Value::destroyValueName() {
}
bool Value::hasNUses(unsigned N) const {
- const_use_iterator UI = use_begin(), E = use_end();
-
- for (; N; --N, ++UI)
- if (UI == E) return false; // Too few.
- return UI == E;
+ return hasNItems(use_begin(), use_end(), N);
}
bool Value::hasNUsesOrMore(unsigned N) const {
- const_use_iterator UI = use_begin(), E = use_end();
-
- for (; N; --N, ++UI)
- if (UI == E) return false; // Too few.
-
- return true;
+ return hasNItemsOrMore(use_begin(), use_end(), N);
}
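hasNUses and hasNUsesOrMore now defer to generic iterator-counting helpers rather than open-coding the loops. A sketch of what such helpers look like, mirroring the bodies removed above (the names here are local stand-ins, not necessarily the exact ADT spellings):

#include <iterator>

// True iff [Begin, End) holds exactly N items; stops early for long ranges.
template <typename IterTy>
bool hasNItemsSketch(IterTy Begin, IterTy End, unsigned N) {
  for (; N; --N, ++Begin)
    if (Begin == End)
      return false;        // too few
  return Begin == End;     // exactly N iff we are now at the end
}

// True iff [Begin, End) holds at least N items.
template <typename IterTy>
bool hasNItemsOrMoreSketch(IterTy Begin, IterTy End, unsigned N) {
  for (; N; --N, ++Begin)
    if (Begin == End)
      return false;
  return true;
}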
bool Value::isUsedInBasicBlock(const BasicBlock *BB) const {
@@ -405,7 +395,7 @@ static bool contains(Value *Expr, Value *V) {
}
#endif // NDEBUG
-void Value::doRAUW(Value *New, bool NoMetadata) {
+void Value::doRAUW(Value *New, ReplaceMetadataUses ReplaceMetaUses) {
assert(New && "Value::replaceAllUsesWith(<null>) is invalid!");
assert(!contains(New, this) &&
"this->replaceAllUsesWith(expr(this)) is NOT valid!");
@@ -415,7 +405,7 @@ void Value::doRAUW(Value *New, bool NoMetadata) {
// Notify all ValueHandles (if present) that this value is going away.
if (HasValueHandle)
ValueHandleBase::ValueIsRAUWd(this, New);
- if (!NoMetadata && isUsedByMetadata())
+ if (ReplaceMetaUses == ReplaceMetadataUses::Yes && isUsedByMetadata())
ValueAsMetadata::handleRAUW(this, New);
while (!materialized_use_empty()) {
@@ -437,11 +427,11 @@ void Value::doRAUW(Value *New, bool NoMetadata) {
}
void Value::replaceAllUsesWith(Value *New) {
- doRAUW(New, false /* NoMetadata */);
+ doRAUW(New, ReplaceMetadataUses::Yes);
}
void Value::replaceNonMetadataUsesWith(Value *New) {
- doRAUW(New, true /* NoMetadata */);
+ doRAUW(New, ReplaceMetadataUses::No);
}
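With the enum, each call site states explicitly whether metadata uses follow the replacement. A short usage sketch of the two public entry points (Old and New are placeholders for whatever values are being rewritten):

#include "llvm/IR/Value.h"

using namespace llvm;

void retargetEverything(Value *Old, Value *New) {
  // Rewrites every IR use and also retargets metadata uses (e.g. debug info).
  Old->replaceAllUsesWith(New);
}

void retargetIRUsesOnly(Value *Old, Value *New) {
  // Metadata keeps pointing at Old; only IR uses are rewritten.
  Old->replaceNonMetadataUsesWith(New);
}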
// Like replaceAllUsesWith except it does not handle constants or basic blocks.
@@ -512,8 +502,8 @@ static const Value *stripPointerCastsAndOffsets(const Value *V) {
return V;
V = GA->getAliasee();
} else {
- if (auto CS = ImmutableCallSite(V)) {
- if (const Value *RV = CS.getReturnedArgOperand()) {
+ if (const auto *Call = dyn_cast<CallBase>(V)) {
+ if (const Value *RV = Call->getReturnedArgOperand()) {
V = RV;
continue;
}
@@ -521,9 +511,9 @@ static const Value *stripPointerCastsAndOffsets(const Value *V) {
// but it can't be marked with returned attribute, that's why it needs
// special case.
if (StripKind == PSK_ZeroIndicesAndAliasesAndInvariantGroups &&
- (CS.getIntrinsicID() == Intrinsic::launder_invariant_group ||
- CS.getIntrinsicID() == Intrinsic::strip_invariant_group)) {
- V = CS.getArgOperand(0);
+ (Call->getIntrinsicID() == Intrinsic::launder_invariant_group ||
+ Call->getIntrinsicID() == Intrinsic::strip_invariant_group)) {
+ V = Call->getArgOperand(0);
continue;
}
}
@@ -582,8 +572,8 @@ Value::stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL,
} else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
V = GA->getAliasee();
} else {
- if (auto CS = ImmutableCallSite(V))
- if (const Value *RV = CS.getReturnedArgOperand()) {
+ if (const auto *Call = dyn_cast<CallBase>(V))
+ if (const Value *RV = Call->getReturnedArgOperand()) {
V = RV;
continue;
}
@@ -617,10 +607,11 @@ uint64_t Value::getPointerDereferenceableBytes(const DataLayout &DL,
DerefBytes = A->getDereferenceableOrNullBytes();
CanBeNull = true;
}
- } else if (auto CS = ImmutableCallSite(this)) {
- DerefBytes = CS.getDereferenceableBytes(AttributeList::ReturnIndex);
+ } else if (const auto *Call = dyn_cast<CallBase>(this)) {
+ DerefBytes = Call->getDereferenceableBytes(AttributeList::ReturnIndex);
if (DerefBytes == 0) {
- DerefBytes = CS.getDereferenceableOrNullBytes(AttributeList::ReturnIndex);
+ DerefBytes =
+ Call->getDereferenceableOrNullBytes(AttributeList::ReturnIndex);
CanBeNull = true;
}
} else if (const LoadInst *LI = dyn_cast<LoadInst>(this)) {
@@ -692,8 +683,8 @@ unsigned Value::getPointerAlignment(const DataLayout &DL) const {
if (AllocatedType->isSized())
Align = DL.getPrefTypeAlignment(AllocatedType);
}
- } else if (auto CS = ImmutableCallSite(this))
- Align = CS.getAttributes().getRetAlignment();
+ } else if (const auto *Call = dyn_cast<CallBase>(this))
+ Align = Call->getAttributes().getRetAlignment();
else if (const LoadInst *LI = dyn_cast<LoadInst>(this))
if (MDNode *MD = LI->getMetadata(LLVMContext::MD_align)) {
ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
diff --git a/lib/IR/Verifier.cpp b/lib/IR/Verifier.cpp
index e5231bb78a36..30e77b92009f 100644
--- a/lib/IR/Verifier.cpp
+++ b/lib/IR/Verifier.cpp
@@ -65,7 +65,6 @@
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
-#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Comdat.h"
#include "llvm/IR/Constant.h"
@@ -140,21 +139,20 @@ private:
}
void Write(const Value *V) {
- if (!V)
- return;
+ if (V)
+ Write(*V);
+ }
+
+ void Write(const Value &V) {
if (isa<Instruction>(V)) {
- V->print(*OS, MST);
+ V.print(*OS, MST);
*OS << '\n';
} else {
- V->printAsOperand(*OS, true, MST);
+ V.printAsOperand(*OS, true, MST);
*OS << '\n';
}
}
- void Write(ImmutableCallSite CS) {
- Write(CS.getInstruction());
- }
-
void Write(const Metadata *MD) {
if (!MD)
return;
@@ -281,13 +279,16 @@ class Verifier : public InstVisitor<Verifier>, VerifierSupport {
/// Whether the current function has a DISubprogram attached to it.
bool HasDebugInfo = false;
+ /// Whether source was present on the first DIFile encountered in each CU.
+ DenseMap<const DICompileUnit *, bool> HasSourceDebugInfo;
+
/// Stores the count of how many objects were passed to llvm.localescape for a
/// given function and the largest index passed to llvm.localrecover.
DenseMap<Function *, std::pair<unsigned, unsigned>> FrameEscapeInfo;
// Maps catchswitches and cleanuppads that unwind to siblings to the
// terminators that indicate the unwind, used to detect cycles therein.
- MapVector<Instruction *, TerminatorInst *> SiblingFuncletInfo;
+ MapVector<Instruction *, Instruction *> SiblingFuncletInfo;
/// Cache of constants visited in search of ConstantExprs.
SmallPtrSet<const Constant *, 32> ConstantExprVisited;
@@ -383,6 +384,7 @@ public:
visitModuleFlags(M);
visitModuleIdents(M);
+ visitModuleCommandLines(M);
verifyCompileUnits();
@@ -405,6 +407,7 @@ private:
void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
void visitComdat(const Comdat &C);
void visitModuleIdents(const Module &M);
+ void visitModuleCommandLines(const Module &M);
void visitModuleFlags(const Module &M);
void visitModuleFlag(const MDNode *Op,
DenseMap<const MDString *, const MDNode *> &SeenIDs,
@@ -443,6 +446,8 @@ private:
void visitBitCastInst(BitCastInst &I);
void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
void visitPHINode(PHINode &PN);
+ void visitCallBase(CallBase &Call);
+ void visitUnaryOperator(UnaryOperator &U);
void visitBinaryOperator(BinaryOperator &B);
void visitICmpInst(ICmpInst &IC);
void visitFCmpInst(FCmpInst &FC);
@@ -457,7 +462,7 @@ private:
void visitStoreInst(StoreInst &SI);
void verifyDominatesUse(Instruction &I, unsigned i);
void visitInstruction(Instruction &I);
- void visitTerminatorInst(TerminatorInst &I);
+ void visitTerminator(Instruction &I);
void visitBranchInst(BranchInst &BI);
void visitReturnInst(ReturnInst &RI);
void visitSwitchInst(SwitchInst &SI);
@@ -465,9 +470,9 @@ private:
void visitSelectInst(SelectInst &SI);
void visitUserOp1(Instruction &I);
void visitUserOp2(Instruction &I) { visitUserOp1(I); }
- void visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS);
+ void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
- void visitDbgIntrinsic(StringRef Kind, DbgInfoIntrinsic &DII);
+ void visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII);
void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
void visitAtomicRMWInst(AtomicRMWInst &RMWI);
@@ -485,8 +490,7 @@ private:
void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
void visitCleanupReturnInst(CleanupReturnInst &CRI);
- void verifyCallSite(CallSite CS);
- void verifySwiftErrorCallSite(CallSite CS, const Value *SwiftErrorVal);
+ void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
void verifySwiftErrorValue(const Value *SwiftErrorVal);
void verifyMustTailCall(CallInst &CI);
bool performTypeCheck(Intrinsic::ID ID, Function *F, Type *Ty, int VT,
@@ -501,16 +505,16 @@ private:
void visitConstantExprsRecursively(const Constant *EntryC);
void visitConstantExpr(const ConstantExpr *CE);
- void verifyStatepoint(ImmutableCallSite CS);
+ void verifyStatepoint(const CallBase &Call);
void verifyFrameRecoverIndices();
void verifySiblingFuncletUnwinds();
- void verifyFragmentExpression(const DbgInfoIntrinsic &I);
+ void verifyFragmentExpression(const DbgVariableIntrinsic &I);
template <typename ValueOrMetadata>
void verifyFragmentExpression(const DIVariable &V,
DIExpression::FragmentInfo Fragment,
ValueOrMetadata *Desc);
- void verifyFnArgs(const DbgInfoIntrinsic &I);
+ void verifyFnArgs(const DbgVariableIntrinsic &I);
/// Module-level debug info verification...
void verifyCompileUnits();
@@ -518,6 +522,9 @@ private:
/// Module-level verification that all @llvm.experimental.deoptimize
/// declarations share the same calling convention.
void verifyDeoptimizeCallingConvs();
+
+ /// Verify all-or-nothing property of DIFile source attribute within a CU.
+ void verifySourceDebugInfo(const DICompileUnit &U, const DIFile &F);
};
} // end anonymous namespace
@@ -632,7 +639,8 @@ void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
if (ArrayType *ATy = dyn_cast<ArrayType>(GV.getValueType())) {
StructType *STy = dyn_cast<StructType>(ATy->getElementType());
PointerType *FuncPtrTy =
- FunctionType::get(Type::getVoidTy(Context), false)->getPointerTo();
+ FunctionType::get(Type::getVoidTy(Context), false)->
+ getPointerTo(DL.getProgramAddressSpace());
// FIXME: Reject the 2-field form in LLVM 4.0.
Assert(STy &&
(STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
@@ -886,6 +894,8 @@ void Verifier::visitDIBasicType(const DIBasicType &N) {
AssertDI(N.getTag() == dwarf::DW_TAG_base_type ||
N.getTag() == dwarf::DW_TAG_unspecified_type,
"invalid tag", &N);
+ AssertDI(!(N.isBigEndian() && N.isLittleEndian()),
+ "has conflicting flags", &N);
}
void Verifier::visitDIDerivedType(const DIDerivedType &N) {
@@ -1028,6 +1038,8 @@ void Verifier::visitDICompileUnit(const DICompileUnit &N) {
AssertDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
N.getFile());
+ verifySourceDebugInfo(N, *N.getFile());
+
AssertDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
"invalid emission kind", &N);
@@ -1105,6 +1117,8 @@ void Verifier::visitDISubprogram(const DISubprogram &N) {
AssertDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
AssertDI(Unit, "subprogram definitions must have a compile unit", &N);
AssertDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
+ if (N.getFile())
+ verifySourceDebugInfo(*N.getUnit(), *N.getFile());
} else {
// Subprogram declarations (part of the type hierarchy).
AssertDI(!Unit, "subprogram declarations must not have a compile unit", &N);
@@ -1117,6 +1131,10 @@ void Verifier::visitDISubprogram(const DISubprogram &N) {
AssertDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
Op);
}
+
+ if (N.areAllCallsDescribed())
+ AssertDI(N.isDefinition(),
+ "DIFlagAllCallsDescribed must be attached to a definition");
}
void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
@@ -1223,6 +1241,8 @@ void Verifier::visitDILocalVariable(const DILocalVariable &N) {
AssertDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
"local variable requires a valid scope", &N, N.getRawScope());
+ if (auto Ty = N.getType())
+ AssertDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
}
void Verifier::visitDILabel(const DILabel &N) {
@@ -1295,6 +1315,24 @@ void Verifier::visitModuleIdents(const Module &M) {
}
}
+void Verifier::visitModuleCommandLines(const Module &M) {
+ const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
+ if (!CommandLines)
+ return;
+
+ // llvm.commandline takes a list of metadata entries. Each entry has only one
+ // string. Scan each llvm.commandline entry and make sure that this
+ // requirement is met.
+ for (const MDNode *N : CommandLines->operands()) {
+ Assert(N->getNumOperands() == 1,
+ "incorrect number of operands in llvm.commandline metadata", N);
+ Assert(dyn_cast_or_null<MDString>(N->getOperand(0)),
+ ("invalid value for llvm.commandline metadata entry operand"
+ "(the operand should be a string)"),
+ N->getOperand(0));
+ }
+}
+
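The check above accepts llvm.commandline only when every operand is an MDNode wrapping a single string. A hedged sketch of producing a well-formed entry from C++, where the command-line text passed in is a placeholder:

#include "llvm/ADT/StringRef.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"

using namespace llvm;

void addCommandLineEntry(Module &M, StringRef Cmd) {
  LLVMContext &Ctx = M.getContext();
  NamedMDNode *NMD = M.getOrInsertNamedMetadata("llvm.commandline");
  // Each operand is an MDNode holding exactly one MDString, which is the
  // shape Verifier::visitModuleCommandLines expects.
  NMD->addOperand(MDNode::get(Ctx, {MDString::get(Ctx, Cmd)}));
}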
void Verifier::visitModuleFlags(const Module &M) {
const NamedMDNode *Flags = M.getModuleFlagsMetadata();
if (!Flags) return;
@@ -1476,6 +1514,7 @@ static bool isFuncOnlyAttr(Attribute::AttrKind Kind) {
case Attribute::InaccessibleMemOnly:
case Attribute::InaccessibleMemOrArgMemOnly:
case Attribute::AllocSize:
+ case Attribute::SpeculativeLoadHardening:
case Attribute::Speculatable:
case Attribute::StrictFP:
return true;
@@ -1854,127 +1893,136 @@ bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
}
/// Verify that statepoint intrinsic is well formed.
-void Verifier::verifyStatepoint(ImmutableCallSite CS) {
- assert(CS.getCalledFunction() &&
- CS.getCalledFunction()->getIntrinsicID() ==
- Intrinsic::experimental_gc_statepoint);
-
- const Instruction &CI = *CS.getInstruction();
+void Verifier::verifyStatepoint(const CallBase &Call) {
+ assert(Call.getCalledFunction() &&
+ Call.getCalledFunction()->getIntrinsicID() ==
+ Intrinsic::experimental_gc_statepoint);
- Assert(!CS.doesNotAccessMemory() && !CS.onlyReadsMemory() &&
- !CS.onlyAccessesArgMemory(),
+ Assert(!Call.doesNotAccessMemory() && !Call.onlyReadsMemory() &&
+ !Call.onlyAccessesArgMemory(),
"gc.statepoint must read and write all memory to preserve "
"reordering restrictions required by safepoint semantics",
- &CI);
+ Call);
- const Value *IDV = CS.getArgument(0);
+ const Value *IDV = Call.getArgOperand(0);
Assert(isa<ConstantInt>(IDV), "gc.statepoint ID must be a constant integer",
- &CI);
+ Call);
- const Value *NumPatchBytesV = CS.getArgument(1);
+ const Value *NumPatchBytesV = Call.getArgOperand(1);
Assert(isa<ConstantInt>(NumPatchBytesV),
"gc.statepoint number of patchable bytes must be a constant integer",
- &CI);
+ Call);
const int64_t NumPatchBytes =
cast<ConstantInt>(NumPatchBytesV)->getSExtValue();
assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
- Assert(NumPatchBytes >= 0, "gc.statepoint number of patchable bytes must be "
- "positive",
- &CI);
+ Assert(NumPatchBytes >= 0,
+ "gc.statepoint number of patchable bytes must be "
+ "positive",
+ Call);
- const Value *Target = CS.getArgument(2);
+ const Value *Target = Call.getArgOperand(2);
auto *PT = dyn_cast<PointerType>(Target->getType());
Assert(PT && PT->getElementType()->isFunctionTy(),
- "gc.statepoint callee must be of function pointer type", &CI, Target);
+ "gc.statepoint callee must be of function pointer type", Call, Target);
FunctionType *TargetFuncType = cast<FunctionType>(PT->getElementType());
- const Value *NumCallArgsV = CS.getArgument(3);
+ const Value *NumCallArgsV = Call.getArgOperand(3);
Assert(isa<ConstantInt>(NumCallArgsV),
"gc.statepoint number of arguments to underlying call "
"must be constant integer",
- &CI);
+ Call);
const int NumCallArgs = cast<ConstantInt>(NumCallArgsV)->getZExtValue();
Assert(NumCallArgs >= 0,
"gc.statepoint number of arguments to underlying call "
"must be positive",
- &CI);
+ Call);
const int NumParams = (int)TargetFuncType->getNumParams();
if (TargetFuncType->isVarArg()) {
Assert(NumCallArgs >= NumParams,
- "gc.statepoint mismatch in number of vararg call args", &CI);
+ "gc.statepoint mismatch in number of vararg call args", Call);
// TODO: Remove this limitation
Assert(TargetFuncType->getReturnType()->isVoidTy(),
"gc.statepoint doesn't support wrapping non-void "
"vararg functions yet",
- &CI);
+ Call);
} else
Assert(NumCallArgs == NumParams,
- "gc.statepoint mismatch in number of call args", &CI);
+ "gc.statepoint mismatch in number of call args", Call);
- const Value *FlagsV = CS.getArgument(4);
+ const Value *FlagsV = Call.getArgOperand(4);
Assert(isa<ConstantInt>(FlagsV),
- "gc.statepoint flags must be constant integer", &CI);
+ "gc.statepoint flags must be constant integer", Call);
const uint64_t Flags = cast<ConstantInt>(FlagsV)->getZExtValue();
Assert((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
- "unknown flag used in gc.statepoint flags argument", &CI);
+ "unknown flag used in gc.statepoint flags argument", Call);
// Verify that the types of the call parameter arguments match
// the type of the wrapped callee.
+ AttributeList Attrs = Call.getAttributes();
for (int i = 0; i < NumParams; i++) {
Type *ParamType = TargetFuncType->getParamType(i);
- Type *ArgType = CS.getArgument(5 + i)->getType();
+ Type *ArgType = Call.getArgOperand(5 + i)->getType();
Assert(ArgType == ParamType,
"gc.statepoint call argument does not match wrapped "
"function type",
- &CI);
+ Call);
+
+ if (TargetFuncType->isVarArg()) {
+ AttributeSet ArgAttrs = Attrs.getParamAttributes(5 + i);
+ Assert(!ArgAttrs.hasAttribute(Attribute::StructRet),
+ "Attribute 'sret' cannot be used for vararg call arguments!",
+ Call);
+ }
}
const int EndCallArgsInx = 4 + NumCallArgs;
- const Value *NumTransitionArgsV = CS.getArgument(EndCallArgsInx+1);
+ const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
Assert(isa<ConstantInt>(NumTransitionArgsV),
"gc.statepoint number of transition arguments "
"must be constant integer",
- &CI);
+ Call);
const int NumTransitionArgs =
cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
Assert(NumTransitionArgs >= 0,
- "gc.statepoint number of transition arguments must be positive", &CI);
+ "gc.statepoint number of transition arguments must be positive", Call);
const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
- const Value *NumDeoptArgsV = CS.getArgument(EndTransitionArgsInx+1);
+ const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
Assert(isa<ConstantInt>(NumDeoptArgsV),
"gc.statepoint number of deoptimization arguments "
"must be constant integer",
- &CI);
+ Call);
const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
- Assert(NumDeoptArgs >= 0, "gc.statepoint number of deoptimization arguments "
- "must be positive",
- &CI);
+ Assert(NumDeoptArgs >= 0,
+ "gc.statepoint number of deoptimization arguments "
+ "must be positive",
+ Call);
const int ExpectedNumArgs =
7 + NumCallArgs + NumTransitionArgs + NumDeoptArgs;
- Assert(ExpectedNumArgs <= (int)CS.arg_size(),
- "gc.statepoint too few arguments according to length fields", &CI);
+ Assert(ExpectedNumArgs <= (int)Call.arg_size(),
+ "gc.statepoint too few arguments according to length fields", Call);
// Check that the only uses of this gc.statepoint are gc.result or
// gc.relocate calls which are tied to this statepoint and thus part
// of the same statepoint sequence
- for (const User *U : CI.users()) {
- const CallInst *Call = dyn_cast<const CallInst>(U);
- Assert(Call, "illegal use of statepoint token", &CI, U);
- if (!Call) continue;
- Assert(isa<GCRelocateInst>(Call) || isa<GCResultInst>(Call),
+ for (const User *U : Call.users()) {
+ const CallInst *UserCall = dyn_cast<const CallInst>(U);
+ Assert(UserCall, "illegal use of statepoint token", Call, U);
+ if (!UserCall)
+ continue;
+ Assert(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
"gc.result or gc.relocate are the only value uses "
"of a gc.statepoint",
- &CI, U);
- if (isa<GCResultInst>(Call)) {
- Assert(Call->getArgOperand(0) == &CI,
- "gc.result connected to wrong gc.statepoint", &CI, Call);
+ Call, U);
+ if (isa<GCResultInst>(UserCall)) {
+ Assert(UserCall->getArgOperand(0) == &Call,
+ "gc.result connected to wrong gc.statepoint", Call, UserCall);
} else if (isa<GCRelocateInst>(Call)) {
- Assert(Call->getArgOperand(0) == &CI,
- "gc.relocate connected to wrong gc.statepoint", &CI, Call);
+ Assert(UserCall->getArgOperand(0) == &Call,
+ "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
}
}
@@ -2001,7 +2049,7 @@ void Verifier::verifyFrameRecoverIndices() {
}
}
-static Instruction *getSuccPad(TerminatorInst *Terminator) {
+static Instruction *getSuccPad(Instruction *Terminator) {
BasicBlock *UnwindDest;
if (auto *II = dyn_cast<InvokeInst>(Terminator))
UnwindDest = II->getUnwindDest();
@@ -2020,7 +2068,7 @@ void Verifier::verifySiblingFuncletUnwinds() {
if (Visited.count(PredPad))
continue;
Active.insert(PredPad);
- TerminatorInst *Terminator = Pair.second;
+ Instruction *Terminator = Pair.second;
do {
Instruction *SuccPad = getSuccPad(Terminator);
if (Active.count(SuccPad)) {
@@ -2029,7 +2077,7 @@ void Verifier::verifySiblingFuncletUnwinds() {
SmallVector<Instruction *, 8> CycleNodes;
do {
CycleNodes.push_back(CyclePad);
- TerminatorInst *CycleTerminator = SiblingFuncletInfo[CyclePad];
+ Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
if (CycleTerminator != CyclePad)
CycleNodes.push_back(CycleTerminator);
CyclePad = getSuccPad(CycleTerminator);
@@ -2262,6 +2310,10 @@ void Verifier::visitFunction(const Function &F) {
if (!Seen.insert(DL).second)
continue;
+ Metadata *Parent = DL->getRawScope();
+ AssertDI(Parent && isa<DILocalScope>(Parent),
+ "DILocation's scope must be a DILocalScope", N, &F, &I, DL,
+ Parent);
DILocalScope *Scope = DL->getInlinedAtScope();
if (Scope && !Seen.insert(Scope).second)
continue;
@@ -2293,7 +2345,7 @@ void Verifier::visitBasicBlock(BasicBlock &BB) {
if (isa<PHINode>(BB.front())) {
SmallVector<BasicBlock*, 8> Preds(pred_begin(&BB), pred_end(&BB));
SmallVector<std::pair<BasicBlock*, Value*>, 8> Values;
- llvm::sort(Preds.begin(), Preds.end());
+ llvm::sort(Preds);
for (const PHINode &PN : BB.phis()) {
// Ensure that PHI nodes have at least one entry!
Assert(PN.getNumIncomingValues() != 0,
@@ -2311,7 +2363,7 @@ void Verifier::visitBasicBlock(BasicBlock &BB) {
for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
Values.push_back(
std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
- llvm::sort(Values.begin(), Values.end());
+ llvm::sort(Values);
for (unsigned i = 0, e = Values.size(); i != e; ++i) {
// Check to make sure that if there is more than one entry for a
@@ -2340,7 +2392,7 @@ void Verifier::visitBasicBlock(BasicBlock &BB) {
}
}
-void Verifier::visitTerminatorInst(TerminatorInst &I) {
+void Verifier::visitTerminator(Instruction &I) {
// Ensure that terminators only exist at the end of the basic block.
Assert(&I == I.getParent()->getTerminator(),
"Terminator found in the middle of a basic block!", I.getParent());
@@ -2352,7 +2404,7 @@ void Verifier::visitBranchInst(BranchInst &BI) {
Assert(BI.getCondition()->getType()->isIntegerTy(1),
"Branch condition is not 'i1' type!", &BI, BI.getCondition());
}
- visitTerminatorInst(BI);
+ visitTerminator(BI);
}
void Verifier::visitReturnInst(ReturnInst &RI) {
@@ -2371,7 +2423,7 @@ void Verifier::visitReturnInst(ReturnInst &RI) {
// Check to make sure that the return value has necessary properties for
// terminators...
- visitTerminatorInst(RI);
+ visitTerminator(RI);
}
void Verifier::visitSwitchInst(SwitchInst &SI) {
@@ -2386,7 +2438,7 @@ void Verifier::visitSwitchInst(SwitchInst &SI) {
"Duplicate integer as switch case", &SI, Case.getCaseValue());
}
- visitTerminatorInst(SI);
+ visitTerminator(SI);
}
void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
@@ -2396,7 +2448,7 @@ void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
Assert(BI.getDestination(i)->getType()->isLabelTy(),
"Indirectbr destinations must all have pointer type!", &BI);
- visitTerminatorInst(BI);
+ visitTerminator(BI);
}
void Verifier::visitSelectInst(SelectInst &SI) {
@@ -2695,77 +2747,79 @@ void Verifier::visitPHINode(PHINode &PN) {
visitInstruction(PN);
}
-void Verifier::verifyCallSite(CallSite CS) {
- Instruction *I = CS.getInstruction();
-
- Assert(CS.getCalledValue()->getType()->isPointerTy(),
- "Called function must be a pointer!", I);
- PointerType *FPTy = cast<PointerType>(CS.getCalledValue()->getType());
+void Verifier::visitCallBase(CallBase &Call) {
+ Assert(Call.getCalledValue()->getType()->isPointerTy(),
+ "Called function must be a pointer!", Call);
+ PointerType *FPTy = cast<PointerType>(Call.getCalledValue()->getType());
Assert(FPTy->getElementType()->isFunctionTy(),
- "Called function is not pointer to function type!", I);
+ "Called function is not pointer to function type!", Call);
- Assert(FPTy->getElementType() == CS.getFunctionType(),
- "Called function is not the same type as the call!", I);
+ Assert(FPTy->getElementType() == Call.getFunctionType(),
+ "Called function is not the same type as the call!", Call);
- FunctionType *FTy = CS.getFunctionType();
+ FunctionType *FTy = Call.getFunctionType();
// Verify that the correct number of arguments are being passed
if (FTy->isVarArg())
- Assert(CS.arg_size() >= FTy->getNumParams(),
- "Called function requires more parameters than were provided!", I);
+ Assert(Call.arg_size() >= FTy->getNumParams(),
+ "Called function requires more parameters than were provided!",
+ Call);
else
- Assert(CS.arg_size() == FTy->getNumParams(),
- "Incorrect number of arguments passed to called function!", I);
+ Assert(Call.arg_size() == FTy->getNumParams(),
+ "Incorrect number of arguments passed to called function!", Call);
// Verify that all arguments to the call match the function type.
for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
- Assert(CS.getArgument(i)->getType() == FTy->getParamType(i),
+ Assert(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
"Call parameter type does not match function signature!",
- CS.getArgument(i), FTy->getParamType(i), I);
+ Call.getArgOperand(i), FTy->getParamType(i), Call);
- AttributeList Attrs = CS.getAttributes();
+ AttributeList Attrs = Call.getAttributes();
- Assert(verifyAttributeCount(Attrs, CS.arg_size()),
- "Attribute after last parameter!", I);
+ Assert(verifyAttributeCount(Attrs, Call.arg_size()),
+ "Attribute after last parameter!", Call);
if (Attrs.hasAttribute(AttributeList::FunctionIndex, Attribute::Speculatable)) {
// Don't allow speculatable on call sites, unless the underlying function
// declaration is also speculatable.
- Function *Callee
- = dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
+ Function *Callee =
+ dyn_cast<Function>(Call.getCalledValue()->stripPointerCasts());
Assert(Callee && Callee->isSpeculatable(),
- "speculatable attribute may not apply to call sites", I);
+ "speculatable attribute may not apply to call sites", Call);
}
// Verify call attributes.
- verifyFunctionAttrs(FTy, Attrs, I);
+ verifyFunctionAttrs(FTy, Attrs, &Call);
// Conservatively check the inalloca argument.
// We have a bug if we can find that there is an underlying alloca without
// inalloca.
- if (CS.hasInAllocaArgument()) {
- Value *InAllocaArg = CS.getArgument(FTy->getNumParams() - 1);
+ if (Call.hasInAllocaArgument()) {
+ Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
Assert(AI->isUsedWithInAlloca(),
- "inalloca argument for call has mismatched alloca", AI, I);
+ "inalloca argument for call has mismatched alloca", AI, Call);
}
// For each argument of the callsite, if it has the swifterror argument,
// make sure the underlying alloca/parameter it comes from has a swifterror as
// well.
for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
- if (CS.paramHasAttr(i, Attribute::SwiftError)) {
- Value *SwiftErrorArg = CS.getArgument(i);
+ if (Call.paramHasAttr(i, Attribute::SwiftError)) {
+ Value *SwiftErrorArg = Call.getArgOperand(i);
if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
Assert(AI->isSwiftError(),
- "swifterror argument for call has mismatched alloca", AI, I);
+ "swifterror argument for call has mismatched alloca", AI, Call);
continue;
}
auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
- Assert(ArgI, "swifterror argument should come from an alloca or parameter", SwiftErrorArg, I);
+ Assert(ArgI,
+ "swifterror argument should come from an alloca or parameter",
+ SwiftErrorArg, Call);
Assert(ArgI->hasSwiftErrorAttr(),
- "swifterror argument for call has mismatched parameter", ArgI, I);
+ "swifterror argument for call has mismatched parameter", ArgI,
+ Call);
}
if (FTy->isVarArg()) {
@@ -2781,90 +2835,97 @@ void Verifier::verifyCallSite(CallSite CS) {
}
// Check attributes on the varargs part.
- for (unsigned Idx = FTy->getNumParams(); Idx < CS.arg_size(); ++Idx) {
- Type *Ty = CS.getArgument(Idx)->getType();
+ for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
+ Type *Ty = Call.getArgOperand(Idx)->getType();
AttributeSet ArgAttrs = Attrs.getParamAttributes(Idx);
- verifyParameterAttrs(ArgAttrs, Ty, I);
+ verifyParameterAttrs(ArgAttrs, Ty, &Call);
if (ArgAttrs.hasAttribute(Attribute::Nest)) {
- Assert(!SawNest, "More than one parameter has attribute nest!", I);
+ Assert(!SawNest, "More than one parameter has attribute nest!", Call);
SawNest = true;
}
if (ArgAttrs.hasAttribute(Attribute::Returned)) {
Assert(!SawReturned, "More than one parameter has attribute returned!",
- I);
+ Call);
Assert(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
"Incompatible argument and return types for 'returned' "
"attribute",
- I);
+ Call);
SawReturned = true;
}
- Assert(!ArgAttrs.hasAttribute(Attribute::StructRet),
- "Attribute 'sret' cannot be used for vararg call arguments!", I);
+ // Statepoint intrinsic is vararg but the wrapped function may not be.
+ // Allow sret here and check the wrapped function in verifyStatepoint.
+ if (!Call.getCalledFunction() ||
+ Call.getCalledFunction()->getIntrinsicID() !=
+ Intrinsic::experimental_gc_statepoint)
+ Assert(!ArgAttrs.hasAttribute(Attribute::StructRet),
+ "Attribute 'sret' cannot be used for vararg call arguments!",
+ Call);
if (ArgAttrs.hasAttribute(Attribute::InAlloca))
- Assert(Idx == CS.arg_size() - 1, "inalloca isn't on the last argument!",
- I);
+ Assert(Idx == Call.arg_size() - 1,
+ "inalloca isn't on the last argument!", Call);
}
}
// Verify that there's no metadata unless it's a direct call to an intrinsic.
- if (CS.getCalledFunction() == nullptr ||
- !CS.getCalledFunction()->getName().startswith("llvm.")) {
+ if (!Call.getCalledFunction() ||
+ !Call.getCalledFunction()->getName().startswith("llvm.")) {
for (Type *ParamTy : FTy->params()) {
Assert(!ParamTy->isMetadataTy(),
- "Function has metadata parameter but isn't an intrinsic", I);
+ "Function has metadata parameter but isn't an intrinsic", Call);
Assert(!ParamTy->isTokenTy(),
- "Function has token parameter but isn't an intrinsic", I);
+ "Function has token parameter but isn't an intrinsic", Call);
}
}
// Verify that indirect calls don't return tokens.
- if (CS.getCalledFunction() == nullptr)
+ if (!Call.getCalledFunction())
Assert(!FTy->getReturnType()->isTokenTy(),
"Return type cannot be token for indirect call!");
- if (Function *F = CS.getCalledFunction())
+ if (Function *F = Call.getCalledFunction())
if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID())
- visitIntrinsicCallSite(ID, CS);
+ visitIntrinsicCall(ID, Call);
// Verify that a callsite has at most one "deopt", at most one "funclet" and
// at most one "gc-transition" operand bundle.
bool FoundDeoptBundle = false, FoundFuncletBundle = false,
FoundGCTransitionBundle = false;
- for (unsigned i = 0, e = CS.getNumOperandBundles(); i < e; ++i) {
- OperandBundleUse BU = CS.getOperandBundleAt(i);
+ for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
+ OperandBundleUse BU = Call.getOperandBundleAt(i);
uint32_t Tag = BU.getTagID();
if (Tag == LLVMContext::OB_deopt) {
- Assert(!FoundDeoptBundle, "Multiple deopt operand bundles", I);
+ Assert(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
FoundDeoptBundle = true;
} else if (Tag == LLVMContext::OB_gc_transition) {
Assert(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
- I);
+ Call);
FoundGCTransitionBundle = true;
} else if (Tag == LLVMContext::OB_funclet) {
- Assert(!FoundFuncletBundle, "Multiple funclet operand bundles", I);
+ Assert(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
FoundFuncletBundle = true;
Assert(BU.Inputs.size() == 1,
- "Expected exactly one funclet bundle operand", I);
+ "Expected exactly one funclet bundle operand", Call);
Assert(isa<FuncletPadInst>(BU.Inputs.front()),
"Funclet bundle operands should correspond to a FuncletPadInst",
- I);
+ Call);
}
}
// Verify that each inlinable callsite of a debug-info-bearing function in a
// debug-info-bearing function has a debug location attached to it. Failure to
// do so causes assertion failures when the inliner sets up inline scope info.
- if (I->getFunction()->getSubprogram() && CS.getCalledFunction() &&
- CS.getCalledFunction()->getSubprogram())
- AssertDI(I->getDebugLoc(), "inlinable function call in a function with "
- "debug info must have a !dbg location",
- I);
+ if (Call.getFunction()->getSubprogram() && Call.getCalledFunction() &&
+ Call.getCalledFunction()->getSubprogram())
+ AssertDI(Call.getDebugLoc(),
+ "inlinable function call in a function with "
+ "debug info must have a !dbg location",
+ Call);
- visitInstruction(*I);
+ visitInstruction(Call);
}
/// Two types are "congruent" if they are identical, or if they are both pointer
@@ -2959,14 +3020,14 @@ void Verifier::verifyMustTailCall(CallInst &CI) {
}
void Verifier::visitCallInst(CallInst &CI) {
- verifyCallSite(&CI);
+ visitCallBase(CI);
if (CI.isMustTailCall())
verifyMustTailCall(CI);
}
void Verifier::visitInvokeInst(InvokeInst &II) {
- verifyCallSite(&II);
+ visitCallBase(II);
// Verify that the first non-PHI instruction of the unwind destination is an
// exception handling instruction.
@@ -2975,7 +3036,29 @@ void Verifier::visitInvokeInst(InvokeInst &II) {
"The unwind destination does not have an exception handling instruction!",
&II);
- visitTerminatorInst(II);
+ visitTerminator(II);
+}
+
+/// visitUnaryOperator - Check the argument to the unary operator.
+///
+void Verifier::visitUnaryOperator(UnaryOperator &U) {
+ Assert(U.getType() == U.getOperand(0)->getType(),
+ "Unary operators must have same type for"
+ "operands and result!",
+ &U);
+
+ switch (U.getOpcode()) {
+ // Check that floating-point arithmetic operators are only used with
+ // floating-point operands.
+ case Instruction::FNeg:
+ Assert(U.getType()->isFPOrFPVectorTy(),
+ "FNeg operator only works with float types!", &U);
+ break;
+ default:
+ llvm_unreachable("Unknown UnaryOperator opcode!");
+ }
+
+ visitInstruction(U);
}
/// visitBinaryOperator - Check that both arguments to the binary operator are
@@ -3131,6 +3214,12 @@ void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
"All GEP indices should be of integer type");
}
}
+
+ if (auto *PTy = dyn_cast<PointerType>(GEP.getType())) {
+ Assert(GEP.getAddressSpace() == PTy->getAddressSpace(),
+ "GEP address space doesn't match type", &GEP);
+ }
+
visitInstruction(GEP);
}
@@ -3247,16 +3336,15 @@ void Verifier::visitStoreInst(StoreInst &SI) {
}
/// Check that SwiftErrorVal is used as a swifterror argument in CS.
-void Verifier::verifySwiftErrorCallSite(CallSite CS,
- const Value *SwiftErrorVal) {
+void Verifier::verifySwiftErrorCall(CallBase &Call,
+ const Value *SwiftErrorVal) {
unsigned Idx = 0;
- for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
- I != E; ++I, ++Idx) {
+ for (auto I = Call.arg_begin(), E = Call.arg_end(); I != E; ++I, ++Idx) {
if (*I == SwiftErrorVal) {
- Assert(CS.paramHasAttr(Idx, Attribute::SwiftError),
+ Assert(Call.paramHasAttr(Idx, Attribute::SwiftError),
"swifterror value when used in a callsite should be marked "
"with swifterror attribute",
- SwiftErrorVal, CS);
+ SwiftErrorVal, Call);
}
}
}
@@ -3275,10 +3363,8 @@ void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
Assert(StoreI->getOperand(1) == SwiftErrorVal,
"swifterror value should be the second operand when used "
"by stores", SwiftErrorVal, U);
- if (auto CallI = dyn_cast<CallInst>(U))
- verifySwiftErrorCallSite(const_cast<CallInst*>(CallI), SwiftErrorVal);
- if (auto II = dyn_cast<InvokeInst>(U))
- verifySwiftErrorCallSite(const_cast<InvokeInst*>(II), SwiftErrorVal);
+ if (auto *Call = dyn_cast<CallBase>(U))
+ verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
}
}
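
The swifterror checks above tie three things together: the alloca (or argument) carrying the value, the callee's parameter attribute, and the attribute on the call site itself. A hedged sketch of IR that satisfies all three, built with the C++ API (function and module names invented for illustration, API as of roughly this LLVM version):

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("swifterror_example", Ctx);
  Type *I8Ptr = Type::getInt8PtrTy(Ctx);

  // Callee: void @handler(i8** swifterror %err)
  auto *GTy = FunctionType::get(Type::getVoidTy(Ctx), {I8Ptr->getPointerTo()},
                                /*isVarArg=*/false);
  Function *Handler =
      Function::Create(GTy, Function::ExternalLinkage, "handler", &M);
  Handler->addParamAttr(0, Attribute::SwiftError);

  // Caller: a swifterror alloca passed straight through to @handler.
  auto *FTy = FunctionType::get(Type::getVoidTy(Ctx), /*isVarArg=*/false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "caller", &M);
  IRBuilder<> B(BasicBlock::Create(Ctx, "entry", F));
  AllocaInst *Err = B.CreateAlloca(I8Ptr, nullptr, "err");
  Err->setSwiftError(true);
  CallInst *CI = B.CreateCall(Handler, {Err});
  // verifySwiftErrorCall requires the call-site operand to be marked too.
  CI->addParamAttr(0, Attribute::SwiftError);
  B.CreateRetVoid();

  return verifyModule(M, &errs()) ? 1 : 0;
}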
@@ -3341,17 +3427,19 @@ void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
"atomicrmw instructions must be atomic.", &RMWI);
Assert(RMWI.getOrdering() != AtomicOrdering::Unordered,
"atomicrmw instructions cannot be unordered.", &RMWI);
+ auto Op = RMWI.getOperation();
PointerType *PTy = dyn_cast<PointerType>(RMWI.getOperand(0)->getType());
Assert(PTy, "First atomicrmw operand must be a pointer.", &RMWI);
Type *ElTy = PTy->getElementType();
- Assert(ElTy->isIntegerTy(), "atomicrmw operand must have integer type!",
+ Assert(ElTy->isIntegerTy(), "atomicrmw " +
+ AtomicRMWInst::getOperationName(Op) +
+ " operand must have integer type!",
&RMWI, ElTy);
checkAtomicMemAccessSize(ElTy, &RMWI);
Assert(ElTy == RMWI.getOperand(1)->getType(),
"Argument value type does not match pointer operand type!", &RMWI,
ElTy);
- Assert(AtomicRMWInst::FIRST_BINOP <= RMWI.getOperation() &&
- RMWI.getOperation() <= AtomicRMWInst::LAST_BINOP,
+ Assert(AtomicRMWInst::FIRST_BINOP <= Op && Op <= AtomicRMWInst::LAST_BINOP,
"Invalid binary operation!", &RMWI);
visitInstruction(RMWI);
}
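
With the operation name folded into the message, a failing integer-type check now reads, for example, "atomicrmw add operand must have integer type!" (via AtomicRMWInst::getOperationName). For reference, a minimal well-formed counterpart built through IRBuilder, again as an illustrative sketch rather than anything taken from this patch:

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("atomicrmw_example", Ctx);
  auto *FTy = FunctionType::get(Type::getInt32Ty(Ctx),
                                {Type::getInt32PtrTy(Ctx)}, /*isVarArg=*/false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "bump", &M);
  IRBuilder<> B(BasicBlock::Create(Ctx, "entry", F));

  // atomicrmw add i32* %p, i32 1 monotonic: pointer first operand, integer
  // element type, matching value type, and an ordering that is not unordered.
  Value *Old = B.CreateAtomicRMW(AtomicRMWInst::Add, &*F->arg_begin(),
                                 B.getInt32(1), AtomicOrdering::Monotonic);
  B.CreateRet(Old);
  return verifyFunction(*F, &errs()) ? 1 : 0;
}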
@@ -3430,7 +3518,7 @@ void Verifier::visitEHPadPredecessors(Instruction &I) {
Instruction *ToPad = &I;
Value *ToPadParent = getParentPad(ToPad);
for (BasicBlock *PredBB : predecessors(BB)) {
- TerminatorInst *TI = PredBB->getTerminator();
+ Instruction *TI = PredBB->getTerminator();
Value *FromPad;
if (auto *II = dyn_cast<InvokeInst>(TI)) {
Assert(II->getUnwindDest() == BB && II->getNormalDest() != BB,
@@ -3518,7 +3606,7 @@ void Verifier::visitResumeInst(ResumeInst &RI) {
"inside a function.",
&RI);
- visitTerminatorInst(RI);
+ visitTerminator(RI);
}
void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
@@ -3546,7 +3634,7 @@ void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
"CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
CatchReturn.getOperand(0));
- visitTerminatorInst(CatchReturn);
+ visitTerminator(CatchReturn);
}
void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
@@ -3667,7 +3755,7 @@ void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
// Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
getParentPad(UnwindPad) == getParentPad(&FPI))
- SiblingFuncletInfo[&FPI] = cast<TerminatorInst>(U);
+ SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
}
}
// Make sure we visit all uses of FPI, but for nested pads stop as
@@ -3768,7 +3856,7 @@ void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
}
visitEHPadPredecessors(CatchSwitch);
- visitTerminatorInst(CatchSwitch);
+ visitTerminator(CatchSwitch);
}
void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
@@ -3784,7 +3872,7 @@ void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
&CRI);
}
- visitTerminatorInst(CRI);
+ visitTerminator(CRI);
}
void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
@@ -3867,6 +3955,10 @@ void Verifier::visitInstruction(Instruction &I) {
}
}
+ // Get a pointer to the call base of the instruction if it is some form of
+ // call.
+ const CallBase *CBI = dyn_cast<CallBase>(&I);
+
for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
Assert(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
@@ -3879,10 +3971,9 @@ void Verifier::visitInstruction(Instruction &I) {
if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
// Check to make sure that the "address of" an intrinsic function is never
// taken.
- Assert(
- !F->isIntrinsic() ||
- i == (isa<CallInst>(I) ? e - 1 : isa<InvokeInst>(I) ? e - 3 : 0),
- "Cannot take the address of an intrinsic!", &I);
+ Assert(!F->isIntrinsic() ||
+ (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)),
+ "Cannot take the address of an intrinsic!", &I);
Assert(
!F->isIntrinsic() || isa<CallInst>(I) ||
F->getIntrinsicID() == Intrinsic::donothing ||
@@ -3908,8 +3999,7 @@ void Verifier::visitInstruction(Instruction &I) {
} else if (isa<Instruction>(I.getOperand(i))) {
verifyDominatesUse(I, i);
} else if (isa<InlineAsm>(I.getOperand(i))) {
- Assert((i + 1 == e && isa<CallInst>(I)) ||
- (i + 3 == e && isa<InvokeInst>(I)),
+ Assert(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
"Cannot take the address of an inline asm!", &I);
} else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) {
if (CE->getType()->isPtrOrPtrVectorTy() ||
@@ -3984,15 +4074,15 @@ void Verifier::visitInstruction(Instruction &I) {
visitMDNode(*N);
}
- if (auto *DII = dyn_cast<DbgInfoIntrinsic>(&I))
+ if (auto *DII = dyn_cast<DbgVariableIntrinsic>(&I))
verifyFragmentExpression(*DII);
InstsInThisBlock.insert(&I);
}
/// Allow intrinsics to be verified in different ways.
-void Verifier::visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS) {
- Function *IF = CS.getCalledFunction();
+void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
+ Function *IF = Call.getCalledFunction();
Assert(IF->isDeclaration(), "Intrinsic functions should never be defined!",
IF);
@@ -4038,15 +4128,15 @@ void Verifier::visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS) {
// If the intrinsic takes MDNode arguments, verify that they are either global
// or are local to *this* function.
- for (Value *V : CS.args())
+ for (Value *V : Call.args())
if (auto *MD = dyn_cast<MetadataAsValue>(V))
- visitMetadataAsValue(*MD, CS.getCaller());
+ visitMetadataAsValue(*MD, Call.getCaller());
switch (ID) {
default:
break;
case Intrinsic::coro_id: {
- auto *InfoArg = CS.getArgOperand(3)->stripPointerCasts();
+ auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
if (isa<ConstantPointerNull>(InfoArg))
break;
auto *GV = dyn_cast<GlobalVariable>(InfoArg);
@@ -4061,10 +4151,10 @@ void Verifier::visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS) {
}
case Intrinsic::ctlz: // llvm.ctlz
case Intrinsic::cttz: // llvm.cttz
- Assert(isa<ConstantInt>(CS.getArgOperand(1)),
+ Assert(isa<ConstantInt>(Call.getArgOperand(1)),
"is_zero_undef argument of bit counting intrinsics must be a "
"constant int",
- CS);
+ Call);
break;
case Intrinsic::experimental_constrained_fadd:
case Intrinsic::experimental_constrained_fsub:
@@ -4084,59 +4174,64 @@ void Verifier::visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS) {
case Intrinsic::experimental_constrained_log2:
case Intrinsic::experimental_constrained_rint:
case Intrinsic::experimental_constrained_nearbyint:
- visitConstrainedFPIntrinsic(
- cast<ConstrainedFPIntrinsic>(*CS.getInstruction()));
+ case Intrinsic::experimental_constrained_maxnum:
+ case Intrinsic::experimental_constrained_minnum:
+ case Intrinsic::experimental_constrained_ceil:
+ case Intrinsic::experimental_constrained_floor:
+ case Intrinsic::experimental_constrained_round:
+ case Intrinsic::experimental_constrained_trunc:
+ visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
break;
case Intrinsic::dbg_declare: // llvm.dbg.declare
- Assert(isa<MetadataAsValue>(CS.getArgOperand(0)),
- "invalid llvm.dbg.declare intrinsic call 1", CS);
- visitDbgIntrinsic("declare", cast<DbgInfoIntrinsic>(*CS.getInstruction()));
+ Assert(isa<MetadataAsValue>(Call.getArgOperand(0)),
+ "invalid llvm.dbg.declare intrinsic call 1", Call);
+ visitDbgIntrinsic("declare", cast<DbgVariableIntrinsic>(Call));
break;
case Intrinsic::dbg_addr: // llvm.dbg.addr
- visitDbgIntrinsic("addr", cast<DbgInfoIntrinsic>(*CS.getInstruction()));
+ visitDbgIntrinsic("addr", cast<DbgVariableIntrinsic>(Call));
break;
case Intrinsic::dbg_value: // llvm.dbg.value
- visitDbgIntrinsic("value", cast<DbgInfoIntrinsic>(*CS.getInstruction()));
+ visitDbgIntrinsic("value", cast<DbgVariableIntrinsic>(Call));
break;
case Intrinsic::dbg_label: // llvm.dbg.label
- visitDbgLabelIntrinsic("label", cast<DbgLabelInst>(*CS.getInstruction()));
+ visitDbgLabelIntrinsic("label", cast<DbgLabelInst>(Call));
break;
case Intrinsic::memcpy:
case Intrinsic::memmove:
case Intrinsic::memset: {
- const auto *MI = cast<MemIntrinsic>(CS.getInstruction());
+ const auto *MI = cast<MemIntrinsic>(&Call);
auto IsValidAlignment = [&](unsigned Alignment) -> bool {
return Alignment == 0 || isPowerOf2_32(Alignment);
};
Assert(IsValidAlignment(MI->getDestAlignment()),
"alignment of arg 0 of memory intrinsic must be 0 or a power of 2",
- CS);
+ Call);
if (const auto *MTI = dyn_cast<MemTransferInst>(MI)) {
Assert(IsValidAlignment(MTI->getSourceAlignment()),
"alignment of arg 1 of memory intrinsic must be 0 or a power of 2",
- CS);
+ Call);
}
- Assert(isa<ConstantInt>(CS.getArgOperand(3)),
+ Assert(isa<ConstantInt>(Call.getArgOperand(3)),
"isvolatile argument of memory intrinsics must be a constant int",
- CS);
+ Call);
break;
}
case Intrinsic::memcpy_element_unordered_atomic:
case Intrinsic::memmove_element_unordered_atomic:
case Intrinsic::memset_element_unordered_atomic: {
- const auto *AMI = cast<AtomicMemIntrinsic>(CS.getInstruction());
+ const auto *AMI = cast<AtomicMemIntrinsic>(&Call);
ConstantInt *ElementSizeCI =
dyn_cast<ConstantInt>(AMI->getRawElementSizeInBytes());
Assert(ElementSizeCI,
"element size of the element-wise unordered atomic memory "
"intrinsic must be a constant int",
- CS);
+ Call);
const APInt &ElementSizeVal = ElementSizeCI->getValue();
Assert(ElementSizeVal.isPowerOf2(),
"element size of the element-wise atomic memory intrinsic "
"must be a power of 2",
- CS);
+ Call);
if (auto *LengthCI = dyn_cast<ConstantInt>(AMI->getLength())) {
uint64_t Length = LengthCI->getZExtValue();
@@ -4144,7 +4239,7 @@ void Verifier::visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS) {
Assert((Length % ElementSize) == 0,
"constant length must be a multiple of the element size in the "
"element-wise atomic memory intrinsic",
- CS);
+ Call);
}
auto IsValidAlignment = [&](uint64_t Alignment) {
@@ -4152,11 +4247,11 @@ void Verifier::visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS) {
};
uint64_t DstAlignment = AMI->getDestAlignment();
Assert(IsValidAlignment(DstAlignment),
- "incorrect alignment of the destination argument", CS);
+ "incorrect alignment of the destination argument", Call);
if (const auto *AMT = dyn_cast<AtomicMemTransferInst>(AMI)) {
uint64_t SrcAlignment = AMT->getSourceAlignment();
Assert(IsValidAlignment(SrcAlignment),
- "incorrect alignment of the source argument", CS);
+ "incorrect alignment of the source argument", Call);
}
break;
}
@@ -4165,76 +4260,76 @@ void Verifier::visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS) {
case Intrinsic::gcread:
if (ID == Intrinsic::gcroot) {
AllocaInst *AI =
- dyn_cast<AllocaInst>(CS.getArgOperand(0)->stripPointerCasts());
- Assert(AI, "llvm.gcroot parameter #1 must be an alloca.", CS);
- Assert(isa<Constant>(CS.getArgOperand(1)),
- "llvm.gcroot parameter #2 must be a constant.", CS);
+ dyn_cast<AllocaInst>(Call.getArgOperand(0)->stripPointerCasts());
+ Assert(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
+ Assert(isa<Constant>(Call.getArgOperand(1)),
+ "llvm.gcroot parameter #2 must be a constant.", Call);
if (!AI->getAllocatedType()->isPointerTy()) {
- Assert(!isa<ConstantPointerNull>(CS.getArgOperand(1)),
+ Assert(!isa<ConstantPointerNull>(Call.getArgOperand(1)),
"llvm.gcroot parameter #1 must either be a pointer alloca, "
"or argument #2 must be a non-null constant.",
- CS);
+ Call);
}
}
- Assert(CS.getParent()->getParent()->hasGC(),
- "Enclosing function does not use GC.", CS);
+ Assert(Call.getParent()->getParent()->hasGC(),
+ "Enclosing function does not use GC.", Call);
break;
case Intrinsic::init_trampoline:
- Assert(isa<Function>(CS.getArgOperand(1)->stripPointerCasts()),
+ Assert(isa<Function>(Call.getArgOperand(1)->stripPointerCasts()),
"llvm.init_trampoline parameter #2 must resolve to a function.",
- CS);
+ Call);
break;
case Intrinsic::prefetch:
- Assert(isa<ConstantInt>(CS.getArgOperand(1)) &&
- isa<ConstantInt>(CS.getArgOperand(2)) &&
- cast<ConstantInt>(CS.getArgOperand(1))->getZExtValue() < 2 &&
- cast<ConstantInt>(CS.getArgOperand(2))->getZExtValue() < 4,
- "invalid arguments to llvm.prefetch", CS);
+ Assert(isa<ConstantInt>(Call.getArgOperand(1)) &&
+ isa<ConstantInt>(Call.getArgOperand(2)) &&
+ cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2 &&
+ cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
+ "invalid arguments to llvm.prefetch", Call);
break;
case Intrinsic::stackprotector:
- Assert(isa<AllocaInst>(CS.getArgOperand(1)->stripPointerCasts()),
- "llvm.stackprotector parameter #2 must resolve to an alloca.", CS);
+ Assert(isa<AllocaInst>(Call.getArgOperand(1)->stripPointerCasts()),
+ "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
break;
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end:
case Intrinsic::invariant_start:
- Assert(isa<ConstantInt>(CS.getArgOperand(0)),
+ Assert(isa<ConstantInt>(Call.getArgOperand(0)),
"size argument of memory use markers must be a constant integer",
- CS);
+ Call);
break;
case Intrinsic::invariant_end:
- Assert(isa<ConstantInt>(CS.getArgOperand(1)),
- "llvm.invariant.end parameter #2 must be a constant integer", CS);
+ Assert(isa<ConstantInt>(Call.getArgOperand(1)),
+ "llvm.invariant.end parameter #2 must be a constant integer", Call);
break;
case Intrinsic::localescape: {
- BasicBlock *BB = CS.getParent();
+ BasicBlock *BB = Call.getParent();
Assert(BB == &BB->getParent()->front(),
- "llvm.localescape used outside of entry block", CS);
+ "llvm.localescape used outside of entry block", Call);
Assert(!SawFrameEscape,
- "multiple calls to llvm.localescape in one function", CS);
- for (Value *Arg : CS.args()) {
+ "multiple calls to llvm.localescape in one function", Call);
+ for (Value *Arg : Call.args()) {
if (isa<ConstantPointerNull>(Arg))
continue; // Null values are allowed as placeholders.
auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
Assert(AI && AI->isStaticAlloca(),
- "llvm.localescape only accepts static allocas", CS);
+ "llvm.localescape only accepts static allocas", Call);
}
- FrameEscapeInfo[BB->getParent()].first = CS.getNumArgOperands();
+ FrameEscapeInfo[BB->getParent()].first = Call.getNumArgOperands();
SawFrameEscape = true;
break;
}
case Intrinsic::localrecover: {
- Value *FnArg = CS.getArgOperand(0)->stripPointerCasts();
+ Value *FnArg = Call.getArgOperand(0)->stripPointerCasts();
Function *Fn = dyn_cast<Function>(FnArg);
Assert(Fn && !Fn->isDeclaration(),
"llvm.localrecover first "
"argument must be function defined in this module",
- CS);
- auto *IdxArg = dyn_cast<ConstantInt>(CS.getArgOperand(2));
+ Call);
+ auto *IdxArg = dyn_cast<ConstantInt>(Call.getArgOperand(2));
Assert(IdxArg, "idx argument of llvm.localrecover must be a constant int",
- CS);
+ Call);
auto &Entry = FrameEscapeInfo[Fn];
Entry.second = unsigned(
std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
@@ -4242,45 +4337,46 @@ void Verifier::visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS) {
}
case Intrinsic::experimental_gc_statepoint:
- Assert(!CS.isInlineAsm(),
- "gc.statepoint support for inline assembly unimplemented", CS);
- Assert(CS.getParent()->getParent()->hasGC(),
- "Enclosing function does not use GC.", CS);
+ if (auto *CI = dyn_cast<CallInst>(&Call))
+ Assert(!CI->isInlineAsm(),
+ "gc.statepoint support for inline assembly unimplemented", CI);
+ Assert(Call.getParent()->getParent()->hasGC(),
+ "Enclosing function does not use GC.", Call);
- verifyStatepoint(CS);
+ verifyStatepoint(Call);
break;
case Intrinsic::experimental_gc_result: {
- Assert(CS.getParent()->getParent()->hasGC(),
- "Enclosing function does not use GC.", CS);
+ Assert(Call.getParent()->getParent()->hasGC(),
+ "Enclosing function does not use GC.", Call);
// Are we tied to a statepoint properly?
- CallSite StatepointCS(CS.getArgOperand(0));
+ const auto *StatepointCall = dyn_cast<CallBase>(Call.getArgOperand(0));
const Function *StatepointFn =
- StatepointCS.getInstruction() ? StatepointCS.getCalledFunction() : nullptr;
+ StatepointCall ? StatepointCall->getCalledFunction() : nullptr;
Assert(StatepointFn && StatepointFn->isDeclaration() &&
StatepointFn->getIntrinsicID() ==
Intrinsic::experimental_gc_statepoint,
- "gc.result operand #1 must be from a statepoint", CS,
- CS.getArgOperand(0));
+ "gc.result operand #1 must be from a statepoint", Call,
+ Call.getArgOperand(0));
// Assert that result type matches wrapped callee.
- const Value *Target = StatepointCS.getArgument(2);
+ const Value *Target = StatepointCall->getArgOperand(2);
auto *PT = cast<PointerType>(Target->getType());
auto *TargetFuncType = cast<FunctionType>(PT->getElementType());
- Assert(CS.getType() == TargetFuncType->getReturnType(),
- "gc.result result type does not match wrapped callee", CS);
+ Assert(Call.getType() == TargetFuncType->getReturnType(),
+ "gc.result result type does not match wrapped callee", Call);
break;
}
case Intrinsic::experimental_gc_relocate: {
- Assert(CS.getNumArgOperands() == 3, "wrong number of arguments", CS);
+ Assert(Call.getNumArgOperands() == 3, "wrong number of arguments", Call);
- Assert(isa<PointerType>(CS.getType()->getScalarType()),
- "gc.relocate must return a pointer or a vector of pointers", CS);
+ Assert(isa<PointerType>(Call.getType()->getScalarType()),
+ "gc.relocate must return a pointer or a vector of pointers", Call);
// Check that this relocate is correctly tied to the statepoint
// This is case for relocate on the unwinding path of an invoke statepoint
if (LandingPadInst *LandingPad =
- dyn_cast<LandingPadInst>(CS.getArgOperand(0))) {
+ dyn_cast<LandingPadInst>(Call.getArgOperand(0))) {
const BasicBlock *InvokeBB =
LandingPad->getParent()->getUniquePredecessor();
@@ -4293,167 +4389,198 @@ void Verifier::visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS) {
InvokeBB);
Assert(isStatepoint(InvokeBB->getTerminator()),
"gc relocate should be linked to a statepoint", InvokeBB);
- }
- else {
+ } else {
// In all other cases relocate should be tied to the statepoint directly.
// This covers relocates on a normal return path of invoke statepoint and
// relocates of a call statepoint.
- auto Token = CS.getArgOperand(0);
+ auto Token = Call.getArgOperand(0);
Assert(isa<Instruction>(Token) && isStatepoint(cast<Instruction>(Token)),
- "gc relocate is incorrectly tied to the statepoint", CS, Token);
+ "gc relocate is incorrectly tied to the statepoint", Call, Token);
}
// Verify rest of the relocate arguments.
-
- ImmutableCallSite StatepointCS(
- cast<GCRelocateInst>(*CS.getInstruction()).getStatepoint());
+ const CallBase &StatepointCall =
+ *cast<CallBase>(cast<GCRelocateInst>(Call).getStatepoint());
// Both the base and derived must be piped through the safepoint.
- Value* Base = CS.getArgOperand(1);
+ Value *Base = Call.getArgOperand(1);
Assert(isa<ConstantInt>(Base),
- "gc.relocate operand #2 must be integer offset", CS);
+ "gc.relocate operand #2 must be integer offset", Call);
- Value* Derived = CS.getArgOperand(2);
+ Value *Derived = Call.getArgOperand(2);
Assert(isa<ConstantInt>(Derived),
- "gc.relocate operand #3 must be integer offset", CS);
+ "gc.relocate operand #3 must be integer offset", Call);
const int BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
const int DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
// Check the bounds
- Assert(0 <= BaseIndex && BaseIndex < (int)StatepointCS.arg_size(),
- "gc.relocate: statepoint base index out of bounds", CS);
- Assert(0 <= DerivedIndex && DerivedIndex < (int)StatepointCS.arg_size(),
- "gc.relocate: statepoint derived index out of bounds", CS);
+ Assert(0 <= BaseIndex && BaseIndex < (int)StatepointCall.arg_size(),
+ "gc.relocate: statepoint base index out of bounds", Call);
+ Assert(0 <= DerivedIndex && DerivedIndex < (int)StatepointCall.arg_size(),
+ "gc.relocate: statepoint derived index out of bounds", Call);
// Check that BaseIndex and DerivedIndex fall within the 'gc parameters'
// section of the statepoint's argument.
- Assert(StatepointCS.arg_size() > 0,
+ Assert(StatepointCall.arg_size() > 0,
"gc.statepoint: insufficient arguments");
- Assert(isa<ConstantInt>(StatepointCS.getArgument(3)),
+ Assert(isa<ConstantInt>(StatepointCall.getArgOperand(3)),
"gc.statement: number of call arguments must be constant integer");
const unsigned NumCallArgs =
- cast<ConstantInt>(StatepointCS.getArgument(3))->getZExtValue();
- Assert(StatepointCS.arg_size() > NumCallArgs + 5,
+ cast<ConstantInt>(StatepointCall.getArgOperand(3))->getZExtValue();
+ Assert(StatepointCall.arg_size() > NumCallArgs + 5,
"gc.statepoint: mismatch in number of call arguments");
- Assert(isa<ConstantInt>(StatepointCS.getArgument(NumCallArgs + 5)),
+ Assert(isa<ConstantInt>(StatepointCall.getArgOperand(NumCallArgs + 5)),
"gc.statepoint: number of transition arguments must be "
"a constant integer");
const int NumTransitionArgs =
- cast<ConstantInt>(StatepointCS.getArgument(NumCallArgs + 5))
+ cast<ConstantInt>(StatepointCall.getArgOperand(NumCallArgs + 5))
->getZExtValue();
const int DeoptArgsStart = 4 + NumCallArgs + 1 + NumTransitionArgs + 1;
- Assert(isa<ConstantInt>(StatepointCS.getArgument(DeoptArgsStart)),
+ Assert(isa<ConstantInt>(StatepointCall.getArgOperand(DeoptArgsStart)),
"gc.statepoint: number of deoptimization arguments must be "
"a constant integer");
const int NumDeoptArgs =
- cast<ConstantInt>(StatepointCS.getArgument(DeoptArgsStart))
+ cast<ConstantInt>(StatepointCall.getArgOperand(DeoptArgsStart))
->getZExtValue();
const int GCParamArgsStart = DeoptArgsStart + 1 + NumDeoptArgs;
- const int GCParamArgsEnd = StatepointCS.arg_size();
+ const int GCParamArgsEnd = StatepointCall.arg_size();
Assert(GCParamArgsStart <= BaseIndex && BaseIndex < GCParamArgsEnd,
"gc.relocate: statepoint base index doesn't fall within the "
"'gc parameters' section of the statepoint call",
- CS);
+ Call);
Assert(GCParamArgsStart <= DerivedIndex && DerivedIndex < GCParamArgsEnd,
"gc.relocate: statepoint derived index doesn't fall within the "
"'gc parameters' section of the statepoint call",
- CS);
+ Call);
// Relocated value must be either a pointer type or vector-of-pointer type,
// but gc_relocate does not need to return the same pointer type as the
// relocated pointer. It can be casted to the correct type later if it's
// desired. However, they must have the same address space and 'vectorness'
- GCRelocateInst &Relocate = cast<GCRelocateInst>(*CS.getInstruction());
+ GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
Assert(Relocate.getDerivedPtr()->getType()->isPtrOrPtrVectorTy(),
- "gc.relocate: relocated value must be a gc pointer", CS);
+ "gc.relocate: relocated value must be a gc pointer", Call);
- auto ResultType = CS.getType();
+ auto ResultType = Call.getType();
auto DerivedType = Relocate.getDerivedPtr()->getType();
Assert(ResultType->isVectorTy() == DerivedType->isVectorTy(),
"gc.relocate: vector relocates to vector and pointer to pointer",
- CS);
+ Call);
Assert(
ResultType->getPointerAddressSpace() ==
DerivedType->getPointerAddressSpace(),
"gc.relocate: relocating a pointer shouldn't change its address space",
- CS);
+ Call);
break;
}
case Intrinsic::eh_exceptioncode:
case Intrinsic::eh_exceptionpointer: {
- Assert(isa<CatchPadInst>(CS.getArgOperand(0)),
- "eh.exceptionpointer argument must be a catchpad", CS);
+ Assert(isa<CatchPadInst>(Call.getArgOperand(0)),
+ "eh.exceptionpointer argument must be a catchpad", Call);
break;
}
case Intrinsic::masked_load: {
- Assert(CS.getType()->isVectorTy(), "masked_load: must return a vector", CS);
+ Assert(Call.getType()->isVectorTy(), "masked_load: must return a vector",
+ Call);
- Value *Ptr = CS.getArgOperand(0);
- //Value *Alignment = CS.getArgOperand(1);
- Value *Mask = CS.getArgOperand(2);
- Value *PassThru = CS.getArgOperand(3);
- Assert(Mask->getType()->isVectorTy(),
- "masked_load: mask must be vector", CS);
+ Value *Ptr = Call.getArgOperand(0);
+ // Value *Alignment = Call.getArgOperand(1);
+ Value *Mask = Call.getArgOperand(2);
+ Value *PassThru = Call.getArgOperand(3);
+ Assert(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
+ Call);
// DataTy is the overloaded type
Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType();
- Assert(DataTy == CS.getType(),
- "masked_load: return must match pointer type", CS);
+ Assert(DataTy == Call.getType(),
+ "masked_load: return must match pointer type", Call);
Assert(PassThru->getType() == DataTy,
- "masked_load: pass through and data type must match", CS);
+ "masked_load: pass through and data type must match", Call);
Assert(Mask->getType()->getVectorNumElements() ==
- DataTy->getVectorNumElements(),
- "masked_load: vector mask must be same length as data", CS);
+ DataTy->getVectorNumElements(),
+ "masked_load: vector mask must be same length as data", Call);
break;
}
case Intrinsic::masked_store: {
- Value *Val = CS.getArgOperand(0);
- Value *Ptr = CS.getArgOperand(1);
- //Value *Alignment = CS.getArgOperand(2);
- Value *Mask = CS.getArgOperand(3);
- Assert(Mask->getType()->isVectorTy(),
- "masked_store: mask must be vector", CS);
+ Value *Val = Call.getArgOperand(0);
+ Value *Ptr = Call.getArgOperand(1);
+ // Value *Alignment = Call.getArgOperand(2);
+ Value *Mask = Call.getArgOperand(3);
+ Assert(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
+ Call);
// DataTy is the overloaded type
Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType();
Assert(DataTy == Val->getType(),
- "masked_store: storee must match pointer type", CS);
+ "masked_store: storee must match pointer type", Call);
Assert(Mask->getType()->getVectorNumElements() ==
- DataTy->getVectorNumElements(),
- "masked_store: vector mask must be same length as data", CS);
+ DataTy->getVectorNumElements(),
+ "masked_store: vector mask must be same length as data", Call);
break;
}
case Intrinsic::experimental_guard: {
- Assert(CS.isCall(), "experimental_guard cannot be invoked", CS);
- Assert(CS.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
+ Assert(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
+ Assert(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
"experimental_guard must have exactly one "
"\"deopt\" operand bundle");
break;
}
case Intrinsic::experimental_deoptimize: {
- Assert(CS.isCall(), "experimental_deoptimize cannot be invoked", CS);
- Assert(CS.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
+ Assert(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
+ Call);
+ Assert(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
"experimental_deoptimize must have exactly one "
"\"deopt\" operand bundle");
- Assert(CS.getType() == CS.getInstruction()->getFunction()->getReturnType(),
+ Assert(Call.getType() == Call.getFunction()->getReturnType(),
"experimental_deoptimize return type must match caller return type");
- if (CS.isCall()) {
- auto *DeoptCI = CS.getInstruction();
- auto *RI = dyn_cast<ReturnInst>(DeoptCI->getNextNode());
+ if (isa<CallInst>(Call)) {
+ auto *RI = dyn_cast<ReturnInst>(Call.getNextNode());
Assert(RI,
"calls to experimental_deoptimize must be followed by a return");
- if (!CS.getType()->isVoidTy() && RI)
- Assert(RI->getReturnValue() == DeoptCI,
+ if (!Call.getType()->isVoidTy() && RI)
+ Assert(RI->getReturnValue() == &Call,
"calls to experimental_deoptimize must be followed by a return "
"of the value computed by experimental_deoptimize");
}
break;
}
+ case Intrinsic::sadd_sat:
+ case Intrinsic::uadd_sat:
+ case Intrinsic::ssub_sat:
+ case Intrinsic::usub_sat: {
+ Value *Op1 = Call.getArgOperand(0);
+ Value *Op2 = Call.getArgOperand(1);
+ Assert(Op1->getType()->isIntOrIntVectorTy(),
+ "first operand of [us][add|sub]_sat must be an int type or vector "
+ "of ints");
+ Assert(Op2->getType()->isIntOrIntVectorTy(),
+ "second operand of [us][add|sub]_sat must be an int type or vector "
+ "of ints");
+ break;
+ }
+ case Intrinsic::smul_fix: {
+ Value *Op1 = Call.getArgOperand(0);
+ Value *Op2 = Call.getArgOperand(1);
+ Assert(Op1->getType()->isIntOrIntVectorTy(),
+ "first operand of smul_fix must be an int type or vector "
+ "of ints");
+ Assert(Op2->getType()->isIntOrIntVectorTy(),
+ "second operand of smul_fix must be an int type or vector "
+ "of ints");
+
+ auto *Op3 = dyn_cast<ConstantInt>(Call.getArgOperand(2));
+ Assert(Op3, "third argument of smul_fix must be a constant integer");
+ Assert(Op3->getType()->getBitWidth() <= 32,
+ "third argument of smul_fix must fit within 32 bits");
+ Assert(Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
+ "the scale of smul_fix must be less than the width of the operands");
+ break;
+ }
};
}
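
The new saturating-arithmetic and smul_fix cases boil down to: integer (or integer-vector) operands, and for smul_fix a constant scale that fits in 32 bits and is smaller than the operand width. A sketch of calls that satisfy those rules, emitted through Intrinsic::getDeclaration (illustrative only; the surrounding function names are invented):

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("fixed_point_example", Ctx);
  Type *I32 = Type::getInt32Ty(Ctx);
  auto *FTy = FunctionType::get(I32, {I32, I32}, /*isVarArg=*/false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "demo", &M);
  IRBuilder<> B(BasicBlock::Create(Ctx, "entry", F));
  auto AI = F->arg_begin();
  Value *A = &*AI++;
  Value *C = &*AI;

  // llvm.sadd.sat.i32: both operands are integers, as required above.
  Function *SAddSat = Intrinsic::getDeclaration(&M, Intrinsic::sadd_sat, {I32});
  Value *Sat = B.CreateCall(SAddSat, {A, C});

  // llvm.smul.fix.i32 with scale 16: a constant i32 scale below the 32-bit
  // operand width, so every smul_fix assertion above holds.
  Function *SMulFix = Intrinsic::getDeclaration(&M, Intrinsic::smul_fix, {I32});
  Value *Fix = B.CreateCall(SMulFix, {Sat, C, B.getInt32(16)});

  B.CreateRet(Fix);
  return verifyFunction(*F, &errs()) ? 1 : 0;
}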
@@ -4491,7 +4618,7 @@ void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
"invalid exception behavior argument", &FPI);
}
-void Verifier::visitDbgIntrinsic(StringRef Kind, DbgInfoIntrinsic &DII) {
+void Verifier::visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII) {
auto *MD = cast<MetadataAsValue>(DII.getArgOperand(0))->getMetadata();
AssertDI(isa<ValueAsMetadata>(MD) ||
(isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands()),
@@ -4527,13 +4654,21 @@ void Verifier::visitDbgIntrinsic(StringRef Kind, DbgInfoIntrinsic &DII) {
&DII, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
Loc->getScope()->getSubprogram());
+ // This check is redundant with one in visitLocalVariable().
+ AssertDI(isType(Var->getRawType()), "invalid type ref", Var,
+ Var->getRawType());
+ if (auto *Type = dyn_cast_or_null<DIType>(Var->getRawType()))
+ if (Type->isBlockByrefStruct())
+ AssertDI(DII.getExpression() && DII.getExpression()->getNumElements(),
+ "BlockByRef variable without complex expression", Var, &DII);
+
verifyFnArgs(DII);
}
void Verifier::visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI) {
- AssertDI(isa<DILabel>(DLI.getRawVariable()),
+ AssertDI(isa<DILabel>(DLI.getRawLabel()),
"invalid llvm.dbg." + Kind + " intrinsic variable", &DLI,
- DLI.getRawVariable());
+ DLI.getRawLabel());
// Ignore broken !dbg attachments; they're checked elsewhere.
if (MDNode *N = DLI.getDebugLoc().getAsMDNode())
@@ -4560,10 +4695,7 @@ void Verifier::visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI) {
Loc->getScope()->getSubprogram());
}
-void Verifier::verifyFragmentExpression(const DbgInfoIntrinsic &I) {
- if (dyn_cast<DbgLabelInst>(&I))
- return;
-
+void Verifier::verifyFragmentExpression(const DbgVariableIntrinsic &I) {
DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(I.getRawVariable());
DIExpression *E = dyn_cast_or_null<DIExpression>(I.getRawExpression());
@@ -4605,7 +4737,7 @@ void Verifier::verifyFragmentExpression(const DIVariable &V,
AssertDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
}
-void Verifier::verifyFnArgs(const DbgInfoIntrinsic &I) {
+void Verifier::verifyFnArgs(const DbgVariableIntrinsic &I) {
// This function does not take the scope of noninlined function arguments into
// account. Don't run it if current function is nodebug, because it may
// contain inlined debug intrinsics.
@@ -4662,6 +4794,14 @@ void Verifier::verifyDeoptimizeCallingConvs() {
}
}
+void Verifier::verifySourceDebugInfo(const DICompileUnit &U, const DIFile &F) {
+ bool HasSource = F.getSource().hasValue();
+ if (!HasSourceDebugInfo.count(&U))
+ HasSourceDebugInfo[&U] = HasSource;
+ AssertDI(HasSource == HasSourceDebugInfo[&U],
+ "inconsistent use of embedded source");
+}
+
//===----------------------------------------------------------------------===//
// Implement the public interfaces to this file...
//===----------------------------------------------------------------------===//
@@ -4718,9 +4858,10 @@ struct VerifierLegacyPass : public FunctionPass {
}
bool runOnFunction(Function &F) override {
- if (!V->verify(F) && FatalErrors)
+ if (!V->verify(F) && FatalErrors) {
+ errs() << "in function " << F.getName() << '\n';
report_fatal_error("Broken function found, compilation aborted!");
-
+ }
return false;
}
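
Since the legacy pass now names the offending function before aborting, a broken function fails with both the verifier's diagnostics and an "in function <name>" line. A minimal sketch of driving that path through the legacy pass manager (the helper name is invented; createVerifierPass is the existing entry point declared in llvm/IR/Verifier.h):

#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
using namespace llvm;

// Runs the legacy verifier pass over every function definition in M. With
// FatalErrors set, a broken function aborts compilation after printing its
// name, per the runOnFunction change above.
static void verifyAllFunctions(Module &M) {
  legacy::FunctionPassManager FPM(&M);
  FPM.add(createVerifierPass(/*FatalErrors=*/true));
  FPM.doInitialization();
  for (Function &F : M)
    if (!F.isDeclaration())
      FPM.run(F);
  FPM.doFinalization();
}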