Diffstat (limited to 'lib/VMCore')
-rw-r--r--  lib/VMCore/AsmWriter.cpp        |  16
-rw-r--r--  lib/VMCore/AutoUpgrade.cpp      | 120
-rw-r--r--  lib/VMCore/Constants.cpp        |  37
-rw-r--r--  lib/VMCore/Core.cpp             |   8
-rw-r--r--  lib/VMCore/Dominators.cpp       |   8
-rw-r--r--  lib/VMCore/Instructions.cpp     | 101
-rw-r--r--  lib/VMCore/LLVMContext.cpp      |  38
-rw-r--r--  lib/VMCore/LLVMContextImpl.cpp  |   1
-rw-r--r--  lib/VMCore/LeaksContext.h       |   5
-rw-r--r--  lib/VMCore/Makefile             |   2
-rw-r--r--  lib/VMCore/Metadata.cpp         |  17
-rw-r--r--  lib/VMCore/Pass.cpp             |   6
-rw-r--r--  lib/VMCore/PassManager.cpp      |   5
-rw-r--r--  lib/VMCore/Type.cpp             |   8
-rw-r--r--  lib/VMCore/TypeSymbolTable.cpp  |   6
-rw-r--r--  lib/VMCore/Verifier.cpp         | 105
16 files changed, 300 insertions(+), 183 deletions(-)
diff --git a/lib/VMCore/AsmWriter.cpp b/lib/VMCore/AsmWriter.cpp
index f6a6076df7bc4..6c1aa5ed10c6f 100644
--- a/lib/VMCore/AsmWriter.cpp
+++ b/lib/VMCore/AsmWriter.cpp
@@ -227,13 +227,15 @@ void TypePrinting::CalcTypeName(const Type *Ty,
const StructType *STy = cast<StructType>(Ty);
if (STy->isPacked())
OS << '<';
- OS << "{ ";
+ OS << '{';
for (StructType::element_iterator I = STy->element_begin(),
E = STy->element_end(); I != E; ++I) {
+ OS << ' ';
CalcTypeName(*I, TypeStack, OS);
- if (next(I) != STy->element_end())
+ if (next(I) == STy->element_end())
+ OS << ' ';
+ else
OS << ',';
- OS << ' ';
}
OS << '}';
if (STy->isPacked())
@@ -242,13 +244,15 @@ void TypePrinting::CalcTypeName(const Type *Ty,
}
case Type::UnionTyID: {
const UnionType *UTy = cast<UnionType>(Ty);
- OS << "union { ";
+ OS << "union {";
for (StructType::element_iterator I = UTy->element_begin(),
E = UTy->element_end(); I != E; ++I) {
+ OS << ' ';
CalcTypeName(*I, TypeStack, OS);
- if (next(I) != UTy->element_end())
+ if (next(I) == UTy->element_end())
+ OS << ' ';
+ else
OS << ',';
- OS << ' ';
}
OS << '}';
break;
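
Editor's note: the AsmWriter hunks above move the separator handling so a space is printed before each element and a comma only between elements; an empty struct now prints as "{}" and a populated one as "{ i32, i8 }" with no dangling ", ". A minimal standalone sketch of the same separator placement, using plain C++ strings rather than LLVM's TypePrinting class (the helper name and element names are illustrative only):

#include <iostream>
#include <string>
#include <vector>

// Mirrors the loop in CalcTypeName: '{', then " <elt>" for every element,
// a ',' between elements, a trailing ' ' after the last one, then '}'.
static void printBraced(const std::vector<std::string> &Elts,
                        std::ostream &OS) {
  OS << '{';
  for (std::size_t i = 0, e = Elts.size(); i != e; ++i) {
    OS << ' ' << Elts[i];
    OS << (i + 1 == e ? " " : ",");
  }
  OS << '}';
}

int main() {
  std::vector<std::string> S;
  S.push_back("i32");
  S.push_back("i8");
  printBraced(S, std::cout);                           // { i32, i8 }
  std::cout << '\n';
  printBraced(std::vector<std::string>(), std::cout);  // {}
  std::cout << '\n';
  return 0;
}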
diff --git a/lib/VMCore/AutoUpgrade.cpp b/lib/VMCore/AutoUpgrade.cpp
index 4d06b66681694..0144210767d80 100644
--- a/lib/VMCore/AutoUpgrade.cpp
+++ b/lib/VMCore/AutoUpgrade.cpp
@@ -19,6 +19,7 @@
#include "llvm/IntrinsicInst.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/IRBuilder.h"
#include <cstring>
using namespace llvm;
@@ -277,8 +278,13 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
// Calls to these intrinsics are transformed into vector multiplies.
NewFn = 0;
return true;
+ } else if (Name.compare(5, 18, "x86.ssse3.palign.r", 18) == 0 ||
+ Name.compare(5, 22, "x86.ssse3.palign.r.128", 22) == 0) {
+ // Calls to these intrinsics are transformed into vector shuffles, shifts,
+ // or 0.
+ NewFn = 0;
+ return true;
}
-
break;
}
@@ -420,6 +426,118 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
// Remove upgraded multiply.
CI->eraseFromParent();
+ } else if (F->getName() == "llvm.x86.ssse3.palign.r") {
+ Value *Op1 = CI->getOperand(1);
+ Value *Op2 = CI->getOperand(2);
+ Value *Op3 = CI->getOperand(3);
+ unsigned shiftVal = cast<ConstantInt>(Op3)->getZExtValue();
+ Value *Rep;
+ IRBuilder<> Builder(C);
+ Builder.SetInsertPoint(CI->getParent(), CI);
+
+ // If palignr is shifting the pair of input vectors less than 9 bytes,
+ // emit a shuffle instruction.
+ if (shiftVal <= 8) {
+ const Type *IntTy = Type::getInt32Ty(C);
+ const Type *EltTy = Type::getInt8Ty(C);
+ const Type *VecTy = VectorType::get(EltTy, 8);
+
+ Op2 = Builder.CreateBitCast(Op2, VecTy);
+ Op1 = Builder.CreateBitCast(Op1, VecTy);
+
+ llvm::SmallVector<llvm::Constant*, 8> Indices;
+ for (unsigned i = 0; i != 8; ++i)
+ Indices.push_back(ConstantInt::get(IntTy, shiftVal + i));
+
+ Value *SV = ConstantVector::get(Indices.begin(), Indices.size());
+ Rep = Builder.CreateShuffleVector(Op2, Op1, SV, "palignr");
+ Rep = Builder.CreateBitCast(Rep, F->getReturnType());
+ }
+
+ // If palignr is shifting the pair of input vectors more than 8 but less
+ // than 16 bytes, emit a logical right shift of the destination.
+ else if (shiftVal < 16) {
+ // MMX has these as 1 x i64 vectors for some odd optimization reasons.
+ const Type *EltTy = Type::getInt64Ty(C);
+ const Type *VecTy = VectorType::get(EltTy, 1);
+
+ Op1 = Builder.CreateBitCast(Op1, VecTy, "cast");
+ Op2 = ConstantInt::get(VecTy, (shiftVal-8) * 8);
+
+      // Emit the shift through the MMX psrl.q intrinsic.
+ Function *I =
+ Intrinsic::getDeclaration(F->getParent(), Intrinsic::x86_mmx_psrl_q);
+ Rep = Builder.CreateCall2(I, Op1, Op2, "palignr");
+ }
+
+    // If palignr is shifting the pair of vectors 16 bytes or more, emit zero.
+ else {
+ Rep = Constant::getNullValue(F->getReturnType());
+ }
+
+ // Replace any uses with our new instruction.
+ if (!CI->use_empty())
+ CI->replaceAllUsesWith(Rep);
+
+ // Remove upgraded instruction.
+ CI->eraseFromParent();
+
+ } else if (F->getName() == "llvm.x86.ssse3.palign.r.128") {
+ Value *Op1 = CI->getOperand(1);
+ Value *Op2 = CI->getOperand(2);
+ Value *Op3 = CI->getOperand(3);
+ unsigned shiftVal = cast<ConstantInt>(Op3)->getZExtValue();
+ Value *Rep;
+ IRBuilder<> Builder(C);
+ Builder.SetInsertPoint(CI->getParent(), CI);
+
+ // If palignr is shifting the pair of input vectors less than 17 bytes,
+ // emit a shuffle instruction.
+ if (shiftVal <= 16) {
+ const Type *IntTy = Type::getInt32Ty(C);
+ const Type *EltTy = Type::getInt8Ty(C);
+ const Type *VecTy = VectorType::get(EltTy, 16);
+
+ Op2 = Builder.CreateBitCast(Op2, VecTy);
+ Op1 = Builder.CreateBitCast(Op1, VecTy);
+
+ llvm::SmallVector<llvm::Constant*, 16> Indices;
+ for (unsigned i = 0; i != 16; ++i)
+ Indices.push_back(ConstantInt::get(IntTy, shiftVal + i));
+
+ Value *SV = ConstantVector::get(Indices.begin(), Indices.size());
+ Rep = Builder.CreateShuffleVector(Op2, Op1, SV, "palignr");
+ Rep = Builder.CreateBitCast(Rep, F->getReturnType());
+ }
+
+ // If palignr is shifting the pair of input vectors more than 16 but less
+ // than 32 bytes, emit a logical right shift of the destination.
+ else if (shiftVal < 32) {
+ const Type *EltTy = Type::getInt64Ty(C);
+ const Type *VecTy = VectorType::get(EltTy, 2);
+ const Type *IntTy = Type::getInt32Ty(C);
+
+ Op1 = Builder.CreateBitCast(Op1, VecTy, "cast");
+ Op2 = ConstantInt::get(IntTy, (shiftVal-16) * 8);
+
+      // Emit the shift through the SSE2 psrl.dq intrinsic.
+ Function *I =
+ Intrinsic::getDeclaration(F->getParent(), Intrinsic::x86_sse2_psrl_dq);
+ Rep = Builder.CreateCall2(I, Op1, Op2, "palignr");
+ }
+
+    // If palignr is shifting the pair of vectors 32 bytes or more, emit zero.
+ else {
+ Rep = Constant::getNullValue(F->getReturnType());
+ }
+
+ // Replace any uses with our new instruction.
+ if (!CI->use_empty())
+ CI->replaceAllUsesWith(Rep);
+
+ // Remove upgraded instruction.
+ CI->eraseFromParent();
+
} else {
llvm_unreachable("Unknown function for CallInst upgrade.");
}
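
Editor's note: palignr concatenates its two inputs (Op1 as the high half, Op2 as the low half), shifts the pair right by the immediate number of bytes, and keeps the low half. That is why, for shifts of at most one vector width, the upgrade code above emits a shufflevector whose mask is simply shiftVal + i with Op2 as the first shuffle operand; larger shifts reduce to a logical right shift of Op1, and shifts past both inputs produce zero. A small standalone sketch of that byte-level model (illustrative only, no LLVM dependency):

#include <cstdio>

// result[i] is byte (shiftVal + i) of the 2*N-byte concatenation
// {high = Op1, low = Op2}, or zero once the index runs past both inputs.
// This matches the shuffle mask built above (Indices[i] = shiftVal + i).
static void palignrByteModel(unsigned N, unsigned shiftVal) {
  for (unsigned i = 0; i != N; ++i) {
    unsigned idx = shiftVal + i;
    if (idx < N)
      std::printf("result[%u] = Op2[%u]\n", i, idx);
    else if (idx < 2 * N)
      std::printf("result[%u] = Op1[%u]\n", i, idx - N);
    else
      std::printf("result[%u] = 0\n", i);
  }
}

int main() {
  palignrByteModel(16, 5);   // 128-bit palignr, shift 5: pure shuffle case
  palignrByteModel(8, 12);   // 64-bit palignr, shift 12: becomes psrl.q of Op1
  palignrByteModel(8, 20);   // shift past both inputs: all zeros
  return 0;
}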
diff --git a/lib/VMCore/Constants.cpp b/lib/VMCore/Constants.cpp
index 1553bd51342d2..00b009401dccd 100644
--- a/lib/VMCore/Constants.cpp
+++ b/lib/VMCore/Constants.cpp
@@ -1224,20 +1224,20 @@ Constant *ConstantExpr::getCast(unsigned oc, Constant *C, const Type *Ty) {
Constant *ConstantExpr::getZExtOrBitCast(Constant *C, const Type *Ty) {
if (C->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
- return getCast(Instruction::BitCast, C, Ty);
- return getCast(Instruction::ZExt, C, Ty);
+ return getBitCast(C, Ty);
+ return getZExt(C, Ty);
}
Constant *ConstantExpr::getSExtOrBitCast(Constant *C, const Type *Ty) {
if (C->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
- return getCast(Instruction::BitCast, C, Ty);
- return getCast(Instruction::SExt, C, Ty);
+ return getBitCast(C, Ty);
+ return getSExt(C, Ty);
}
Constant *ConstantExpr::getTruncOrBitCast(Constant *C, const Type *Ty) {
if (C->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
- return getCast(Instruction::BitCast, C, Ty);
- return getCast(Instruction::Trunc, C, Ty);
+ return getBitCast(C, Ty);
+ return getTrunc(C, Ty);
}
Constant *ConstantExpr::getPointerCast(Constant *S, const Type *Ty) {
@@ -1245,8 +1245,8 @@ Constant *ConstantExpr::getPointerCast(Constant *S, const Type *Ty) {
assert((Ty->isIntegerTy() || Ty->isPointerTy()) && "Invalid cast");
if (Ty->isIntegerTy())
- return getCast(Instruction::PtrToInt, S, Ty);
- return getCast(Instruction::BitCast, S, Ty);
+ return getPtrToInt(S, Ty);
+ return getBitCast(S, Ty);
}
Constant *ConstantExpr::getIntegerCast(Constant *C, const Type *Ty,
@@ -1450,12 +1450,6 @@ Constant *ConstantExpr::getCompareTy(unsigned short predicate,
Constant *ConstantExpr::get(unsigned Opcode, Constant *C1, Constant *C2,
unsigned Flags) {
- // API compatibility: Adjust integer opcodes to floating-point opcodes.
- if (C1->getType()->isFPOrFPVectorTy()) {
- if (Opcode == Instruction::Add) Opcode = Instruction::FAdd;
- else if (Opcode == Instruction::Sub) Opcode = Instruction::FSub;
- else if (Opcode == Instruction::Mul) Opcode = Instruction::FMul;
- }
#ifndef NDEBUG
switch (Opcode) {
case Instruction::Add:
@@ -1523,8 +1517,8 @@ Constant* ConstantExpr::getSizeOf(const Type* Ty) {
Constant *GEPIdx = ConstantInt::get(Type::getInt32Ty(Ty->getContext()), 1);
Constant *GEP = getGetElementPtr(
Constant::getNullValue(PointerType::getUnqual(Ty)), &GEPIdx, 1);
- return getCast(Instruction::PtrToInt, GEP,
- Type::getInt64Ty(Ty->getContext()));
+ return getPtrToInt(GEP,
+ Type::getInt64Ty(Ty->getContext()));
}
Constant* ConstantExpr::getAlignOf(const Type* Ty) {
@@ -1537,8 +1531,8 @@ Constant* ConstantExpr::getAlignOf(const Type* Ty) {
Constant *One = ConstantInt::get(Type::getInt32Ty(Ty->getContext()), 1);
Constant *Indices[2] = { Zero, One };
Constant *GEP = getGetElementPtr(NullPtr, Indices, 2);
- return getCast(Instruction::PtrToInt, GEP,
- Type::getInt64Ty(Ty->getContext()));
+ return getPtrToInt(GEP,
+ Type::getInt64Ty(Ty->getContext()));
}
Constant* ConstantExpr::getOffsetOf(const StructType* STy, unsigned FieldNo) {
@@ -1555,8 +1549,8 @@ Constant* ConstantExpr::getOffsetOf(const Type* Ty, Constant *FieldNo) {
};
Constant *GEP = getGetElementPtr(
Constant::getNullValue(PointerType::getUnqual(Ty)), GEPIdx, 2);
- return getCast(Instruction::PtrToInt, GEP,
- Type::getInt64Ty(Ty->getContext()));
+ return getPtrToInt(GEP,
+ Type::getInt64Ty(Ty->getContext()));
}
Constant *ConstantExpr::getCompare(unsigned short pred,
@@ -1840,9 +1834,6 @@ Constant *ConstantExpr::getExtractValue(Constant *Agg,
}
Constant* ConstantExpr::getNeg(Constant* C) {
- // API compatibility: Adjust integer opcodes to floating-point opcodes.
- if (C->getType()->isFPOrFPVectorTy())
- return getFNeg(C);
assert(C->getType()->isIntOrIntVectorTy() &&
"Cannot NEG a nonintegral value!");
return get(Instruction::Sub,
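
Editor's note: with the API-compatibility shim removed, ConstantExpr::get and ConstantExpr::getNeg no longer silently rewrite Add/Sub/Mul/Neg into their floating-point counterparts; callers now pass FAdd/FSub/FMul or use getFNeg themselves when the operands are floating point. A short caller-side sketch, assuming the same 2.7-era API as the hunks above (helper names are illustrative):

#include "llvm/Constants.h"
#include "llvm/Instruction.h"
using namespace llvm;

// Pick the opcode explicitly now that the implicit integer->FP mapping
// is gone from ConstantExpr.
static Constant *makeConstAdd(Constant *L, Constant *R) {
  unsigned Opc = L->getType()->isFPOrFPVectorTy() ? Instruction::FAdd
                                                  : Instruction::Add;
  return ConstantExpr::get(Opc, L, R);
}

static Constant *makeConstNeg(Constant *C) {
  return C->getType()->isFPOrFPVectorTy() ? ConstantExpr::getFNeg(C)
                                          : ConstantExpr::getNeg(C);
}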
diff --git a/lib/VMCore/Core.cpp b/lib/VMCore/Core.cpp
index 634407ca13ff7..bbf1375ab0c78 100644
--- a/lib/VMCore/Core.cpp
+++ b/lib/VMCore/Core.cpp
@@ -119,6 +119,11 @@ void LLVMDumpModule(LLVMModuleRef M) {
unwrap(M)->dump();
}
+/*--.. Operations on inline assembler ......................................--*/
+void LLVMSetModuleInlineAsm(LLVMModuleRef M, const char *Asm) {
+ unwrap(M)->setModuleInlineAsm(StringRef(Asm));
+}
+
/*===-- Operations on types -----------------------------------------------===*/
@@ -322,8 +327,7 @@ LLVMTypeRef LLVMUnionTypeInContext(LLVMContextRef C, LLVMTypeRef *ElementTypes,
return wrap(UnionType::get(&Tys[0], Tys.size()));
}
-LLVMTypeRef LLVMUnionType(LLVMTypeRef *ElementTypes,
- unsigned ElementCount, int Packed) {
+LLVMTypeRef LLVMUnionType(LLVMTypeRef *ElementTypes, unsigned ElementCount) {
return LLVMUnionTypeInContext(LLVMGetGlobalContext(), ElementTypes,
ElementCount);
}
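
Editor's note: a minimal use of the new C-API entry point, compiled as C++ like the rest of this patch; it assumes the matching LLVMSetModuleInlineAsm declaration was added to llvm-c/Core.h in the same revision:

#include "llvm-c/Core.h"

// Attach module-level inline assembly through the C bindings.
void setModuleAsm(LLVMModuleRef M) {
  LLVMSetModuleInlineAsm(M, ".globl my_marker\nmy_marker:\n");
}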
diff --git a/lib/VMCore/Dominators.cpp b/lib/VMCore/Dominators.cpp
index 34417505814f7..10a866fab6226 100644
--- a/lib/VMCore/Dominators.cpp
+++ b/lib/VMCore/Dominators.cpp
@@ -30,9 +30,9 @@ using namespace llvm;
// Always verify dominfo if expensive checking is enabled.
#ifdef XDEBUG
-bool VerifyDomInfo = true;
+static bool VerifyDomInfo = true;
#else
-bool VerifyDomInfo = false;
+static bool VerifyDomInfo = false;
#endif
static cl::opt<bool,true>
VerifyDomInfoX("verify-dom-info", cl::location(VerifyDomInfo),
@@ -119,7 +119,7 @@ void DominanceFrontier::verifyAnalysis() const {
assert(!compare(OtherDF) && "Invalid DominanceFrontier info!");
}
-// NewBB is split and now it has one successor. Update dominace frontier to
+// NewBB is split and now it has one successor. Update dominance frontier to
// reflect this change.
void DominanceFrontier::splitBlock(BasicBlock *NewBB) {
assert(NewBB->getTerminator()->getNumSuccessors() == 1
@@ -129,7 +129,7 @@ void DominanceFrontier::splitBlock(BasicBlock *NewBB) {
SmallVector<BasicBlock*, 8> PredBlocks;
for (pred_iterator PI = pred_begin(NewBB), PE = pred_end(NewBB);
PI != PE; ++PI)
- PredBlocks.push_back(*PI);
+ PredBlocks.push_back(*PI);
if (PredBlocks.empty())
// If NewBB does not have any predecessors then it is an entry block.
diff --git a/lib/VMCore/Instructions.cpp b/lib/VMCore/Instructions.cpp
index 4609a64213bf2..f64b220c3fde3 100644
--- a/lib/VMCore/Instructions.cpp
+++ b/lib/VMCore/Instructions.cpp
@@ -30,80 +30,6 @@ using namespace llvm;
// CallSite Class
//===----------------------------------------------------------------------===//
-#define CALLSITE_DELEGATE_GETTER(METHOD) \
- Instruction *II = getInstruction(); \
- return isCall() \
- ? cast<CallInst>(II)->METHOD \
- : cast<InvokeInst>(II)->METHOD
-
-#define CALLSITE_DELEGATE_SETTER(METHOD) \
- Instruction *II = getInstruction(); \
- if (isCall()) \
- cast<CallInst>(II)->METHOD; \
- else \
- cast<InvokeInst>(II)->METHOD
-
-CallingConv::ID CallSite::getCallingConv() const {
- CALLSITE_DELEGATE_GETTER(getCallingConv());
-}
-void CallSite::setCallingConv(CallingConv::ID CC) {
- CALLSITE_DELEGATE_SETTER(setCallingConv(CC));
-}
-const AttrListPtr &CallSite::getAttributes() const {
- CALLSITE_DELEGATE_GETTER(getAttributes());
-}
-void CallSite::setAttributes(const AttrListPtr &PAL) {
- CALLSITE_DELEGATE_SETTER(setAttributes(PAL));
-}
-bool CallSite::paramHasAttr(uint16_t i, Attributes attr) const {
- CALLSITE_DELEGATE_GETTER(paramHasAttr(i, attr));
-}
-uint16_t CallSite::getParamAlignment(uint16_t i) const {
- CALLSITE_DELEGATE_GETTER(getParamAlignment(i));
-}
-
-/// @brief Return true if the call should not be inlined.
-bool CallSite::isNoInline() const {
- CALLSITE_DELEGATE_GETTER(isNoInline());
-}
-
-void CallSite::setIsNoInline(bool Value) {
- CALLSITE_DELEGATE_GETTER(setIsNoInline(Value));
-}
-
-
-bool CallSite::doesNotAccessMemory() const {
- CALLSITE_DELEGATE_GETTER(doesNotAccessMemory());
-}
-void CallSite::setDoesNotAccessMemory(bool doesNotAccessMemory) {
- CALLSITE_DELEGATE_SETTER(setDoesNotAccessMemory(doesNotAccessMemory));
-}
-bool CallSite::onlyReadsMemory() const {
- CALLSITE_DELEGATE_GETTER(onlyReadsMemory());
-}
-void CallSite::setOnlyReadsMemory(bool onlyReadsMemory) {
- CALLSITE_DELEGATE_SETTER(setOnlyReadsMemory(onlyReadsMemory));
-}
-bool CallSite::doesNotReturn() const {
- CALLSITE_DELEGATE_GETTER(doesNotReturn());
-}
-void CallSite::setDoesNotReturn(bool doesNotReturn) {
- CALLSITE_DELEGATE_SETTER(setDoesNotReturn(doesNotReturn));
-}
-bool CallSite::doesNotThrow() const {
- CALLSITE_DELEGATE_GETTER(doesNotThrow());
-}
-void CallSite::setDoesNotThrow(bool doesNotThrow) {
- CALLSITE_DELEGATE_SETTER(setDoesNotThrow(doesNotThrow));
-}
-
-bool CallSite::hasArgument(const Value *Arg) const {
- for (arg_iterator AI = this->arg_begin(), E = this->arg_end(); AI != E; ++AI)
- if (AI->get() == Arg)
- return true;
- return false;
-}
-
User::op_iterator CallSite::getCallee() const {
Instruction *II(getInstruction());
return isCall()
@@ -111,9 +37,6 @@ User::op_iterator CallSite::getCallee() const {
: cast<InvokeInst>(II)->op_end() - 3; // Skip BB, BB, Function
}
-#undef CALLSITE_DELEGATE_GETTER
-#undef CALLSITE_DELEGATE_SETTER
-
//===----------------------------------------------------------------------===//
// TerminatorInst Class
//===----------------------------------------------------------------------===//
@@ -1639,43 +1562,29 @@ const Type* ExtractValueInst::getIndexedType(const Type *Agg,
// BinaryOperator Class
//===----------------------------------------------------------------------===//
-/// AdjustIType - Map Add, Sub, and Mul to FAdd, FSub, and FMul when the
-/// type is floating-point, to help provide compatibility with an older API.
-///
-static BinaryOperator::BinaryOps AdjustIType(BinaryOperator::BinaryOps iType,
- const Type *Ty) {
- // API compatibility: Adjust integer opcodes to floating-point opcodes.
- if (Ty->isFPOrFPVectorTy()) {
- if (iType == BinaryOperator::Add) iType = BinaryOperator::FAdd;
- else if (iType == BinaryOperator::Sub) iType = BinaryOperator::FSub;
- else if (iType == BinaryOperator::Mul) iType = BinaryOperator::FMul;
- }
- return iType;
-}
-
BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
const Type *Ty, const Twine &Name,
Instruction *InsertBefore)
- : Instruction(Ty, AdjustIType(iType, Ty),
+ : Instruction(Ty, iType,
OperandTraits<BinaryOperator>::op_begin(this),
OperandTraits<BinaryOperator>::operands(this),
InsertBefore) {
Op<0>() = S1;
Op<1>() = S2;
- init(AdjustIType(iType, Ty));
+ init(iType);
setName(Name);
}
BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
const Type *Ty, const Twine &Name,
BasicBlock *InsertAtEnd)
- : Instruction(Ty, AdjustIType(iType, Ty),
+ : Instruction(Ty, iType,
OperandTraits<BinaryOperator>::op_begin(this),
OperandTraits<BinaryOperator>::operands(this),
InsertAtEnd) {
Op<0>() = S1;
Op<1>() = S2;
- init(AdjustIType(iType, Ty));
+ init(iType);
setName(Name);
}
@@ -2060,7 +1969,7 @@ unsigned CastInst::isEliminableCastPair(
// FPEXT < FloatPt n/a FloatPt n/a
// PTRTOINT n/a Pointer n/a Integral Unsigned
// INTTOPTR n/a Integral Unsigned Pointer n/a
- // BITCONVERT = FirstClass n/a FirstClass n/a
+ // BITCAST = FirstClass n/a FirstClass n/a
//
// NOTE: some transforms are safe, but we consider them to be non-profitable.
// For example, we could merge "fptoui double to i32" + "zext i32 to i64",
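
Editor's note: with AdjustIType removed from the BinaryOperator constructors, the integer-to-floating-point opcode adjustment described in the deleted doc comment no longer happens implicitly; as with the ConstantExpr change earlier in this patch, the caller selects the opcode when building instructions. A short sketch assuming the 2.7-era API (helper name illustrative):

#include "llvm/Instructions.h"
using namespace llvm;

// Creating an Add on floating-point operands no longer degrades
// gracefully to FAdd; choose the opcode from the operand type.
static BinaryOperator *createAdd(Value *L, Value *R,
                                 Instruction *InsertBefore) {
  Instruction::BinaryOps Opc = L->getType()->isFPOrFPVectorTy()
                                   ? Instruction::FAdd
                                   : Instruction::Add;
  return BinaryOperator::Create(Opc, L, R, "sum", InsertBefore);
}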
diff --git a/lib/VMCore/LLVMContext.cpp b/lib/VMCore/LLVMContext.cpp
index 3244f2842c4fd..4d61363b9394d 100644
--- a/lib/VMCore/LLVMContext.cpp
+++ b/lib/VMCore/LLVMContext.cpp
@@ -17,6 +17,7 @@
#include "llvm/Constants.h"
#include "llvm/Instruction.h"
#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/SourceMgr.h"
#include "LLVMContextImpl.h"
using namespace llvm;
@@ -33,6 +34,10 @@ LLVMContext::LLVMContext() : pImpl(new LLVMContextImpl(*this)) {
}
LLVMContext::~LLVMContext() { delete pImpl; }
+//===----------------------------------------------------------------------===//
+// Recoverable Backend Errors
+//===----------------------------------------------------------------------===//
+
void LLVMContext::setInlineAsmDiagnosticHandler(void *DiagHandler,
void *DiagContext) {
pImpl->InlineAsmDiagHandler = DiagHandler;
@@ -51,6 +56,39 @@ void *LLVMContext::getInlineAsmDiagnosticContext() const {
return pImpl->InlineAsmDiagContext;
}
+void LLVMContext::emitError(StringRef ErrorStr) {
+ emitError(0U, ErrorStr);
+}
+
+void LLVMContext::emitError(const Instruction *I, StringRef ErrorStr) {
+ unsigned LocCookie = 0;
+ if (const MDNode *SrcLoc = I->getMetadata("srcloc")) {
+ if (SrcLoc->getNumOperands() != 0)
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(SrcLoc->getOperand(0)))
+ LocCookie = CI->getZExtValue();
+ }
+ return emitError(LocCookie, ErrorStr);
+}
+
+void LLVMContext::emitError(unsigned LocCookie, StringRef ErrorStr) {
+ // If there is no error handler installed, just print the error and exit.
+ if (pImpl->InlineAsmDiagHandler == 0) {
+ errs() << "error: " << ErrorStr << "\n";
+ exit(1);
+ }
+
+ // If we do have an error handler, we can report the error and keep going.
+ SMDiagnostic Diag("", "error: " + ErrorStr.str());
+
+ ((SourceMgr::DiagHandlerTy)(intptr_t)pImpl->InlineAsmDiagHandler)
+ (Diag, pImpl->InlineAsmDiagContext, LocCookie);
+
+}
+
+//===----------------------------------------------------------------------===//
+// Metadata Kind Uniquing
+//===----------------------------------------------------------------------===//
+
#ifndef NDEBUG
/// isValidName - Return true if Name is a valid custom metadata handler name.
static bool isValidName(StringRef MDName) {
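
Editor's note: the new emitError overloads fall back to printing and exiting when no inline-asm diagnostic handler is installed, so a front end that wants recoverable backend errors registers a handler first. A sketch of doing that; the handler signature is inferred from the three-argument call through the SourceMgr::DiagHandlerTy cast in the hunk above, so treat it as an assumption about this revision's headers:

#include "llvm/LLVMContext.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"
#include <stdint.h>
using namespace llvm;

// Invoked through the (SourceMgr::DiagHandlerTy) cast shown above.
static void MyInlineAsmDiagHandler(const SMDiagnostic &Diag, void *Context,
                                   unsigned LocCookie) {
  (void)Diag; (void)Context;
  errs() << "recoverable backend error, srcloc cookie " << LocCookie << "\n";
}

void installHandler(LLVMContext &Ctx) {
  Ctx.setInlineAsmDiagnosticHandler((void*)(intptr_t)&MyInlineAsmDiagHandler,
                                    /*DiagContext=*/0);
}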
diff --git a/lib/VMCore/LLVMContextImpl.cpp b/lib/VMCore/LLVMContextImpl.cpp
index e71157f44020c..9e41a08156084 100644
--- a/lib/VMCore/LLVMContextImpl.cpp
+++ b/lib/VMCore/LLVMContextImpl.cpp
@@ -13,6 +13,7 @@
#include "LLVMContextImpl.h"
#include <algorithm>
+using namespace llvm;
LLVMContextImpl::LLVMContextImpl(LLVMContext &C)
: TheTrueVal(0), TheFalseVal(0),
diff --git a/lib/VMCore/LeaksContext.h b/lib/VMCore/LeaksContext.h
index abff090b87963..b9e59d46b7ad6 100644
--- a/lib/VMCore/LeaksContext.h
+++ b/lib/VMCore/LeaksContext.h
@@ -14,7 +14,8 @@
#include "llvm/Value.h"
#include "llvm/ADT/SmallPtrSet.h"
-using namespace llvm;
+
+namespace llvm {
template <class T>
struct PrinterTrait {
@@ -87,3 +88,5 @@ private:
const T* Cache;
const char* Name;
};
+
+}
diff --git a/lib/VMCore/Makefile b/lib/VMCore/Makefile
index 4395ecfda05bb..03a4fc707debb 100644
--- a/lib/VMCore/Makefile
+++ b/lib/VMCore/Makefile
@@ -1,4 +1,4 @@
-##===- lib/VMCore/Makefile ------------------------------*- Makefile -*-===##
+##===- lib/VMCore/Makefile ---------------------------------*- Makefile -*-===##
#
# The LLVM Compiler Infrastructure
#
diff --git a/lib/VMCore/Metadata.cpp b/lib/VMCore/Metadata.cpp
index 72de0321c3aae..092fe00a53697 100644
--- a/lib/VMCore/Metadata.cpp
+++ b/lib/VMCore/Metadata.cpp
@@ -178,6 +178,13 @@ void MDNode::destroy() {
free(this);
}
+/// isFunctionLocalValue - Return true if this is a value that would require a
+/// function-local MDNode.
+static bool isFunctionLocalValue(Value *V) {
+ return isa<Instruction>(V) || isa<Argument>(V) || isa<BasicBlock>(V) ||
+ (isa<MDNode>(V) && cast<MDNode>(V)->isFunctionLocal());
+}
+
MDNode *MDNode::getMDNode(LLVMContext &Context, Value *const *Vals,
unsigned NumVals, FunctionLocalness FL,
bool Insert) {
@@ -188,8 +195,7 @@ MDNode *MDNode::getMDNode(LLVMContext &Context, Value *const *Vals,
for (unsigned i = 0; i != NumVals; ++i) {
Value *V = Vals[i];
if (!V) continue;
- if (isa<Instruction>(V) || isa<Argument>(V) || isa<BasicBlock>(V) ||
- (isa<MDNode>(V) && cast<MDNode>(V)->isFunctionLocal())) {
+ if (isFunctionLocalValue(V)) {
isFunctionLocal = true;
break;
}
@@ -262,6 +268,13 @@ void MDNode::setIsNotUniqued() {
void MDNode::replaceOperand(MDNodeOperand *Op, Value *To) {
Value *From = *Op;
+  // It is possible that someone did GV->RAUW(inst), replacing a global variable
+ // with an instruction or some other function-local object. If this is a
+ // non-function-local MDNode, it can't point to a function-local object.
+ // Handle this case by implicitly dropping the MDNode reference to null.
+ if (!isFunctionLocal() && To && isFunctionLocalValue(To))
+ To = 0;
+
if (From == To)
return;
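
Editor's note: the scenario the new check in replaceOperand guards against is a non-function-local MDNode that references a global, where the global is later RAUW'd with an instruction (or another function-local value) of the same type. Rather than leaving a global node pointing at function-local IR, the reference is dropped to null. A hedged sketch of that situation, assuming the 2.7-era MDNode::get(Context, Vals, NumVals) form backing the getMDNode helper above:

#include "llvm/GlobalVariable.h"
#include "llvm/Instruction.h"
#include "llvm/Metadata.h"
#include <cassert>
using namespace llvm;

// GV and I must have the same type for replaceAllUsesWith to be legal,
// e.g. an i32* global and an i32* instruction such as an alloca or bitcast.
void rauwDropsMetadataRef(LLVMContext &Ctx, GlobalVariable *GV,
                          Instruction *I) {
  Value *Elts[] = { GV };
  MDNode *N = MDNode::get(Ctx, Elts, 1);   // non-function-local node
  GV->replaceAllUsesWith(I);               // hits MDNode::replaceOperand
  assert(N->getOperand(0) == 0 &&
         "global node's reference to the function-local value was dropped");
}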
diff --git a/lib/VMCore/Pass.cpp b/lib/VMCore/Pass.cpp
index 6b941f34996e1..a60877db2f627 100644
--- a/lib/VMCore/Pass.cpp
+++ b/lib/VMCore/Pass.cpp
@@ -318,6 +318,8 @@ static PassRegistrar *getPassRegistrar() {
return PassRegistrarObj;
}
+namespace {
+
// FIXME: We use ManagedCleanup to erase the pass registrar on shutdown.
// Unfortunately, passes are registered with static ctors, and having
// llvm_shutdown clear this map prevents successful resurrection after
@@ -329,7 +331,9 @@ void cleanupPassRegistrar(void*) {
PassRegistrarObj = 0;
}
}
-ManagedCleanup<&cleanupPassRegistrar> registrarCleanup;
+ManagedCleanup<&cleanupPassRegistrar> registrarCleanup ATTRIBUTE_USED;
+
+}
// getPassInfo - Return the PassInfo data structure that corresponds to this
// pass...
diff --git a/lib/VMCore/PassManager.cpp b/lib/VMCore/PassManager.cpp
index 6ca35ac0260f6..b28fdebd52320 100644
--- a/lib/VMCore/PassManager.cpp
+++ b/lib/VMCore/PassManager.cpp
@@ -1293,9 +1293,8 @@ void FunctionPassManager::add(Pass *P) {
bool FunctionPassManager::run(Function &F) {
if (F.isMaterializable()) {
std::string errstr;
- if (F.Materialize(&errstr)) {
- llvm_report_error("Error reading bitcode file: " + errstr);
- }
+ if (F.Materialize(&errstr))
+ report_fatal_error("Error reading bitcode file: " + Twine(errstr));
}
return FPM->run(F);
}
diff --git a/lib/VMCore/Type.cpp b/lib/VMCore/Type.cpp
index 5f9c11fc0ed96..845b523c24216 100644
--- a/lib/VMCore/Type.cpp
+++ b/lib/VMCore/Type.cpp
@@ -380,6 +380,10 @@ const Type *Type::getPPC_FP128Ty(LLVMContext &C) {
return &C.pImpl->PPC_FP128Ty;
}
+const IntegerType *Type::getIntNTy(LLVMContext &C, unsigned N) {
+ return IntegerType::get(C, N);
+}
+
const IntegerType *Type::getInt1Ty(LLVMContext &C) {
return &C.pImpl->Int1Ty;
}
@@ -420,6 +424,10 @@ const PointerType *Type::getPPC_FP128PtrTy(LLVMContext &C, unsigned AS) {
return getPPC_FP128Ty(C)->getPointerTo(AS);
}
+const PointerType *Type::getIntNPtrTy(LLVMContext &C, unsigned N, unsigned AS) {
+ return getIntNTy(C, N)->getPointerTo(AS);
+}
+
const PointerType *Type::getInt1PtrTy(LLVMContext &C, unsigned AS) {
return getInt1Ty(C)->getPointerTo(AS);
}
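
Editor's note: the new getIntNTy/getIntNPtrTy accessors round out the fixed-width helpers (getInt8Ty, getInt8PtrTy, and so on) for arbitrary bit widths. A minimal usage sketch:

#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"
using namespace llvm;

void oddWidthTypes(LLVMContext &Ctx) {
  const IntegerType *I24 = Type::getIntNTy(Ctx, 24);        // i24
  const PointerType *P24 = Type::getIntNPtrTy(Ctx, 24, 0);  // i24* in addrspace 0
  (void)I24;
  (void)P24;
}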
diff --git a/lib/VMCore/TypeSymbolTable.cpp b/lib/VMCore/TypeSymbolTable.cpp
index b4daf0f63144f..d68a44bd6711c 100644
--- a/lib/VMCore/TypeSymbolTable.cpp
+++ b/lib/VMCore/TypeSymbolTable.cpp
@@ -126,13 +126,15 @@ void TypeSymbolTable::refineAbstractType(const DerivedType *OldType,
// faster to remove them all in one pass.
//
for (iterator I = begin(), E = end(); I != E; ++I) {
- if (I->second == (Type*)OldType) { // FIXME when Types aren't const.
+ // FIXME when Types aren't const.
+ if (I->second == const_cast<DerivedType *>(OldType)) {
#if DEBUG_ABSTYPE
dbgs() << "Removing type " << OldType->getDescription() << "\n";
#endif
OldType->removeAbstractTypeUser(this);
- I->second = (Type*)NewType; // TODO FIXME when types aren't const
+ // TODO FIXME when types aren't const
+ I->second = const_cast<Type *>(NewType);
if (NewType->isAbstract()) {
#if DEBUG_ABSTYPE
dbgs() << "Added type " << NewType->getDescription() << "\n";
diff --git a/lib/VMCore/Verifier.cpp b/lib/VMCore/Verifier.cpp
index c18168d8a5c78..6ad427218d7b7 100644
--- a/lib/VMCore/Verifier.cpp
+++ b/lib/VMCore/Verifier.cpp
@@ -93,7 +93,7 @@ namespace { // Anonymous namespace for class
}
if (Broken)
- llvm_report_error("Broken module, no Basic Block terminator!");
+ report_fatal_error("Broken module, no Basic Block terminator!");
return false;
}
@@ -176,6 +176,10 @@ namespace {
/// Types - keep track of the types that have been checked already.
TypeSet Types;
+ /// MDNodes - keep track of the metadata nodes that have been checked
+ /// already.
+ SmallPtrSet<MDNode *, 32> MDNodes;
+
Verifier()
: FunctionPass(&ID),
Broken(false), RealPass(true), action(AbortProcessAction),
@@ -244,6 +248,10 @@ namespace {
I != E; ++I)
visitGlobalAlias(*I);
+ for (Module::named_metadata_iterator I = M.named_metadata_begin(),
+ E = M.named_metadata_end(); I != E; ++I)
+ visitNamedMDNode(*I);
+
// If the module is broken, abort at this time.
return abortIfBroken();
}
@@ -284,6 +292,8 @@ namespace {
void visitGlobalValue(GlobalValue &GV);
void visitGlobalVariable(GlobalVariable &GV);
void visitGlobalAlias(GlobalAlias &GA);
+ void visitNamedMDNode(NamedMDNode &NMD);
+ void visitMDNode(MDNode &MD, Function *F);
void visitFunction(Function &F);
void visitBasicBlock(BasicBlock &BB);
using InstVisitor<Verifier>::visit;
@@ -333,8 +343,6 @@ namespace {
int VT, unsigned ArgNo, std::string &Suffix);
void VerifyIntrinsicPrototype(Intrinsic::ID ID, Function *F,
unsigned RetNum, unsigned ParamNum, ...);
- void VerifyFunctionLocalMetadata(MDNode *N, Function *F,
- SmallPtrSet<MDNode *, 32> &Visited);
void VerifyParameterAttrs(Attributes Attrs, const Type *Ty,
bool isReturnValue, const Value *V);
void VerifyFunctionAttrs(const FunctionType *FT, const AttrListPtr &Attrs,
@@ -489,6 +497,54 @@ void Verifier::visitGlobalAlias(GlobalAlias &GA) {
visitGlobalValue(GA);
}
+void Verifier::visitNamedMDNode(NamedMDNode &NMD) {
+ for (unsigned i = 0, e = NMD.getNumOperands(); i != e; ++i) {
+ MDNode *MD = NMD.getOperand(i);
+ if (!MD)
+ continue;
+
+ Assert2(!MD->isFunctionLocal(),
+ "Named metadata operand cannot be function local!", &NMD, MD);
+ visitMDNode(*MD, 0);
+ }
+}
+
+void Verifier::visitMDNode(MDNode &MD, Function *F) {
+ // Only visit each node once. Metadata can be mutually recursive, so this
+ // avoids infinite recursion here, as well as being an optimization.
+ if (!MDNodes.insert(&MD))
+ return;
+
+ for (unsigned i = 0, e = MD.getNumOperands(); i != e; ++i) {
+ Value *Op = MD.getOperand(i);
+ if (!Op)
+ continue;
+ if (isa<Constant>(Op) || isa<MDString>(Op) || isa<NamedMDNode>(Op))
+ continue;
+ if (MDNode *N = dyn_cast<MDNode>(Op)) {
+ Assert2(MD.isFunctionLocal() || !N->isFunctionLocal(),
+ "Global metadata operand cannot be function local!", &MD, N);
+ visitMDNode(*N, F);
+ continue;
+ }
+ Assert2(MD.isFunctionLocal(), "Invalid operand for global metadata!", &MD, Op);
+
+ // If this was an instruction, bb, or argument, verify that it is in the
+ // function that we expect.
+ Function *ActualF = 0;
+ if (Instruction *I = dyn_cast<Instruction>(Op))
+ ActualF = I->getParent()->getParent();
+ else if (BasicBlock *BB = dyn_cast<BasicBlock>(Op))
+ ActualF = BB->getParent();
+ else if (Argument *A = dyn_cast<Argument>(Op))
+ ActualF = A->getParent();
+ assert(ActualF && "Unimplemented function local metadata case!");
+
+ Assert2(ActualF == F, "function-local metadata used in wrong function",
+ &MD, Op);
+ }
+}
+
void Verifier::verifyTypeSymbolTable(TypeSymbolTable &ST) {
for (TypeSymbolTable::iterator I = ST.begin(), E = ST.end(); I != E; ++I)
VerifyType(I->second);
@@ -1553,38 +1609,6 @@ void Verifier::VerifyType(const Type *Ty) {
}
}
-/// VerifyFunctionLocalMetadata - Verify that the specified MDNode is local to
-/// specified Function.
-void Verifier::VerifyFunctionLocalMetadata(MDNode *N, Function *F,
- SmallPtrSet<MDNode *, 32> &Visited) {
- assert(N->isFunctionLocal() && "Should only be called on function-local MD");
-
- // Only visit each node once.
- if (!Visited.insert(N))
- return;
-
- for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
- Value *V = N->getOperand(i);
- if (!V) continue;
-
- Function *ActualF = 0;
- if (Instruction *I = dyn_cast<Instruction>(V))
- ActualF = I->getParent()->getParent();
- else if (BasicBlock *BB = dyn_cast<BasicBlock>(V))
- ActualF = BB->getParent();
- else if (Argument *A = dyn_cast<Argument>(V))
- ActualF = A->getParent();
- else if (MDNode *MD = dyn_cast<MDNode>(V))
- if (MD->isFunctionLocal())
- VerifyFunctionLocalMetadata(MD, F, Visited);
-
- // If this was an instruction, bb, or argument, verify that it is in the
- // function that we expect.
- Assert1(ActualF == 0 || ActualF == F,
- "function-local metadata used in wrong function", N);
- }
-}
-
// Flags used by TableGen to mark intrinsic parameters with the
// LLVMExtendedElementVectorType and LLVMTruncatedElementVectorType classes.
static const unsigned ExtendedElementVectorType = 0x40000000;
@@ -1604,11 +1628,8 @@ void Verifier::visitIntrinsicFunctionCall(Intrinsic::ID ID, CallInst &CI) {
// If the intrinsic takes MDNode arguments, verify that they are either global
// or are local to *this* function.
for (unsigned i = 1, e = CI.getNumOperands(); i != e; ++i)
- if (MDNode *MD = dyn_cast<MDNode>(CI.getOperand(i))) {
- if (!MD->isFunctionLocal()) continue;
- SmallPtrSet<MDNode *, 32> Visited;
- VerifyFunctionLocalMetadata(MD, CI.getParent()->getParent(), Visited);
- }
+ if (MDNode *MD = dyn_cast<MDNode>(CI.getOperand(i)))
+ visitMDNode(*MD, CI.getParent()->getParent());
switch (ID) {
default:
@@ -1933,7 +1954,9 @@ FunctionPass *llvm::createVerifierPass(VerifierFailureAction action) {
}
-// verifyFunction - Create
+/// verifyFunction - Check a function for errors, printing messages on stderr.
+/// Return true if the function is corrupt.
+///
bool llvm::verifyFunction(const Function &f, VerifierFailureAction action) {
Function &F = const_cast<Function&>(f);
assert(!F.isDeclaration() && "Cannot verify external functions");
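
Editor's note: given the documentation fix above, a typical non-aborting use of the verifier from client code; ReturnStatusAction is assumed to be the "just return the status" enumerator declared in the 2.7-era llvm/Analysis/Verifier.h alongside AbortProcessAction:

#include "llvm/Analysis/Verifier.h"
#include "llvm/Function.h"
using namespace llvm;

// Returns true if F is corrupt; ReturnStatusAction avoids the default
// AbortProcessAction behavior of killing the process.
bool checkFunction(Function &F) {
  return verifyFunction(F, ReturnStatusAction);
}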