path: root/lib/CodeGen
author     Dimitry Andric <dim@FreeBSD.org>    2011-10-20 21:14:49 +0000
committer  Dimitry Andric <dim@FreeBSD.org>    2011-10-20 21:14:49 +0000
commit     36981b17ed939300f6f8fc2355a255f711fcef71 (patch)
tree       ee2483e98b09cac943dc93a6969d83ca737ff139 /lib/CodeGen
parent     180abc3db9ae3b4fc63cd65b15697e6ffcc8a657 (diff)
Diffstat (limited to 'lib/CodeGen')
-rw-r--r--  lib/CodeGen/BackendUtil.cpp  52
-rw-r--r--  lib/CodeGen/CGBlocks.cpp  162
-rw-r--r--  lib/CodeGen/CGBlocks.h  2
-rw-r--r--  lib/CodeGen/CGBuiltin.cpp  495
-rw-r--r--  lib/CodeGen/CGCUDANV.cpp  126
-rw-r--r--  lib/CodeGen/CGCUDARuntime.cpp  55
-rw-r--r--  lib/CodeGen/CGCUDARuntime.h  54
-rw-r--r--  lib/CodeGen/CGCXX.cpp  42
-rw-r--r--  lib/CodeGen/CGCXXABI.cpp  10
-rw-r--r--  lib/CodeGen/CGCXXABI.h  8
-rw-r--r--  lib/CodeGen/CGCall.cpp  285
-rw-r--r--  lib/CodeGen/CGCall.h  10
-rw-r--r--  lib/CodeGen/CGClass.cpp  92
-rw-r--r--  lib/CodeGen/CGCleanup.cpp  383
-rw-r--r--  lib/CodeGen/CGCleanup.h  323
-rw-r--r--  lib/CodeGen/CGDebugInfo.cpp  499
-rw-r--r--  lib/CodeGen/CGDebugInfo.h  71
-rw-r--r--  lib/CodeGen/CGDecl.cpp  184
-rw-r--r--  lib/CodeGen/CGDeclCXX.cpp  25
-rw-r--r--  lib/CodeGen/CGException.cpp  692
-rw-r--r--  lib/CodeGen/CGException.h  12
-rw-r--r--  lib/CodeGen/CGExpr.cpp  502
-rw-r--r--  lib/CodeGen/CGExprAgg.cpp  156
-rw-r--r--  lib/CodeGen/CGExprCXX.cpp  284
-rw-r--r--  lib/CodeGen/CGExprComplex.cpp  83
-rw-r--r--  lib/CodeGen/CGExprConstant.cpp  111
-rw-r--r--  lib/CodeGen/CGExprScalar.cpp  204
-rw-r--r--  lib/CodeGen/CGObjC.cpp  1362
-rw-r--r--  lib/CodeGen/CGObjCGNU.cpp  378
-rw-r--r--  lib/CodeGen/CGObjCMac.cpp  383
-rw-r--r--  lib/CodeGen/CGObjCRuntime.cpp  45
-rw-r--r--  lib/CodeGen/CGObjCRuntime.h  3
-rw-r--r--  lib/CodeGen/CGOpenCLRuntime.cpp  28
-rw-r--r--  lib/CodeGen/CGOpenCLRuntime.h  46
-rw-r--r--  lib/CodeGen/CGRTTI.cpp  50
-rw-r--r--  lib/CodeGen/CGRecordLayout.h  11
-rw-r--r--  lib/CodeGen/CGRecordLayoutBuilder.cpp  32
-rw-r--r--  lib/CodeGen/CGStmt.cpp  129
-rw-r--r--  lib/CodeGen/CGVTT.cpp  421
-rw-r--r--  lib/CodeGen/CGVTables.cpp  2688
-rw-r--r--  lib/CodeGen/CGVTables.h  168
-rw-r--r--  lib/CodeGen/CGValue.h  109
-rw-r--r--  lib/CodeGen/CMakeLists.txt  3
-rw-r--r--  lib/CodeGen/CodeGenAction.cpp  29
-rw-r--r--  lib/CodeGen/CodeGenFunction.cpp  96
-rw-r--r--  lib/CodeGen/CodeGenFunction.h  197
-rw-r--r--  lib/CodeGen/CodeGenModule.cpp  712
-rw-r--r--  lib/CodeGen/CodeGenModule.h  156
-rw-r--r--  lib/CodeGen/CodeGenTBAA.cpp  4
-rw-r--r--  lib/CodeGen/CodeGenTBAA.h  4
-rw-r--r--  lib/CodeGen/CodeGenTypes.cpp  44
-rw-r--r--  lib/CodeGen/CodeGenTypes.h  10
-rw-r--r--  lib/CodeGen/ItaniumCXXABI.cpp  128
-rw-r--r--  lib/CodeGen/MicrosoftCXXABI.cpp  4
-rw-r--r--  lib/CodeGen/ModuleBuilder.cpp  12
-rw-r--r--  lib/CodeGen/TargetInfo.cpp  420
-rw-r--r--  lib/CodeGen/TargetInfo.h  42
57 files changed, 5809 insertions, 6827 deletions
diff --git a/lib/CodeGen/BackendUtil.cpp b/lib/CodeGen/BackendUtil.cpp
index 85f42db81f59..b9e3ed9edd19 100644
--- a/lib/CodeGen/BackendUtil.cpp
+++ b/lib/CodeGen/BackendUtil.cpp
@@ -15,6 +15,7 @@
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/Module.h"
#include "llvm/PassManager.h"
+#include "llvm/Analysis/Verifier.h"
#include "llvm/Assembly/PrintModulePass.h"
#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
@@ -23,21 +24,24 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/PrettyStackTrace.h"
-#include "llvm/Support/PassManagerBuilder.h"
+#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
-#include "llvm/Target/TargetRegistry.h"
#include "llvm/Transforms/Instrumentation.h"
+#include "llvm/Transforms/IPO.h"
+#include "llvm/Transforms/IPO/PassManagerBuilder.h"
+#include "llvm/Transforms/Scalar.h"
using namespace clang;
using namespace llvm;
namespace {
class EmitAssemblyHelper {
- Diagnostic &Diags;
+ DiagnosticsEngine &Diags;
const CodeGenOptions &CodeGenOpts;
const TargetOptions &TargetOpts;
const LangOptions &LangOpts;
@@ -82,7 +86,7 @@ private:
bool AddEmitPasses(BackendAction Action, formatted_raw_ostream &OS);
public:
- EmitAssemblyHelper(Diagnostic &_Diags,
+ EmitAssemblyHelper(DiagnosticsEngine &_Diags,
const CodeGenOptions &CGOpts, const TargetOptions &TOpts,
const LangOptions &LOpts,
Module *M)
@@ -237,27 +241,18 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
TargetMachine::setDataSections (CodeGenOpts.DataSections);
// FIXME: Parse this earlier.
- if (CodeGenOpts.RelocationModel == "static") {
- TargetMachine::setRelocationModel(llvm::Reloc::Static);
- } else if (CodeGenOpts.RelocationModel == "pic") {
- TargetMachine::setRelocationModel(llvm::Reloc::PIC_);
- } else {
- assert(CodeGenOpts.RelocationModel == "dynamic-no-pic" &&
- "Invalid PIC model!");
- TargetMachine::setRelocationModel(llvm::Reloc::DynamicNoPIC);
- }
- // FIXME: Parse this earlier.
+ llvm::CodeModel::Model CM;
if (CodeGenOpts.CodeModel == "small") {
- TargetMachine::setCodeModel(llvm::CodeModel::Small);
+ CM = llvm::CodeModel::Small;
} else if (CodeGenOpts.CodeModel == "kernel") {
- TargetMachine::setCodeModel(llvm::CodeModel::Kernel);
+ CM = llvm::CodeModel::Kernel;
} else if (CodeGenOpts.CodeModel == "medium") {
- TargetMachine::setCodeModel(llvm::CodeModel::Medium);
+ CM = llvm::CodeModel::Medium;
} else if (CodeGenOpts.CodeModel == "large") {
- TargetMachine::setCodeModel(llvm::CodeModel::Large);
+ CM = llvm::CodeModel::Large;
} else {
assert(CodeGenOpts.CodeModel.empty() && "Invalid code model!");
- TargetMachine::setCodeModel(llvm::CodeModel::Default);
+ CM = llvm::CodeModel::Default;
}
std::vector<const char *> BackendArgs;
@@ -274,6 +269,8 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
BackendArgs.push_back("-time-passes");
for (unsigned i = 0, e = CodeGenOpts.BackendOptions.size(); i != e; ++i)
BackendArgs.push_back(CodeGenOpts.BackendOptions[i].c_str());
+ if (CodeGenOpts.NoGlobalMerge)
+ BackendArgs.push_back("-global-merge=false");
BackendArgs.push_back(0);
llvm::cl::ParseCommandLineOptions(BackendArgs.size() - 1,
const_cast<char **>(&BackendArgs[0]));
@@ -287,8 +284,20 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
Features.AddFeature(*it);
FeaturesStr = Features.getString();
}
+
+ llvm::Reloc::Model RM = llvm::Reloc::Default;
+ if (CodeGenOpts.RelocationModel == "static") {
+ RM = llvm::Reloc::Static;
+ } else if (CodeGenOpts.RelocationModel == "pic") {
+ RM = llvm::Reloc::PIC_;
+ } else {
+ assert(CodeGenOpts.RelocationModel == "dynamic-no-pic" &&
+ "Invalid PIC model!");
+ RM = llvm::Reloc::DynamicNoPIC;
+ }
+
TargetMachine *TM = TheTarget->createTargetMachine(Triple, TargetOpts.CPU,
- FeaturesStr);
+ FeaturesStr, RM, CM);
if (CodeGenOpts.RelaxAll)
TM->setMCRelaxAll(true);
@@ -386,7 +395,8 @@ void EmitAssemblyHelper::EmitAssembly(BackendAction Action, raw_ostream *OS) {
}
}
-void clang::EmitBackendOutput(Diagnostic &Diags, const CodeGenOptions &CGOpts,
+void clang::EmitBackendOutput(DiagnosticsEngine &Diags,
+ const CodeGenOptions &CGOpts,
const TargetOptions &TOpts,
const LangOptions &LOpts,
Module *M,
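
The BackendUtil.cpp changes above track an LLVM API change: the relocation and code models are no longer set through the process-wide TargetMachine::setRelocationModel/setCodeModel setters but are passed directly to createTargetMachine, and TargetRegistry/PassManagerBuilder moved to new headers. A minimal sketch of the resulting construction sequence, assuming the LLVM 3.0-era API used in the hunk (the helper name and error handling are illustrative):

    // Sketch: building a TargetMachine with explicit relocation and code
    // models, mirroring the new createTargetMachine call above.
    #include <string>
    #include "llvm/Support/TargetRegistry.h"
    #include "llvm/Target/TargetMachine.h"

    llvm::TargetMachine *buildTM(const std::string &Triple,
                                 const std::string &CPU,
                                 const std::string &FeaturesStr,
                                 std::string &Error) {
      const llvm::Target *TheTarget =
          llvm::TargetRegistry::lookupTarget(Triple, Error);
      if (!TheTarget)
        return 0;
      // The models are now per-machine arguments, not global state.
      llvm::Reloc::Model RM = llvm::Reloc::PIC_;           // e.g. for -fpic
      llvm::CodeModel::Model CM = llvm::CodeModel::Default;
      return TheTarget->createTargetMachine(Triple, CPU, FeaturesStr, RM, CM);
    }
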
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
index 9815d1d4ef4d..969495376642 100644
--- a/lib/CodeGen/CGBlocks.cpp
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -59,10 +59,10 @@ static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
const CGBlockInfo &blockInfo) {
ASTContext &C = CGM.getContext();
- const llvm::Type *ulong = CGM.getTypes().ConvertType(C.UnsignedLongTy);
- const llvm::Type *i8p = CGM.getTypes().ConvertType(C.VoidPtrTy);
+ llvm::Type *ulong = CGM.getTypes().ConvertType(C.UnsignedLongTy);
+ llvm::Type *i8p = CGM.getTypes().ConvertType(C.VoidPtrTy);
- llvm::SmallVector<llvm::Constant*, 6> elements;
+ SmallVector<llvm::Constant*, 6> elements;
// reserved
elements.push_back(llvm::ConstantInt::get(ulong, 0));
@@ -243,7 +243,7 @@ static CharUnits getLowBit(CharUnits v) {
}
static void initializeForBlockHeader(CodeGenModule &CGM, CGBlockInfo &info,
- llvm::SmallVectorImpl<llvm::Type*> &elementTypes) {
+ SmallVectorImpl<llvm::Type*> &elementTypes) {
ASTContext &C = CGM.getContext();
// The header is basically a 'struct { void *; int; int; void *; void *; }'.
@@ -280,7 +280,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CGBlockInfo &info) {
ASTContext &C = CGM.getContext();
const BlockDecl *block = info.getBlockDecl();
- llvm::SmallVector<llvm::Type*, 8> elementTypes;
+ SmallVector<llvm::Type*, 8> elementTypes;
initializeForBlockHeader(CGM, info, elementTypes);
if (!block->hasCaptures()) {
@@ -291,7 +291,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CGBlockInfo &info) {
}
// Collect the layout chunks.
- llvm::SmallVector<BlockLayoutChunk, 16> layout;
+ SmallVector<BlockLayoutChunk, 16> layout;
layout.reserve(block->capturesCXXThis() +
(block->capture_end() - block->capture_begin()));
@@ -422,7 +422,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CGBlockInfo &info) {
// which has 7 bytes of padding, as opposed to the naive solution
// which might have less (?).
if (endAlign < maxFieldAlign) {
- llvm::SmallVectorImpl<BlockLayoutChunk>::iterator
+ SmallVectorImpl<BlockLayoutChunk>::iterator
li = layout.begin() + 1, le = layout.end();
// Look for something that the header end is already
@@ -433,7 +433,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CGBlockInfo &info) {
// If we found something that's naturally aligned for the end of
// the header, keep adding things...
if (li != le) {
- llvm::SmallVectorImpl<BlockLayoutChunk>::iterator first = li;
+ SmallVectorImpl<BlockLayoutChunk>::iterator first = li;
for (; li != le; ++li) {
assert(endAlign >= li->Alignment);
@@ -468,7 +468,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CGBlockInfo &info) {
// Slam everything else on now. This works because they have
// strictly decreasing alignment and we expect that size is always a
// multiple of alignment.
- for (llvm::SmallVectorImpl<BlockLayoutChunk>::iterator
+ for (SmallVectorImpl<BlockLayoutChunk>::iterator
li = layout.begin(), le = layout.end(); li != le; ++li) {
assert(endAlign >= li->Alignment);
li->setIndex(info, elementTypes.size());
@@ -507,7 +507,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) {
// Build the block descriptor.
llvm::Constant *descriptor = buildBlockDescriptor(CGM, blockInfo);
- const llvm::Type *intTy = ConvertType(getContext().IntTy);
+ llvm::Type *intTy = ConvertType(getContext().IntTy);
llvm::AllocaInst *blockAddr =
CreateTempAlloca(blockInfo.StructureType, "block");
@@ -617,10 +617,9 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) {
ImplicitCastExpr l2r(ImplicitCastExpr::OnStack, type, CK_LValueToRValue,
declRef, VK_RValue);
EmitExprAsInit(&l2r, &blockFieldPseudoVar,
- LValue::MakeAddr(blockField, type,
- getContext().getDeclAlign(variable)
- .getQuantity(),
- getContext()),
+ MakeAddrLValue(blockField, type,
+ getContext().getDeclAlign(variable)
+ .getQuantity()),
/*captured by init*/ false);
}
@@ -681,8 +680,8 @@ llvm::Type *CodeGenModule::getBlockDescriptorType() {
// const char *layout; // reserved
// };
BlockDescriptorType =
- llvm::StructType::createNamed("struct.__block_descriptor",
- UnsignedLongTy, UnsignedLongTy, NULL);
+ llvm::StructType::create("struct.__block_descriptor",
+ UnsignedLongTy, UnsignedLongTy, NULL);
// Now form a pointer to that.
BlockDescriptorType = llvm::PointerType::getUnqual(BlockDescriptorType);
@@ -703,13 +702,9 @@ llvm::Type *CodeGenModule::getGenericBlockLiteralType() {
// struct __block_descriptor *__descriptor;
// };
GenericBlockLiteralType =
- llvm::StructType::createNamed("struct.__block_literal_generic",
- VoidPtrTy,
- IntTy,
- IntTy,
- VoidPtrTy,
- BlockDescPtrTy,
- NULL);
+ llvm::StructType::create("struct.__block_literal_generic",
+ VoidPtrTy, IntTy, IntTy, VoidPtrTy,
+ BlockDescPtrTy, NULL);
return GenericBlockLiteralType;
}
@@ -723,7 +718,7 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E,
llvm::Value *Callee = EmitScalarExpr(E->getCallee());
// Get a pointer to the generic block literal.
- const llvm::Type *BlockLiteralTy =
+ llvm::Type *BlockLiteralTy =
llvm::PointerType::getUnqual(CGM.getGenericBlockLiteralType());
// Bitcast the callee to a block literal.
@@ -731,9 +726,9 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E,
Builder.CreateBitCast(Callee, BlockLiteralTy, "block.literal");
// Get the function pointer from the literal.
- llvm::Value *FuncPtr = Builder.CreateStructGEP(BlockLiteral, 3, "tmp");
+ llvm::Value *FuncPtr = Builder.CreateStructGEP(BlockLiteral, 3);
- BlockLiteral = Builder.CreateBitCast(BlockLiteral, VoidPtrTy, "tmp");
+ BlockLiteral = Builder.CreateBitCast(BlockLiteral, VoidPtrTy);
// Add the block literal.
CallArgList Args;
@@ -746,20 +741,16 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E,
E->arg_begin(), E->arg_end());
// Load the function.
- llvm::Value *Func = Builder.CreateLoad(FuncPtr, "tmp");
+ llvm::Value *Func = Builder.CreateLoad(FuncPtr);
const FunctionType *FuncTy = FnType->castAs<FunctionType>();
- QualType ResultType = FuncTy->getResultType();
-
- const CGFunctionInfo &FnInfo =
- CGM.getTypes().getFunctionInfo(ResultType, Args,
- FuncTy->getExtInfo());
+ const CGFunctionInfo &FnInfo = CGM.getTypes().getFunctionInfo(Args, FuncTy);
// Cast the function pointer to the right type.
- const llvm::Type *BlockFTy =
+ llvm::Type *BlockFTy =
CGM.getTypes().GetFunctionType(FnInfo, false);
- const llvm::Type *BlockFTyPtr = llvm::PointerType::getUnqual(BlockFTy);
+ llvm::Type *BlockFTyPtr = llvm::PointerType::getUnqual(BlockFTy);
Func = Builder.CreateBitCast(Func, BlockFTyPtr);
// And call the block.
@@ -783,7 +774,7 @@ llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const VarDecl *variable,
// to byref*.
addr = Builder.CreateLoad(addr);
- const llvm::PointerType *byrefPointerType
+ llvm::PointerType *byrefPointerType
= llvm::PointerType::get(BuildByRefType(variable), 0);
addr = Builder.CreateBitCast(addr, byrefPointerType,
"byref.addr");
@@ -863,7 +854,7 @@ static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
literal->setAlignment(blockInfo.BlockAlign.getQuantity());
// Return a constant of the appropriately-casted type.
- const llvm::Type *requiredType =
+ llvm::Type *requiredType =
CGM.getTypes().ConvertType(blockInfo.getBlockExpr()->getType());
return llvm::ConstantExpr::getBitCast(literal, requiredType);
}
@@ -918,7 +909,7 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
if (CGM.ReturnTypeUsesSRet(fnInfo))
blockInfo.UsesStret = true;
- const llvm::FunctionType *fnLLVMType =
+ llvm::FunctionType *fnLLVMType =
CGM.getTypes().GetFunctionType(fnInfo, fnType->isVariadic());
MangleBuffer name;
@@ -1005,7 +996,7 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
for (BlockDecl::capture_const_iterator ci = blockDecl->capture_begin(),
ce = blockDecl->capture_end(); ci != ce; ++ci) {
const VarDecl *variable = ci->getVariable();
- DI->setLocation(variable->getLocation());
+ DI->EmitLocation(Builder, variable->getLocation());
const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
if (capture.isConstant()) {
@@ -1065,7 +1056,7 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
// FIXME: it would be nice if these were mergeable with things with
// identical semantics.
- const llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI, false);
+ llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI, false);
llvm::Function *Fn =
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
@@ -1088,7 +1079,7 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
true);
StartFunction(FD, C.VoidTy, Fn, FI, args, SourceLocation());
- const llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
+ llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
llvm::Value *src = GetAddrOfLocalVar(&srcDecl);
src = Builder.CreateLoad(src);
@@ -1180,7 +1171,7 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
// FIXME: We'd like to put these into a mergable by content, with
// internal linkage.
- const llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI, false);
+ llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI, false);
llvm::Function *Fn =
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
@@ -1201,7 +1192,7 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
false, true);
StartFunction(FD, C.VoidTy, Fn, FI, args, SourceLocation());
- const llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
+ llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
llvm::Value *src = GetAddrOfLocalVar(&srcDecl);
src = Builder.CreateLoad(src);
@@ -1399,7 +1390,7 @@ public:
static llvm::Constant *
generateByrefCopyHelper(CodeGenFunction &CGF,
- const llvm::StructType &byrefType,
+ llvm::StructType &byrefType,
CodeGenModule::ByrefHelpers &byrefInfo) {
ASTContext &Context = CGF.getContext();
@@ -1416,7 +1407,7 @@ generateByrefCopyHelper(CodeGenFunction &CGF,
CGF.CGM.getTypes().getFunctionInfo(R, args, FunctionType::ExtInfo());
CodeGenTypes &Types = CGF.CGM.getTypes();
- const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
+ llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
// FIXME: We'd like to put these into a mergable by content, with
// internal linkage.
@@ -1438,7 +1429,7 @@ generateByrefCopyHelper(CodeGenFunction &CGF,
CGF.StartFunction(FD, R, Fn, FI, args, SourceLocation());
if (byrefInfo.needsCopy()) {
- const llvm::Type *byrefPtrType = byrefType.getPointerTo(0);
+ llvm::Type *byrefPtrType = byrefType.getPointerTo(0);
// dst->x
llvm::Value *destField = CGF.GetAddrOfLocalVar(&dst);
@@ -1462,7 +1453,7 @@ generateByrefCopyHelper(CodeGenFunction &CGF,
/// Build the copy helper for a __block variable.
static llvm::Constant *buildByrefCopyHelper(CodeGenModule &CGM,
- const llvm::StructType &byrefType,
+ llvm::StructType &byrefType,
CodeGenModule::ByrefHelpers &info) {
CodeGenFunction CGF(CGM);
return generateByrefCopyHelper(CGF, byrefType, info);
@@ -1471,7 +1462,7 @@ static llvm::Constant *buildByrefCopyHelper(CodeGenModule &CGM,
/// Generate code for a __block variable's dispose helper.
static llvm::Constant *
generateByrefDisposeHelper(CodeGenFunction &CGF,
- const llvm::StructType &byrefType,
+ llvm::StructType &byrefType,
CodeGenModule::ByrefHelpers &byrefInfo) {
ASTContext &Context = CGF.getContext();
QualType R = Context.VoidTy;
@@ -1484,7 +1475,7 @@ generateByrefDisposeHelper(CodeGenFunction &CGF,
CGF.CGM.getTypes().getFunctionInfo(R, args, FunctionType::ExtInfo());
CodeGenTypes &Types = CGF.CGM.getTypes();
- const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
+ llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
// FIXME: We'd like to put these into a mergable by content, with
// internal linkage.
@@ -1521,7 +1512,7 @@ generateByrefDisposeHelper(CodeGenFunction &CGF,
/// Build the dispose helper for a __block variable.
static llvm::Constant *buildByrefDisposeHelper(CodeGenModule &CGM,
- const llvm::StructType &byrefType,
+ llvm::StructType &byrefType,
CodeGenModule::ByrefHelpers &info) {
CodeGenFunction CGF(CGM);
return generateByrefDisposeHelper(CGF, byrefType, info);
@@ -1529,7 +1520,7 @@ static llvm::Constant *buildByrefDisposeHelper(CodeGenModule &CGM,
///
template <class T> static T *buildByrefHelpers(CodeGenModule &CGM,
- const llvm::StructType &byrefTy,
+ llvm::StructType &byrefTy,
T &byrefInfo) {
// Increase the field's alignment to be at least pointer alignment,
// since the layout of the byref struct will guarantee at least that.
@@ -1553,7 +1544,7 @@ template <class T> static T *buildByrefHelpers(CodeGenModule &CGM,
}
CodeGenModule::ByrefHelpers *
-CodeGenFunction::buildByrefHelpers(const llvm::StructType &byrefType,
+CodeGenFunction::buildByrefHelpers(llvm::StructType &byrefType,
const AutoVarEmission &emission) {
const VarDecl &var = *emission.Variable;
QualType type = var.getType();
@@ -1658,18 +1649,18 @@ llvm::Value *CodeGenFunction::BuildBlockByrefAddress(llvm::Value *BaseAddr,
/// T x;
/// } x
///
-const llvm::Type *CodeGenFunction::BuildByRefType(const VarDecl *D) {
- std::pair<const llvm::Type *, unsigned> &Info = ByRefValueInfo[D];
+llvm::Type *CodeGenFunction::BuildByRefType(const VarDecl *D) {
+ std::pair<llvm::Type *, unsigned> &Info = ByRefValueInfo[D];
if (Info.first)
return Info.first;
QualType Ty = D->getType();
- llvm::SmallVector<llvm::Type *, 8> types;
+ SmallVector<llvm::Type *, 8> types;
llvm::StructType *ByRefType =
- llvm::StructType::createNamed(getLLVMContext(),
- "struct.__block_byref_" + D->getNameAsString());
+ llvm::StructType::create(getLLVMContext(),
+ "struct.__block_byref_" + D->getNameAsString());
// void *__isa;
types.push_back(Int8PtrTy);
@@ -1742,7 +1733,7 @@ void CodeGenFunction::emitByrefStructureInit(const AutoVarEmission &emission) {
llvm::Value *addr = emission.Address;
// That's an alloca of the byref structure type.
- const llvm::StructType *byrefType = cast<llvm::StructType>(
+ llvm::StructType *byrefType = cast<llvm::StructType>(
cast<llvm::PointerType>(addr->getType())->getElementType());
// Build the byref helpers if necessary. This is null if we don't need any.
@@ -1812,8 +1803,63 @@ namespace {
/// to be done externally.
void CodeGenFunction::enterByrefCleanup(const AutoVarEmission &emission) {
// We don't enter this cleanup if we're in pure-GC mode.
- if (CGM.getLangOptions().getGCMode() == LangOptions::GCOnly)
+ if (CGM.getLangOptions().getGC() == LangOptions::GCOnly)
return;
EHStack.pushCleanup<CallBlockRelease>(NormalAndEHCleanup, emission.Address);
}
+
+/// Adjust the declaration of something from the blocks API.
+static void configureBlocksRuntimeObject(CodeGenModule &CGM,
+ llvm::Constant *C) {
+ if (!CGM.getLangOptions().BlocksRuntimeOptional) return;
+
+ llvm::GlobalValue *GV = cast<llvm::GlobalValue>(C->stripPointerCasts());
+ if (GV->isDeclaration() &&
+ GV->getLinkage() == llvm::GlobalValue::ExternalLinkage)
+ GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+}
+
+llvm::Constant *CodeGenModule::getBlockObjectDispose() {
+ if (BlockObjectDispose)
+ return BlockObjectDispose;
+
+ llvm::Type *args[] = { Int8PtrTy, Int32Ty };
+ llvm::FunctionType *fty
+ = llvm::FunctionType::get(VoidTy, args, false);
+ BlockObjectDispose = CreateRuntimeFunction(fty, "_Block_object_dispose");
+ configureBlocksRuntimeObject(*this, BlockObjectDispose);
+ return BlockObjectDispose;
+}
+
+llvm::Constant *CodeGenModule::getBlockObjectAssign() {
+ if (BlockObjectAssign)
+ return BlockObjectAssign;
+
+ llvm::Type *args[] = { Int8PtrTy, Int8PtrTy, Int32Ty };
+ llvm::FunctionType *fty
+ = llvm::FunctionType::get(VoidTy, args, false);
+ BlockObjectAssign = CreateRuntimeFunction(fty, "_Block_object_assign");
+ configureBlocksRuntimeObject(*this, BlockObjectAssign);
+ return BlockObjectAssign;
+}
+
+llvm::Constant *CodeGenModule::getNSConcreteGlobalBlock() {
+ if (NSConcreteGlobalBlock)
+ return NSConcreteGlobalBlock;
+
+ NSConcreteGlobalBlock = GetOrCreateLLVMGlobal("_NSConcreteGlobalBlock",
+ Int8PtrTy->getPointerTo(), 0);
+ configureBlocksRuntimeObject(*this, NSConcreteGlobalBlock);
+ return NSConcreteGlobalBlock;
+}
+
+llvm::Constant *CodeGenModule::getNSConcreteStackBlock() {
+ if (NSConcreteStackBlock)
+ return NSConcreteStackBlock;
+
+ NSConcreteStackBlock = GetOrCreateLLVMGlobal("_NSConcreteStackBlock",
+ Int8PtrTy->getPointerTo(), 0);
+ configureBlocksRuntimeObject(*this, NSConcreteStackBlock);
+ return NSConcreteStackBlock;
+}
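
The block of new getters above lazily materializes declarations for the four blocks-runtime entry points, and configureBlocksRuntimeObject relaxes them from external to extern_weak linkage when LangOptions.BlocksRuntimeOptional is set, so a binary can load even where no blocks runtime is present. A hypothetical client-side sketch of what that weak linkage enables (the guard helper is illustrative; the symbol name is the real runtime entry point):

    // Sketch: with weak (extern_weak) linkage the symbol resolves to null
    // when the blocks runtime is absent, so presence is testable at run time.
    extern "C" void _Block_object_dispose(const void *, int)
        __attribute__((weak));

    void disposeIfRuntimePresent(const void *object, int flags) {
      if (&_Block_object_dispose != 0)  // null when the runtime is missing
        _Block_object_dispose(object, flags);
    }
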
diff --git a/lib/CodeGen/CGBlocks.h b/lib/CodeGen/CGBlocks.h
index 4d8dead2be8c..6e71c1fdc041 100644
--- a/lib/CodeGen/CGBlocks.h
+++ b/lib/CodeGen/CGBlocks.h
@@ -176,7 +176,7 @@ public:
/// because it gets set later in the block-creation process.
mutable bool UsesStret : 1;
- const llvm::StructType *StructureType;
+ llvm::StructType *StructureType;
const BlockExpr *Block;
CharUnits BlockSize;
CharUnits BlockAlign;
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 1566bd9e6697..ec0ca424220e 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -27,24 +27,34 @@ using namespace clang;
using namespace CodeGen;
using namespace llvm;
-static void EmitMemoryBarrier(CodeGenFunction &CGF,
- bool LoadLoad, bool LoadStore,
- bool StoreLoad, bool StoreStore,
- bool Device) {
- Value *True = CGF.Builder.getTrue();
- Value *False = CGF.Builder.getFalse();
- Value *C[5] = { LoadLoad ? True : False,
- LoadStore ? True : False,
- StoreLoad ? True : False,
- StoreStore ? True : False,
- Device ? True : False };
- CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(Intrinsic::memory_barrier), C);
+/// getBuiltinLibFunction - Given a builtin id for a function like
+/// "__builtin_fabsf", return a Function* for "fabsf".
+llvm::Value *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
+ unsigned BuiltinID) {
+ assert(Context.BuiltinInfo.isLibFunction(BuiltinID));
+
+ // Get the name, skip over the __builtin_ prefix (if necessary).
+ StringRef Name;
+ GlobalDecl D(FD);
+
+ // If the builtin has been declared explicitly with an assembler label,
+ // use the mangled name. This differs from the plain label on platforms
+ // that prefix labels.
+ if (FD->hasAttr<AsmLabelAttr>())
+ Name = getMangledName(D);
+ else
+ Name = Context.BuiltinInfo.GetName(BuiltinID) + 10;
+
+ llvm::FunctionType *Ty =
+ cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));
+
+ return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
}
/// Emit the conversions required to turn the given value into an
/// integer of the given size.
static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
- QualType T, const llvm::IntegerType *IntType) {
+ QualType T, llvm::IntegerType *IntType) {
V = CGF.EmitToMemory(V, T);
if (V->getType()->isPointerTy())
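
The getBuiltinLibFunction helper added at the top of this file maps a builtin such as __builtin_fabsf to its library counterpart fabsf; the `+ 10` works because the `__builtin_` prefix is exactly ten characters long. A standalone illustration of that pointer arithmetic (the helper name is illustrative):

    #include <cassert>
    #include <cstring>

    // Skipping 10 characters drops the "__builtin_" prefix, mirroring
    // `Name = Context.BuiltinInfo.GetName(BuiltinID) + 10;` above.
    const char *stripBuiltinPrefix(const char *name) {
      assert(std::strncmp(name, "__builtin_", 10) == 0);
      return name + 10;  // stripBuiltinPrefix("__builtin_fabsf") -> "fabsf"
    }
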
@@ -55,7 +65,7 @@ static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
}
static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
- QualType T, const llvm::Type *ResultType) {
+ QualType T, llvm::Type *ResultType) {
V = CGF.EmitFromMemory(V, T);
if (ResultType->isPointerTy())
@@ -65,25 +75,11 @@ static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
return V;
}
-// The atomic builtins are also full memory barriers. This is a utility for
-// wrapping a call to the builtins with memory barriers.
-static Value *EmitCallWithBarrier(CodeGenFunction &CGF, Value *Fn,
- ArrayRef<Value *> Args) {
- // FIXME: We need a target hook for whether this applies to device memory or
- // not.
- bool Device = true;
-
- // Create barriers both before and after the call.
- EmitMemoryBarrier(CGF, true, true, true, true, Device);
- Value *Result = CGF.Builder.CreateCall(Fn, Args);
- EmitMemoryBarrier(CGF, true, true, true, true, Device);
- return Result;
-}
-
/// Utility to insert an atomic instruction based on Instrinsic::ID
/// and the expression node.
static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
- Intrinsic::ID Id, const CallExpr *E) {
+ llvm::AtomicRMWInst::BinOp Kind,
+ const CallExpr *E) {
QualType T = E->getType();
assert(E->getArg(0)->getType()->isPointerType());
assert(CGF.getContext().hasSameUnqualifiedType(T,
@@ -99,16 +95,15 @@ static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
CGF.getContext().getTypeSize(T));
llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
- llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
- llvm::Value *AtomF = CGF.CGM.getIntrinsic(Id, IntrinsicTypes);
-
llvm::Value *Args[2];
Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
Args[1] = CGF.EmitScalarExpr(E->getArg(1));
- const llvm::Type *ValueType = Args[1]->getType();
+ llvm::Type *ValueType = Args[1]->getType();
Args[1] = EmitToInt(CGF, Args[1], T, IntType);
- llvm::Value *Result = EmitCallWithBarrier(CGF, AtomF, Args);
+ llvm::Value *Result =
+ CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
+ llvm::SequentiallyConsistent);
Result = EmitFromInt(CGF, Result, T, ValueType);
return RValue::get(Result);
}
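
This hunk carries the core of the commit's atomics rework: the __sync_* builtins no longer call llvm.atomic.* intrinsics wrapped in explicit llvm.memory.barrier calls (both removed above), but emit LLVM 3.0's native atomicrmw instruction with sequentially consistent ordering, which subsumes the full barriers the old code emitted. A reduced sketch of the lowering, assuming an IRBuilder with a valid insert point and LLVM 3.0-era header paths (the helper name is illustrative):

    #include "llvm/Support/IRBuilder.h"

    // Sketch: one atomicrmw replaces barrier + intrinsic call + barrier.
    llvm::Value *emitFetchAndAdd(llvm::IRBuilder<> &Builder,
                                 llvm::Value *Ptr, llvm::Value *Amount) {
      return Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Add, Ptr, Amount,
                                     llvm::SequentiallyConsistent);
    }
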
@@ -117,7 +112,8 @@ static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
/// the expression node, where the return value is the result of the
/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
- Intrinsic::ID Id, const CallExpr *E,
+ llvm::AtomicRMWInst::BinOp Kind,
+ const CallExpr *E,
Instruction::BinaryOps Op) {
QualType T = E->getType();
assert(E->getArg(0)->getType()->isPointerType());
@@ -134,16 +130,15 @@ static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
CGF.getContext().getTypeSize(T));
llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
- llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
- llvm::Value *AtomF = CGF.CGM.getIntrinsic(Id, IntrinsicTypes);
-
llvm::Value *Args[2];
Args[1] = CGF.EmitScalarExpr(E->getArg(1));
- const llvm::Type *ValueType = Args[1]->getType();
+ llvm::Type *ValueType = Args[1]->getType();
Args[1] = EmitToInt(CGF, Args[1], T, IntType);
Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
- llvm::Value *Result = EmitCallWithBarrier(CGF, AtomF, Args);
+ llvm::Value *Result =
+ CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
+ llvm::SequentiallyConsistent);
Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
Result = EmitFromInt(CGF, Result, T, ValueType);
return RValue::get(Result);
@@ -157,21 +152,26 @@ static Value *EmitFAbs(CodeGenFunction &CGF, Value *V, QualType ValTy) {
StringRef FnName;
switch (ValTyP->getKind()) {
- default: assert(0 && "Isn't a scalar fp type!");
+ default: llvm_unreachable("Isn't a scalar fp type!");
case BuiltinType::Float: FnName = "fabsf"; break;
case BuiltinType::Double: FnName = "fabs"; break;
case BuiltinType::LongDouble: FnName = "fabsl"; break;
}
// The prototype is something that takes and returns whatever V's type is.
- llvm::Type *ArgTys[] = { V->getType() };
- llvm::FunctionType *FT = llvm::FunctionType::get(V->getType(), ArgTys,
+ llvm::FunctionType *FT = llvm::FunctionType::get(V->getType(), V->getType(),
false);
llvm::Value *Fn = CGF.CGM.CreateRuntimeFunction(FT, FnName);
return CGF.Builder.CreateCall(Fn, V, "abs");
}
+static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *Fn,
+ const CallExpr *E, llvm::Value *calleeValue) {
+ return CGF.EmitCall(E->getCallee()->getType(), calleeValue,
+ ReturnValueSlot(), E->arg_begin(), E->arg_end(), Fn);
+}
+
RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
unsigned BuiltinID, const CallExpr *E) {
// See if we can constant fold this builtin. If so, don't emit it at all.
@@ -195,7 +195,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_va_start:
case Builtin::BI__builtin_va_end: {
Value *ArgValue = EmitVAListRef(E->getArg(0));
- const llvm::Type *DestType = Int8PtrTy;
+ llvm::Type *DestType = Int8PtrTy;
if (ArgValue->getType() != DestType)
ArgValue = Builder.CreateBitCast(ArgValue, DestType,
ArgValue->getName().data());
@@ -208,7 +208,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *DstPtr = EmitVAListRef(E->getArg(0));
Value *SrcPtr = EmitVAListRef(E->getArg(1));
- const llvm::Type *Type = Int8PtrTy;
+ llvm::Type *Type = Int8PtrTy;
DstPtr = Builder.CreateBitCast(DstPtr, Type);
SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
@@ -236,8 +236,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
- const llvm::Type *ResultType = ConvertType(E->getType());
- Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Result = Builder.CreateCall(F, ArgValue);
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
"cast");
@@ -251,8 +251,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
- const llvm::Type *ResultType = ConvertType(E->getType());
- Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Result = Builder.CreateCall(F, ArgValue);
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
"cast");
@@ -267,9 +267,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
- const llvm::Type *ResultType = ConvertType(E->getType());
- Value *Tmp = Builder.CreateAdd(Builder.CreateCall(F, ArgValue, "tmp"),
- llvm::ConstantInt::get(ArgType, 1), "tmp");
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Tmp = Builder.CreateAdd(Builder.CreateCall(F, ArgValue),
+ llvm::ConstantInt::get(ArgType, 1));
Value *Zero = llvm::Constant::getNullValue(ArgType);
Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
@@ -287,10 +287,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
- const llvm::Type *ResultType = ConvertType(E->getType());
- Value *Tmp = Builder.CreateCall(F, ArgValue, "tmp");
- Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1),
- "tmp");
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Tmp = Builder.CreateCall(F, ArgValue);
+ Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
"cast");
@@ -304,8 +303,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
- const llvm::Type *ResultType = ConvertType(E->getType());
- Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Result = Builder.CreateCall(F, ArgValue);
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
"cast");
@@ -327,7 +326,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *ArgValue = EmitScalarExpr(E->getArg(0));
llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::bswap, ArgType);
- return RValue::get(Builder.CreateCall(F, ArgValue, "tmp"));
+ return RValue::get(Builder.CreateCall(F, ArgValue));
}
case Builtin::BI__builtin_object_size: {
// We pass this builtin onto the optimizer so that it can
@@ -381,7 +380,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *Exponent = EmitScalarExpr(E->getArg(1));
llvm::Type *ArgType = Base->getType();
Value *F = CGM.getIntrinsic(Intrinsic::powi, ArgType);
- return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp"));
+ return RValue::get(Builder.CreateCall2(F, Base, Exponent));
}
case Builtin::BI__builtin_isgreater:
@@ -396,7 +395,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *RHS = EmitScalarExpr(E->getArg(1));
switch (BuiltinID) {
- default: assert(0 && "Unknown ordered comparison");
+ default: llvm_unreachable("Unknown ordered comparison");
case Builtin::BI__builtin_isgreater:
LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
break;
@@ -417,13 +416,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
break;
}
// ZExt bool to int type.
- return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType()),
- "tmp"));
+ return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
}
case Builtin::BI__builtin_isnan: {
Value *V = EmitScalarExpr(E->getArg(0));
V = Builder.CreateFCmpUNO(V, V, "cmp");
- return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()), "tmp"));
+ return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
}
case Builtin::BI__builtin_isinf: {
@@ -432,7 +430,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
V = EmitFAbs(*this, V, E->getArg(0)->getType());
V = Builder.CreateFCmpOEQ(V, ConstantFP::getInfinity(V->getType()),"isinf");
- return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()), "tmp"));
+ return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
}
// TODO: BI__builtin_isinf_sign
@@ -457,7 +455,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
}
case Builtin::BI__builtin_isfinite: {
- // isfinite(x) --> x == x && fabs(x) != infinity; }
+ // isfinite(x) --> x == x && fabs(x) != infinity;
Value *V = EmitScalarExpr(E->getArg(0));
Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
@@ -471,7 +469,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_fpclassify: {
Value *V = EmitScalarExpr(E->getArg(5));
- const llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
+ llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
// Create Result
BasicBlock *Begin = Builder.GetInsertBlock();
@@ -530,7 +528,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BIalloca:
case Builtin::BI__builtin_alloca: {
Value *Size = EmitScalarExpr(E->getArg(0));
- return RValue::get(Builder.CreateAlloca(Builder.getInt8Ty(), Size, "tmp"));
+ return RValue::get(Builder.CreateAlloca(Builder.getInt8Ty(), Size));
}
case Builtin::BIbzero:
case Builtin::BI__builtin_bzero: {
@@ -550,11 +548,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin___memcpy_chk: {
// fold __builtin_memcpy_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
- if (!E->getArg(2)->isEvaluatable(CGM.getContext()) ||
- !E->getArg(3)->isEvaluatable(CGM.getContext()))
+ llvm::APSInt Size, DstSize;
+ if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
+ !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
break;
- llvm::APSInt Size = E->getArg(2)->EvaluateAsInt(CGM.getContext());
- llvm::APSInt DstSize = E->getArg(3)->EvaluateAsInt(CGM.getContext());
if (Size.ugt(DstSize))
break;
Value *Dest = EmitScalarExpr(E->getArg(0));
@@ -575,11 +572,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin___memmove_chk: {
// fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
- if (!E->getArg(2)->isEvaluatable(CGM.getContext()) ||
- !E->getArg(3)->isEvaluatable(CGM.getContext()))
+ llvm::APSInt Size, DstSize;
+ if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
+ !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
break;
- llvm::APSInt Size = E->getArg(2)->EvaluateAsInt(CGM.getContext());
- llvm::APSInt DstSize = E->getArg(3)->EvaluateAsInt(CGM.getContext());
if (Size.ugt(DstSize))
break;
Value *Dest = EmitScalarExpr(E->getArg(0));
@@ -608,11 +604,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
}
case Builtin::BI__builtin___memset_chk: {
// fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
- if (!E->getArg(2)->isEvaluatable(CGM.getContext()) ||
- !E->getArg(3)->isEvaluatable(CGM.getContext()))
+ llvm::APSInt Size, DstSize;
+ if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
+ !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
break;
- llvm::APSInt Size = E->getArg(2)->EvaluateAsInt(CGM.getContext());
- llvm::APSInt DstSize = E->getArg(3)->EvaluateAsInt(CGM.getContext());
if (Size.ugt(DstSize))
break;
Value *Address = EmitScalarExpr(E->getArg(0));
@@ -640,13 +635,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
}
case Builtin::BI__builtin_return_address: {
Value *Depth = EmitScalarExpr(E->getArg(0));
- Depth = Builder.CreateIntCast(Depth, Int32Ty, false, "tmp");
+ Depth = Builder.CreateIntCast(Depth, Int32Ty, false);
Value *F = CGM.getIntrinsic(Intrinsic::returnaddress);
return RValue::get(Builder.CreateCall(F, Depth));
}
case Builtin::BI__builtin_frame_address: {
Value *Depth = EmitScalarExpr(E->getArg(0));
- Depth = Builder.CreateIntCast(Depth, Int32Ty, false, "tmp");
+ Depth = Builder.CreateIntCast(Depth, Int32Ty, false);
Value *F = CGM.getIntrinsic(Intrinsic::frameaddress);
return RValue::get(Builder.CreateCall(F, Depth));
}
@@ -661,7 +656,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(Result);
}
case Builtin::BI__builtin_dwarf_sp_column: {
- const llvm::IntegerType *Ty
+ llvm::IntegerType *Ty
= cast<llvm::IntegerType>(ConvertType(E->getType()));
int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
if (Column == -1) {
@@ -680,7 +675,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *Int = EmitScalarExpr(E->getArg(0));
Value *Ptr = EmitScalarExpr(E->getArg(1));
- const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
+ llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
"LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
Value *F = CGM.getIntrinsic(IntTy->getBitWidth() == 32
@@ -775,82 +770,82 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__sync_lock_test_and_set:
case Builtin::BI__sync_lock_release:
case Builtin::BI__sync_swap:
- assert(0 && "Shouldn't make it through sema");
+ llvm_unreachable("Shouldn't make it through sema");
case Builtin::BI__sync_fetch_and_add_1:
case Builtin::BI__sync_fetch_and_add_2:
case Builtin::BI__sync_fetch_and_add_4:
case Builtin::BI__sync_fetch_and_add_8:
case Builtin::BI__sync_fetch_and_add_16:
- return EmitBinaryAtomic(*this, Intrinsic::atomic_load_add, E);
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
case Builtin::BI__sync_fetch_and_sub_1:
case Builtin::BI__sync_fetch_and_sub_2:
case Builtin::BI__sync_fetch_and_sub_4:
case Builtin::BI__sync_fetch_and_sub_8:
case Builtin::BI__sync_fetch_and_sub_16:
- return EmitBinaryAtomic(*this, Intrinsic::atomic_load_sub, E);
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
case Builtin::BI__sync_fetch_and_or_1:
case Builtin::BI__sync_fetch_and_or_2:
case Builtin::BI__sync_fetch_and_or_4:
case Builtin::BI__sync_fetch_and_or_8:
case Builtin::BI__sync_fetch_and_or_16:
- return EmitBinaryAtomic(*this, Intrinsic::atomic_load_or, E);
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
case Builtin::BI__sync_fetch_and_and_1:
case Builtin::BI__sync_fetch_and_and_2:
case Builtin::BI__sync_fetch_and_and_4:
case Builtin::BI__sync_fetch_and_and_8:
case Builtin::BI__sync_fetch_and_and_16:
- return EmitBinaryAtomic(*this, Intrinsic::atomic_load_and, E);
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
case Builtin::BI__sync_fetch_and_xor_1:
case Builtin::BI__sync_fetch_and_xor_2:
case Builtin::BI__sync_fetch_and_xor_4:
case Builtin::BI__sync_fetch_and_xor_8:
case Builtin::BI__sync_fetch_and_xor_16:
- return EmitBinaryAtomic(*this, Intrinsic::atomic_load_xor, E);
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
// Clang extensions: not overloaded yet.
case Builtin::BI__sync_fetch_and_min:
- return EmitBinaryAtomic(*this, Intrinsic::atomic_load_min, E);
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
case Builtin::BI__sync_fetch_and_max:
- return EmitBinaryAtomic(*this, Intrinsic::atomic_load_max, E);
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
case Builtin::BI__sync_fetch_and_umin:
- return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umin, E);
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
case Builtin::BI__sync_fetch_and_umax:
- return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umax, E);
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);
case Builtin::BI__sync_add_and_fetch_1:
case Builtin::BI__sync_add_and_fetch_2:
case Builtin::BI__sync_add_and_fetch_4:
case Builtin::BI__sync_add_and_fetch_8:
case Builtin::BI__sync_add_and_fetch_16:
- return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_add, E,
+ return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
llvm::Instruction::Add);
case Builtin::BI__sync_sub_and_fetch_1:
case Builtin::BI__sync_sub_and_fetch_2:
case Builtin::BI__sync_sub_and_fetch_4:
case Builtin::BI__sync_sub_and_fetch_8:
case Builtin::BI__sync_sub_and_fetch_16:
- return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_sub, E,
+ return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
llvm::Instruction::Sub);
case Builtin::BI__sync_and_and_fetch_1:
case Builtin::BI__sync_and_and_fetch_2:
case Builtin::BI__sync_and_and_fetch_4:
case Builtin::BI__sync_and_and_fetch_8:
case Builtin::BI__sync_and_and_fetch_16:
- return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_and, E,
+ return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
llvm::Instruction::And);
case Builtin::BI__sync_or_and_fetch_1:
case Builtin::BI__sync_or_and_fetch_2:
case Builtin::BI__sync_or_and_fetch_4:
case Builtin::BI__sync_or_and_fetch_8:
case Builtin::BI__sync_or_and_fetch_16:
- return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_or, E,
+ return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
llvm::Instruction::Or);
case Builtin::BI__sync_xor_and_fetch_1:
case Builtin::BI__sync_xor_and_fetch_2:
case Builtin::BI__sync_xor_and_fetch_4:
case Builtin::BI__sync_xor_and_fetch_8:
case Builtin::BI__sync_xor_and_fetch_16:
- return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_xor, E,
+ return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
llvm::Instruction::Xor);
case Builtin::BI__sync_val_compare_and_swap_1:
@@ -867,18 +862,16 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::IntegerType::get(getLLVMContext(),
getContext().getTypeSize(T));
llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
- llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
- Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap,
- IntrinsicTypes);
Value *Args[3];
Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
Args[1] = EmitScalarExpr(E->getArg(1));
- const llvm::Type *ValueType = Args[1]->getType();
+ llvm::Type *ValueType = Args[1]->getType();
Args[1] = EmitToInt(*this, Args[1], T, IntType);
Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);
- Value *Result = EmitCallWithBarrier(*this, AtomF, Args);
+ Value *Result = Builder.CreateAtomicCmpXchg(Args[0], Args[1], Args[2],
+ llvm::SequentiallyConsistent);
Result = EmitFromInt(*this, Result, T, ValueType);
return RValue::get(Result);
}
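
__sync_val_compare_and_swap gets the same treatment in this hunk: a single cmpxchg instruction replaces the barrier-wrapped llvm.atomic.cmp.swap intrinsic. A matching sketch (note that cmpxchg of this era returns only the previous value, not a {value, success} pair):

    // Sketch: seq_cst compare-and-swap; the result is the value that was in
    // memory before the operation, which __sync_val_compare_and_swap returns.
    llvm::Value *emitValCAS(llvm::IRBuilder<> &Builder, llvm::Value *Ptr,
                            llvm::Value *Expected, llvm::Value *Desired) {
      return Builder.CreateAtomicCmpXchg(Ptr, Expected, Desired,
                                         llvm::SequentiallyConsistent);
    }
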
@@ -897,9 +890,6 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::IntegerType::get(getLLVMContext(),
getContext().getTypeSize(T));
llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
- llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
- Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap,
- IntrinsicTypes);
Value *Args[3];
Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
@@ -907,7 +897,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);
Value *OldVal = Args[1];
- Value *PrevVal = EmitCallWithBarrier(*this, AtomF, Args);
+ Value *PrevVal = Builder.CreateAtomicCmpXchg(Args[0], Args[1], Args[2],
+ llvm::SequentiallyConsistent);
Value *Result = Builder.CreateICmpEQ(PrevVal, OldVal);
// zext bool to int.
Result = Builder.CreateZExt(Result, ConvertType(E->getType()));
@@ -919,14 +910,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__sync_swap_4:
case Builtin::BI__sync_swap_8:
case Builtin::BI__sync_swap_16:
- return EmitBinaryAtomic(*this, Intrinsic::atomic_swap, E);
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
case Builtin::BI__sync_lock_test_and_set_1:
case Builtin::BI__sync_lock_test_and_set_2:
case Builtin::BI__sync_lock_test_and_set_4:
case Builtin::BI__sync_lock_test_and_set_8:
case Builtin::BI__sync_lock_test_and_set_16:
- return EmitBinaryAtomic(*this, Intrinsic::atomic_swap, E);
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
case Builtin::BI__sync_lock_release_1:
case Builtin::BI__sync_lock_release_2:
@@ -934,32 +925,95 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__sync_lock_release_8:
case Builtin::BI__sync_lock_release_16: {
Value *Ptr = EmitScalarExpr(E->getArg(0));
- const llvm::Type *ElTy =
+ llvm::Type *ElLLVMTy =
cast<llvm::PointerType>(Ptr->getType())->getElementType();
llvm::StoreInst *Store =
- Builder.CreateStore(llvm::Constant::getNullValue(ElTy), Ptr);
- Store->setVolatile(true);
+ Builder.CreateStore(llvm::Constant::getNullValue(ElLLVMTy), Ptr);
+ QualType ElTy = E->getArg(0)->getType()->getPointeeType();
+ CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
+ Store->setAlignment(StoreSize.getQuantity());
+ Store->setAtomic(llvm::Release);
return RValue::get(0);
}
case Builtin::BI__sync_synchronize: {
- // We assume like gcc appears to, that this only applies to cached memory.
- EmitMemoryBarrier(*this, true, true, true, true, false);
+ // We assume this is supposed to correspond to a C++0x-style
+ // sequentially-consistent fence (i.e. this is only usable for
+ // synchronization, not device I/O or anything like that). This intrinsic
+ // is really badly designed in the sense that in theory, there isn't
+ // any way to safely use it... but in practice, it mostly works
+ // to use it with non-atomic loads and stores to get acquire/release
+ // semantics.
+ Builder.CreateFence(llvm::SequentiallyConsistent);
return RValue::get(0);
}
- case Builtin::BI__builtin_llvm_memory_barrier: {
- Value *C[5] = {
- EmitScalarExpr(E->getArg(0)),
- EmitScalarExpr(E->getArg(1)),
- EmitScalarExpr(E->getArg(2)),
- EmitScalarExpr(E->getArg(3)),
- EmitScalarExpr(E->getArg(4))
- };
- Builder.CreateCall(CGM.getIntrinsic(Intrinsic::memory_barrier), C);
+ case Builtin::BI__atomic_thread_fence:
+ case Builtin::BI__atomic_signal_fence: {
+ llvm::SynchronizationScope Scope;
+ if (BuiltinID == Builtin::BI__atomic_signal_fence)
+ Scope = llvm::SingleThread;
+ else
+ Scope = llvm::CrossThread;
+ Value *Order = EmitScalarExpr(E->getArg(0));
+ if (isa<llvm::ConstantInt>(Order)) {
+ int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
+ switch (ord) {
+ case 0: // memory_order_relaxed
+ default: // invalid order
+ break;
+ case 1: // memory_order_consume
+ case 2: // memory_order_acquire
+ Builder.CreateFence(llvm::Acquire, Scope);
+ break;
+ case 3: // memory_order_release
+ Builder.CreateFence(llvm::Release, Scope);
+ break;
+ case 4: // memory_order_acq_rel
+ Builder.CreateFence(llvm::AcquireRelease, Scope);
+ break;
+ case 5: // memory_order_seq_cst
+ Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
+ break;
+ }
+ return RValue::get(0);
+ }
+
+ llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
+ AcquireBB = createBasicBlock("acquire", CurFn);
+ ReleaseBB = createBasicBlock("release", CurFn);
+ AcqRelBB = createBasicBlock("acqrel", CurFn);
+ SeqCstBB = createBasicBlock("seqcst", CurFn);
+ llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
+
+ Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
+ llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
+
+ Builder.SetInsertPoint(AcquireBB);
+ Builder.CreateFence(llvm::Acquire, Scope);
+ Builder.CreateBr(ContBB);
+ SI->addCase(Builder.getInt32(1), AcquireBB);
+ SI->addCase(Builder.getInt32(2), AcquireBB);
+
+ Builder.SetInsertPoint(ReleaseBB);
+ Builder.CreateFence(llvm::Release, Scope);
+ Builder.CreateBr(ContBB);
+ SI->addCase(Builder.getInt32(3), ReleaseBB);
+
+ Builder.SetInsertPoint(AcqRelBB);
+ Builder.CreateFence(llvm::AcquireRelease, Scope);
+ Builder.CreateBr(ContBB);
+ SI->addCase(Builder.getInt32(4), AcqRelBB);
+
+ Builder.SetInsertPoint(SeqCstBB);
+ Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
+ Builder.CreateBr(ContBB);
+ SI->addCase(Builder.getInt32(5), SeqCstBB);
+
+ Builder.SetInsertPoint(ContBB);
return RValue::get(0);
}
-
+
// Library functions with special handling.
case Builtin::BIsqrt:
case Builtin::BIsqrtf:
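
The __atomic_thread_fence/__atomic_signal_fence lowering added in the hunk above maps the C++0x memory-order encoding (0 relaxed, 1 consume, 2 acquire, 3 release, 4 acq_rel, 5 seq_cst) onto fence instructions, falling back to the emitted switch over the four meaningful orderings when the order is not a compile-time constant. A usage sketch in that numeric encoding (the function names are illustrative):

    // Sketch: a constant order lowers to a single `fence` instruction; a
    // variable order produces the switch emitted above.
    void publish(int *data, int *ready) {
      *data = 42;
      __atomic_thread_fence(3);  // 3 == memory_order_release
      *ready = 1;
    }

    void fenceAt(int order) {
      // singlethread scope: orders only against signal handlers on this
      // thread, so it typically needs no hardware fence.
      __atomic_signal_fence(order);
    }
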
@@ -982,7 +1036,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *Exponent = EmitScalarExpr(E->getArg(1));
llvm::Type *ArgType = Base->getType();
Value *F = CGM.getIntrinsic(Intrinsic::pow, ArgType);
- return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp"));
+ return RValue::get(Builder.CreateCall2(F, Base, Exponent));
}
case Builtin::BIfma:
@@ -997,8 +1051,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *F = CGM.getIntrinsic(Intrinsic::fma, ArgType);
return RValue::get(Builder.CreateCall3(F, FirstArg,
EmitScalarExpr(E->getArg(1)),
- EmitScalarExpr(E->getArg(2)),
- "tmp"));
+ EmitScalarExpr(E->getArg(2))));
}
case Builtin::BI__builtin_signbit:
@@ -1007,25 +1060,40 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
LLVMContext &C = CGM.getLLVMContext();
Value *Arg = EmitScalarExpr(E->getArg(0));
- const llvm::Type *ArgTy = Arg->getType();
+ llvm::Type *ArgTy = Arg->getType();
if (ArgTy->isPPC_FP128Ty())
break; // FIXME: I'm not sure what the right implementation is here.
int ArgWidth = ArgTy->getPrimitiveSizeInBits();
- const llvm::Type *ArgIntTy = llvm::IntegerType::get(C, ArgWidth);
+ llvm::Type *ArgIntTy = llvm::IntegerType::get(C, ArgWidth);
Value *BCArg = Builder.CreateBitCast(Arg, ArgIntTy);
Value *ZeroCmp = llvm::Constant::getNullValue(ArgIntTy);
Value *Result = Builder.CreateICmpSLT(BCArg, ZeroCmp);
return RValue::get(Builder.CreateZExt(Result, ConvertType(E->getType())));
}
+ case Builtin::BI__builtin_annotation: {
+ llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::annotation,
+ AnnVal->getType());
+
+ // Get the annotation string, go through casts. Sema requires this to be a
+ // non-wide string literal, potentially casted, so the cast<> is safe.
+ const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
+ llvm::StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
+ return RValue::get(EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc()));
+ }
}
- // If this is an alias for a libm function (e.g. __builtin_sin) turn it into
- // that function.
- if (getContext().BuiltinInfo.isLibFunction(BuiltinID) ||
- getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
- return EmitCall(E->getCallee()->getType(),
- CGM.getBuiltinLibFunction(FD, BuiltinID),
- ReturnValueSlot(), E->arg_begin(), E->arg_end(), FD);
+ // If this is an alias for a lib function (e.g. __builtin_sin), emit
+ // the call using the normal call path, but using the unmangled
+ // version of the function name.
+ if (getContext().BuiltinInfo.isLibFunction(BuiltinID))
+ return emitLibraryCall(*this, FD, E,
+ CGM.getBuiltinLibFunction(FD, BuiltinID));
+
+ // If this is a predefined lib function (e.g. malloc), emit the call
+ // using exactly the normal call path.
+ if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
+ return emitLibraryCall(*this, FD, E, EmitScalarExpr(E->getCallee()));
// See if we have a target specific intrinsic.
const char *Name = getContext().BuiltinInfo.GetName(BuiltinID);
@@ -1045,7 +1113,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
assert(Error == ASTContext::GE_None && "Should not codegen an error");
Function *F = CGM.getIntrinsic(IntrinsicID);
- const llvm::FunctionType *FTy = F->getFunctionType();
+ llvm::FunctionType *FTy = F->getFunctionType();
for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
Value *ArgValue;
@@ -1064,7 +1132,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// If the intrinsic arg type is different from the builtin arg type
// we need to do a bit cast.
- const llvm::Type *PTy = FTy->getParamType(i);
+ llvm::Type *PTy = FTy->getParamType(i);
if (PTy != ArgValue->getType()) {
assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) &&
"Must be able to losslessly bit cast to param");
@@ -1077,7 +1145,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *V = Builder.CreateCall(F, Args);
QualType BuiltinRetType = E->getType();
- const llvm::Type *RetTy = llvm::Type::getVoidTy(getLLVMContext());
+ llvm::Type *RetTy = llvm::Type::getVoidTy(getLLVMContext());
if (!BuiltinRetType->isVoidType()) RetTy = ConvertType(BuiltinRetType);
if (RetTy != V->getType()) {
@@ -1154,12 +1222,12 @@ Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
return Builder.CreateCall(F, Ops, name);
}
-Value *CodeGenFunction::EmitNeonShiftVector(Value *V, const llvm::Type *Ty,
+Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
bool neg) {
ConstantInt *CI = cast<ConstantInt>(V);
int SV = CI->getSExtValue();
- const llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
+ llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
llvm::Constant *C = ConstantInt::get(VTy->getElementType(), neg ? -SV : SV);
SmallVector<llvm::Constant*, 16> CV(VTy->getNumElements(), C);
return llvm::ConstantVector::get(CV);
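// For example (illustrative): with Ty = <4 x i16> and a constant shift of 3,
// this returns the splat <i16 3, i16 3, i16 3, i16 3>; with 'neg' set (right
// shifts are expressed as negative shift amounts), <i16 -3, i16 -3, ...>.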
@@ -1193,12 +1261,12 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
const FunctionDecl *FD = E->getDirectCallee();
  // Oddly, people occasionally write this call without arguments and gcc
  // accepts it - it's also marked as varargs in the description file.
- llvm::SmallVector<Value*, 2> Ops;
+ SmallVector<Value*, 2> Ops;
for (unsigned i = 0; i < E->getNumArgs(); i++)
Ops.push_back(EmitScalarExpr(E->getArg(i)));
- const llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
- const llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
- llvm::StringRef Name = FD->getName();
+ llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
+ llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
+ StringRef Name = FD->getName();
return Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
}
@@ -1223,7 +1291,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, NULL);
Value *One = llvm::ConstantInt::get(Int32Ty, 1);
- Value *Tmp = Builder.CreateAlloca(Int64Ty, One, "tmp");
+ Value *Tmp = Builder.CreateAlloca(Int64Ty, One);
Value *Val = EmitScalarExpr(E->getArg(0));
Builder.CreateStore(Val, Tmp);
@@ -1236,10 +1304,41 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall3(F, Arg0, Arg1, StPtr, "strexd");
}
- llvm::SmallVector<Value*, 4> Ops;
+ SmallVector<Value*, 4> Ops;
for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++)
Ops.push_back(EmitScalarExpr(E->getArg(i)));
+ // vget_lane and vset_lane are not overloaded and do not have an extra
+ // argument that specifies the vector type.
+ switch (BuiltinID) {
+ default: break;
+ case ARM::BI__builtin_neon_vget_lane_i8:
+ case ARM::BI__builtin_neon_vget_lane_i16:
+ case ARM::BI__builtin_neon_vget_lane_i32:
+ case ARM::BI__builtin_neon_vget_lane_i64:
+ case ARM::BI__builtin_neon_vget_lane_f32:
+ case ARM::BI__builtin_neon_vgetq_lane_i8:
+ case ARM::BI__builtin_neon_vgetq_lane_i16:
+ case ARM::BI__builtin_neon_vgetq_lane_i32:
+ case ARM::BI__builtin_neon_vgetq_lane_i64:
+ case ARM::BI__builtin_neon_vgetq_lane_f32:
+ return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
+ "vget_lane");
+ case ARM::BI__builtin_neon_vset_lane_i8:
+ case ARM::BI__builtin_neon_vset_lane_i16:
+ case ARM::BI__builtin_neon_vset_lane_i32:
+ case ARM::BI__builtin_neon_vset_lane_i64:
+ case ARM::BI__builtin_neon_vset_lane_f32:
+ case ARM::BI__builtin_neon_vsetq_lane_i8:
+ case ARM::BI__builtin_neon_vsetq_lane_i16:
+ case ARM::BI__builtin_neon_vsetq_lane_i32:
+ case ARM::BI__builtin_neon_vsetq_lane_i64:
+ case ARM::BI__builtin_neon_vsetq_lane_f32:
+ Ops.push_back(EmitScalarExpr(E->getArg(2)));
+ return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
+ }
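// Illustrative mapping for the cases hoisted above, using <arm_neon.h> names:
//   int32_t   x = vget_lane_s32(v, 1);    // -> extractelement <2 x i32> %v, i32 1
//   int32x2_t w = vset_lane_s32(x, v, 0); // -> insertelement <2 x i32> %v, i32 %x, i32 0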
+
+ // Get the last argument, which specifies the vector type.
llvm::APSInt Result;
const Expr *Arg = E->getArg(E->getNumArgs()-1);
if (!Arg->isIntegerConstantExpr(Result, getContext()))
@@ -1382,18 +1481,6 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Value *SV = llvm::ConstantVector::get(Indices);
return Builder.CreateShuffleVector(Ops[0], Ops[1], SV, "vext");
}
- case ARM::BI__builtin_neon_vget_lane_i8:
- case ARM::BI__builtin_neon_vget_lane_i16:
- case ARM::BI__builtin_neon_vget_lane_i32:
- case ARM::BI__builtin_neon_vget_lane_i64:
- case ARM::BI__builtin_neon_vget_lane_f32:
- case ARM::BI__builtin_neon_vgetq_lane_i8:
- case ARM::BI__builtin_neon_vgetq_lane_i16:
- case ARM::BI__builtin_neon_vgetq_lane_i32:
- case ARM::BI__builtin_neon_vgetq_lane_i64:
- case ARM::BI__builtin_neon_vgetq_lane_f32:
- return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
- "vget_lane");
case ARM::BI__builtin_neon_vhadd_v:
case ARM::BI__builtin_neon_vhaddq_v:
Int = usgn ? Intrinsic::arm_neon_vhaddu : Intrinsic::arm_neon_vhadds;
@@ -1457,9 +1544,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
Ops.push_back(GetPointeeAlignment(*this, E->getArg(1)));
- Ops[1] = Builder.CreateCall(F,
- ArrayRef<Value *>(Ops.begin() + 1, Ops.end()),
- "vld2_lane");
+ Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateStore(Ops[1], Ops[0]);
@@ -1471,9 +1556,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
Ops.push_back(GetPointeeAlignment(*this, E->getArg(1)));
- Ops[1] = Builder.CreateCall(F,
- ArrayRef<Value *>(Ops.begin() + 1, Ops.end()),
- "vld3_lane");
+ Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateStore(Ops[1], Ops[0]);
@@ -1486,9 +1569,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
Ops[5] = Builder.CreateBitCast(Ops[5], Ty);
Ops.push_back(GetPointeeAlignment(*this, E->getArg(1)));
- Ops[1] = Builder.CreateCall(F,
- ArrayRef<Value *>(Ops.begin() + 1, Ops.end()),
- "vld3_lane");
+ Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateStore(Ops[1], Ops[0]);
@@ -1508,7 +1589,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vld4_dup_v:
Int = Intrinsic::arm_neon_vld2;
break;
- default: assert(0 && "unknown vld_dup intrinsic?");
+ default: llvm_unreachable("unknown vld_dup intrinsic?");
}
Function *F = CGM.getIntrinsic(Int, Ty);
Value *Align = GetPointeeAlignment(*this, E->getArg(1));
@@ -1527,10 +1608,10 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vld4_dup_v:
Int = Intrinsic::arm_neon_vld2lane;
break;
- default: assert(0 && "unknown vld_dup intrinsic?");
+ default: llvm_unreachable("unknown vld_dup intrinsic?");
}
Function *F = CGM.getIntrinsic(Int, Ty);
- const llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType());
+ llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType());
SmallVector<Value*, 6> Args;
Args.push_back(Ops[1]);
@@ -1562,14 +1643,14 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Int = usgn ? Intrinsic::arm_neon_vminu : Intrinsic::arm_neon_vmins;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
case ARM::BI__builtin_neon_vmovl_v: {
- const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
+    llvm::Type *DTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
if (usgn)
return Builder.CreateZExt(Ops[0], Ty, "vmovl");
return Builder.CreateSExt(Ops[0], Ty, "vmovl");
}
case ARM::BI__builtin_neon_vmovn_v: {
- const llvm::Type *QTy = llvm::VectorType::getExtendedElementVectorType(VTy);
+ llvm::Type *QTy = llvm::VectorType::getExtendedElementVectorType(VTy);
Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
}
@@ -1587,7 +1668,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Int = usgn ? Intrinsic::arm_neon_vpadalu : Intrinsic::arm_neon_vpadals;
// The source operand type has twice as many elements of half the size.
unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
- const llvm::Type *EltTy =
+ llvm::Type *EltTy =
llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
llvm::Type *NarrowTy =
llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
@@ -1602,7 +1683,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Int = usgn ? Intrinsic::arm_neon_vpaddlu : Intrinsic::arm_neon_vpaddls;
// The source operand type has twice as many elements of half the size.
unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
- const llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
+ llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
llvm::Type *NarrowTy =
llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
llvm::Type *Tys[2] = { Ty, NarrowTy };
@@ -1729,18 +1810,6 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vrsubhn_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsubhn, Ty),
Ops, "vrsubhn");
- case ARM::BI__builtin_neon_vset_lane_i8:
- case ARM::BI__builtin_neon_vset_lane_i16:
- case ARM::BI__builtin_neon_vset_lane_i32:
- case ARM::BI__builtin_neon_vset_lane_i64:
- case ARM::BI__builtin_neon_vset_lane_f32:
- case ARM::BI__builtin_neon_vsetq_lane_i8:
- case ARM::BI__builtin_neon_vsetq_lane_i16:
- case ARM::BI__builtin_neon_vsetq_lane_i32:
- case ARM::BI__builtin_neon_vsetq_lane_i64:
- case ARM::BI__builtin_neon_vsetq_lane_f32:
- Ops.push_back(EmitScalarExpr(E->getArg(2)));
- return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
case ARM::BI__builtin_neon_vshl_v:
case ARM::BI__builtin_neon_vshlq_v:
Int = usgn ? Intrinsic::arm_neon_vshiftu : Intrinsic::arm_neon_vshifts;
@@ -1921,7 +1990,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
}
llvm::Value *CodeGenFunction::
-BuildVector(const llvm::SmallVectorImpl<llvm::Value*> &Ops) {
+BuildVector(const SmallVectorImpl<llvm::Value*> &Ops) {
assert((Ops.size() & (Ops.size() - 1)) == 0 &&
"Not a power-of-two sized vector!");
bool AllConstants = true;
@@ -1949,7 +2018,7 @@ BuildVector(const llvm::SmallVectorImpl<llvm::Value*> &Ops) {
Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
- llvm::SmallVector<Value*, 4> Ops;
+ SmallVector<Value*, 4> Ops;
// Find out if any arguments are required to be integer constant expressions.
unsigned ICEArguments = 0;
@@ -1983,7 +2052,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_psrlqi128:
case X86::BI__builtin_ia32_psrlwi128: {
Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty, "zext");
- const llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 2);
+ llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 2);
llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
Ops[1] = Builder.CreateInsertElement(llvm::UndefValue::get(Ty),
Ops[1], Zero, "insert");
@@ -1992,7 +2061,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Intrinsic::ID ID = Intrinsic::not_intrinsic;
switch (BuiltinID) {
- default: assert(0 && "Unsupported shift intrinsic!");
+ default: llvm_unreachable("Unsupported shift intrinsic!");
case X86::BI__builtin_ia32_pslldi128:
name = "pslldi";
ID = Intrinsic::x86_sse2_psll_d;
@@ -2046,13 +2115,13 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_psrlqi:
case X86::BI__builtin_ia32_psrlwi: {
Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty, "zext");
- const llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 1);
+ llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 1);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty, "bitcast");
const char *name = 0;
Intrinsic::ID ID = Intrinsic::not_intrinsic;
switch (BuiltinID) {
- default: assert(0 && "Unsupported shift intrinsic!");
+ default: llvm_unreachable("Unsupported shift intrinsic!");
case X86::BI__builtin_ia32_pslldi:
name = "pslldi";
ID = Intrinsic::x86_mmx_psll_d;
@@ -2098,19 +2167,19 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, Ops, "cmpss");
}
case X86::BI__builtin_ia32_ldmxcsr: {
- const llvm::Type *PtrTy = Int8PtrTy;
+ llvm::Type *PtrTy = Int8PtrTy;
Value *One = llvm::ConstantInt::get(Int32Ty, 1);
- Value *Tmp = Builder.CreateAlloca(Int32Ty, One, "tmp");
+ Value *Tmp = Builder.CreateAlloca(Int32Ty, One);
Builder.CreateStore(Ops[0], Tmp);
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
Builder.CreateBitCast(Tmp, PtrTy));
}
case X86::BI__builtin_ia32_stmxcsr: {
- const llvm::Type *PtrTy = Int8PtrTy;
+ llvm::Type *PtrTy = Int8PtrTy;
Value *One = llvm::ConstantInt::get(Int32Ty, 1);
- Value *Tmp = Builder.CreateAlloca(Int32Ty, One, "tmp");
- One = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
- Builder.CreateBitCast(Tmp, PtrTy));
+ Value *Tmp = Builder.CreateAlloca(Int32Ty, One);
+ Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
+ Builder.CreateBitCast(Tmp, PtrTy));
return Builder.CreateLoad(Tmp, "stmxcsr");
}
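// Roughly, the IR emitted for __builtin_ia32_stmxcsr() is (names approximate):
//   %tmp = alloca i32
//   call void @llvm.x86.sse.stmxcsr(i8* %tmp.i8)  ; write MXCSR to memory
//   %v = load i32* %tmp                           ; returned as the result
// the stack temporary is needed because the SSE intrinsic traffics through
// memory rather than returning the register value directly.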
case X86::BI__builtin_ia32_cmppd: {
@@ -2144,7 +2213,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// If palignr is shifting the pair of input vectors less than 9 bytes,
// emit a shuffle instruction.
if (shiftVal <= 8) {
- llvm::SmallVector<llvm::Constant*, 8> Indices;
+ SmallVector<llvm::Constant*, 8> Indices;
for (unsigned i = 0; i != 8; ++i)
Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));
@@ -2156,17 +2225,17 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// than 16 bytes, emit a logical right shift of the destination.
if (shiftVal < 16) {
// MMX has these as 1 x i64 vectors for some odd optimization reasons.
- const llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 1);
+ llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 1);
Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
Ops[1] = llvm::ConstantInt::get(VecTy, (shiftVal-8) * 8);
// create i32 constant
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_mmx_psrl_q);
- return Builder.CreateCall(F, ArrayRef<Value *>(&Ops[0], 2), "palignr");
+ return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
}
- // If palignr is shifting the pair of vectors more than 32 bytes, emit zero.
+ // If palignr is shifting the pair of vectors more than 16 bytes, emit zero.
return llvm::Constant::getNullValue(ConvertType(E->getType()));
}
case X86::BI__builtin_ia32_palignr128: {
@@ -2175,7 +2244,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// If palignr is shifting the pair of input vectors less than 17 bytes,
// emit a shuffle instruction.
if (shiftVal <= 16) {
- llvm::SmallVector<llvm::Constant*, 16> Indices;
+ SmallVector<llvm::Constant*, 16> Indices;
for (unsigned i = 0; i != 16; ++i)
Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));
@@ -2186,14 +2255,14 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// If palignr is shifting the pair of input vectors more than 16 but less
// than 32 bytes, emit a logical right shift of the destination.
if (shiftVal < 32) {
- const llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);
+ llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);
Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
Ops[1] = llvm::ConstantInt::get(Int32Ty, (shiftVal-16) * 8);
// create i32 constant
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_psrl_dq);
- return Builder.CreateCall(F, ArrayRef<Value *>(&Ops[0], 2), "palignr");
+ return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
}
// If palignr is shifting the pair of vectors more than 32 bytes, emit zero.
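// Worked example (illustrative): __builtin_ia32_palignr128(a, b, n) selects a
// 16-byte window at byte offset n from the 32-byte concatenated pair, so
// n <= 16 becomes a shufflevector, 16 < n < 32 a logical right shift of the
// remaining operand, and n >= 32 a zero vector, as returned here.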
@@ -2352,7 +2421,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
- llvm::SmallVector<Value*, 4> Ops;
+ SmallVector<Value*, 4> Ops;
for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
Ops.push_back(EmitScalarExpr(E->getArg(i)));
@@ -2373,11 +2442,11 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
{
Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
- Ops[0] = Builder.CreateGEP(Ops[1], Ops[0], "tmp");
+ Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
Ops.pop_back();
switch (BuiltinID) {
- default: assert(0 && "Unsupported ld/lvsl/lvsr intrinsic!");
+ default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!");
case PPC::BI__builtin_altivec_lvx:
ID = Intrinsic::ppc_altivec_lvx;
break;
@@ -2412,11 +2481,11 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_altivec_stvewx:
{
Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
- Ops[1] = Builder.CreateGEP(Ops[2], Ops[1], "tmp");
+ Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
Ops.pop_back();
switch (BuiltinID) {
- default: assert(0 && "Unsupported st intrinsic!");
+ default: llvm_unreachable("Unsupported st intrinsic!");
case PPC::BI__builtin_altivec_stvx:
ID = Intrinsic::ppc_altivec_stvx;
break;
diff --git a/lib/CodeGen/CGCUDANV.cpp b/lib/CodeGen/CGCUDANV.cpp
new file mode 100644
index 000000000000..88a0bdc821d7
--- /dev/null
+++ b/lib/CodeGen/CGCUDANV.cpp
@@ -0,0 +1,126 @@
+//===----- CGCUDANV.cpp - Interface to NVIDIA CUDA Runtime ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides a class for CUDA code generation targeting the NVIDIA CUDA
+// runtime library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGCUDARuntime.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/Decl.h"
+#include "llvm/BasicBlock.h"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Support/CallSite.h"
+
+#include <vector>
+
+using namespace clang;
+using namespace CodeGen;
+
+namespace {
+
+class CGNVCUDARuntime : public CGCUDARuntime {
+
+private:
+ llvm::Type *IntTy, *SizeTy;
+ llvm::PointerType *CharPtrTy, *VoidPtrTy;
+
+ llvm::Constant *getSetupArgumentFn() const;
+ llvm::Constant *getLaunchFn() const;
+
+public:
+ CGNVCUDARuntime(CodeGenModule &CGM);
+
+ void EmitDeviceStubBody(CodeGenFunction &CGF, FunctionArgList &Args);
+};
+
+}
+
+CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM) : CGCUDARuntime(CGM) {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+
+ IntTy = Types.ConvertType(Ctx.IntTy);
+ SizeTy = Types.ConvertType(Ctx.getSizeType());
+
+ CharPtrTy = llvm::PointerType::getUnqual(Types.ConvertType(Ctx.CharTy));
+ VoidPtrTy = cast<llvm::PointerType>(Types.ConvertType(Ctx.VoidPtrTy));
+}
+
+llvm::Constant *CGNVCUDARuntime::getSetupArgumentFn() const {
+ // cudaError_t cudaSetupArgument(void *, size_t, size_t)
+ std::vector<llvm::Type*> Params;
+ Params.push_back(VoidPtrTy);
+ Params.push_back(SizeTy);
+ Params.push_back(SizeTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(IntTy,
+ Params, false),
+ "cudaSetupArgument");
+}
+
+llvm::Constant *CGNVCUDARuntime::getLaunchFn() const {
+ // cudaError_t cudaLaunch(char *)
+ std::vector<llvm::Type*> Params;
+ Params.push_back(CharPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(IntTy,
+ Params, false),
+ "cudaLaunch");
+}
+
+void CGNVCUDARuntime::EmitDeviceStubBody(CodeGenFunction &CGF,
+ FunctionArgList &Args) {
+ // Build the argument value list and the argument stack struct type.
+ llvm::SmallVector<llvm::Value *, 16> ArgValues;
+ std::vector<llvm::Type *> ArgTypes;
+ for (FunctionArgList::const_iterator I = Args.begin(), E = Args.end();
+ I != E; ++I) {
+ llvm::Value *V = CGF.GetAddrOfLocalVar(*I);
+ ArgValues.push_back(V);
+ assert(isa<llvm::PointerType>(V->getType()) && "Arg type not PointerType");
+ ArgTypes.push_back(cast<llvm::PointerType>(V->getType())->getElementType());
+ }
+ llvm::StructType *ArgStackTy = llvm::StructType::get(
+ CGF.getLLVMContext(), ArgTypes);
+
+ llvm::BasicBlock *EndBlock = CGF.createBasicBlock("setup.end");
+
+ // Emit the calls to cudaSetupArgument
+ llvm::Constant *cudaSetupArgFn = getSetupArgumentFn();
+ for (unsigned I = 0, E = Args.size(); I != E; ++I) {
+ llvm::Value *Args[3];
+ llvm::BasicBlock *NextBlock = CGF.createBasicBlock("setup.next");
+ Args[0] = CGF.Builder.CreatePointerCast(ArgValues[I], VoidPtrTy);
+ Args[1] = CGF.Builder.CreateIntCast(
+ llvm::ConstantExpr::getSizeOf(ArgTypes[I]),
+ SizeTy, false);
+ Args[2] = CGF.Builder.CreateIntCast(
+ llvm::ConstantExpr::getOffsetOf(ArgStackTy, I),
+ SizeTy, false);
+ llvm::CallSite CS = CGF.EmitCallOrInvoke(cudaSetupArgFn, Args);
+ llvm::Constant *Zero = llvm::ConstantInt::get(IntTy, 0);
+ llvm::Value *CSZero = CGF.Builder.CreateICmpEQ(CS.getInstruction(), Zero);
+ CGF.Builder.CreateCondBr(CSZero, NextBlock, EndBlock);
+ CGF.EmitBlock(NextBlock);
+ }
+
+ // Emit the call to cudaLaunch
+ llvm::Constant *cudaLaunchFn = getLaunchFn();
+ llvm::Value *Arg = CGF.Builder.CreatePointerCast(CGF.CurFn, CharPtrTy);
+ CGF.EmitCallOrInvoke(cudaLaunchFn, Arg);
+ CGF.EmitBranch(EndBlock);
+
+ CGF.EmitBlock(EndBlock);
+}
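// Roughly, for a kernel 'void k(int a, float b)' the stub emitted above
// behaves like this hand-written equivalent (a sketch; the struct name is
// hypothetical and error paths are simplified):
//   struct ArgStack { int a; float b; };
//   void k(int a, float b) {
//     if (cudaSetupArgument(&a, sizeof(a), offsetof(struct ArgStack, a))) return;
//     if (cudaSetupArgument(&b, sizeof(b), offsetof(struct ArgStack, b))) return;
//     cudaLaunch((char *)k);
//   }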
+
+CGCUDARuntime *CodeGen::CreateNVCUDARuntime(CodeGenModule &CGM) {
+ return new CGNVCUDARuntime(CGM);
+}
diff --git a/lib/CodeGen/CGCUDARuntime.cpp b/lib/CodeGen/CGCUDARuntime.cpp
new file mode 100644
index 000000000000..77dc248d69e6
--- /dev/null
+++ b/lib/CodeGen/CGCUDARuntime.cpp
@@ -0,0 +1,55 @@
+//===----- CGCUDARuntime.cpp - Interface to CUDA Runtimes -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for CUDA code generation. Concrete
+// subclasses of this implement code generation for specific CUDA
+// runtime libraries.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGCUDARuntime.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/ExprCXX.h"
+#include "CGCall.h"
+#include "CodeGenFunction.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+CGCUDARuntime::~CGCUDARuntime() {}
+
+RValue CGCUDARuntime::EmitCUDAKernelCallExpr(CodeGenFunction &CGF,
+ const CUDAKernelCallExpr *E,
+ ReturnValueSlot ReturnValue) {
+ llvm::BasicBlock *ConfigOKBlock = CGF.createBasicBlock("kcall.configok");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("kcall.end");
+
+ CodeGenFunction::ConditionalEvaluation eval(CGF);
+ CGF.EmitBranchOnBoolExpr(E->getConfig(), ContBlock, ConfigOKBlock);
+
+ eval.begin(CGF);
+ CGF.EmitBlock(ConfigOKBlock);
+
+ const Decl *TargetDecl = 0;
+ if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E->getCallee())) {
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CE->getSubExpr())) {
+ TargetDecl = DRE->getDecl();
+ }
+ }
+
+ llvm::Value *Callee = CGF.EmitScalarExpr(E->getCallee());
+ CGF.EmitCall(E->getCallee()->getType(), Callee, ReturnValue,
+ E->arg_begin(), E->arg_end(), TargetDecl);
+ CGF.EmitBranch(ContBlock);
+
+ CGF.EmitBlock(ContBlock);
+ eval.end(CGF);
+
+ return RValue::get(0);
+}
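// In effect (a sketch, assuming the NVIDIA runtime): 'kern<<<grid, block>>>(x)'
// is emitted as
//   if (!cudaConfigureCall(grid, block, /*shared=*/0, /*stream=*/0))
//     kern(x);  // the device stub, which sets up args and calls cudaLaunch
// so kcall.configok runs the stub only when configuration succeeded, and
// kcall.end joins both paths.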
diff --git a/lib/CodeGen/CGCUDARuntime.h b/lib/CodeGen/CGCUDARuntime.h
new file mode 100644
index 000000000000..a99a67ae1ae7
--- /dev/null
+++ b/lib/CodeGen/CGCUDARuntime.h
@@ -0,0 +1,54 @@
+//===----- CGCUDARuntime.h - Interface to CUDA Runtimes ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for CUDA code generation. Concrete
+// subclasses of this implement code generation for specific CUDA
+// runtime libraries.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CUDARUNTIME_H
+#define CLANG_CODEGEN_CUDARUNTIME_H
+
+namespace clang {
+
+class CUDAKernelCallExpr;
+
+namespace CodeGen {
+
+class CodeGenFunction;
+class CodeGenModule;
+class FunctionArgList;
+class ReturnValueSlot;
+class RValue;
+
+class CGCUDARuntime {
+protected:
+ CodeGenModule &CGM;
+
+public:
+ CGCUDARuntime(CodeGenModule &CGM) : CGM(CGM) {}
+ virtual ~CGCUDARuntime();
+
+ virtual RValue EmitCUDAKernelCallExpr(CodeGenFunction &CGF,
+ const CUDAKernelCallExpr *E,
+ ReturnValueSlot ReturnValue);
+
+ virtual void EmitDeviceStubBody(CodeGenFunction &CGF,
+ FunctionArgList &Args) = 0;
+
+};
+
+/// Creates an instance of a CUDA runtime class.
+CGCUDARuntime *CreateNVCUDARuntime(CodeGenModule &CGM);
+
+}
+}
+
+#endif
diff --git a/lib/CodeGen/CGCXX.cpp b/lib/CodeGen/CGCXX.cpp
index f6fc202eaae2..b5e6e0d7d993 100644
--- a/lib/CodeGen/CGCXX.cpp
+++ b/lib/CodeGen/CGCXX.cpp
@@ -138,7 +138,7 @@ bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
return true;
// Derive the type for the alias.
- const llvm::PointerType *AliasType
+ llvm::PointerType *AliasType
= getTypes().GetFunctionType(AliasDecl)->getPointerTo();
// Find the referrent. Some aliases might require a bitcast, in
@@ -154,7 +154,7 @@ bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
new llvm::GlobalAlias(AliasType, Linkage, "", Aliasee, &getModule());
// Switch any previous uses to the alias.
- llvm::StringRef MangledName = getMangledName(AliasDecl);
+ StringRef MangledName = getMangledName(AliasDecl);
llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
if (Entry) {
assert(Entry->isDeclaration() && "definition already exists for alias");
@@ -214,14 +214,14 @@ CodeGenModule::GetAddrOfCXXConstructor(const CXXConstructorDecl *ctor,
const CGFunctionInfo *fnInfo) {
GlobalDecl GD(ctor, ctorType);
- llvm::StringRef name = getMangledName(GD);
+ StringRef name = getMangledName(GD);
if (llvm::GlobalValue *existing = GetGlobalValue(name))
return existing;
if (!fnInfo) fnInfo = &getTypes().getFunctionInfo(ctor, ctorType);
const FunctionProtoType *proto = ctor->getType()->castAs<FunctionProtoType>();
- const llvm::FunctionType *fnType =
+ llvm::FunctionType *fnType =
getTypes().GetFunctionType(*fnInfo, proto->isVariadic());
return cast<llvm::Function>(GetOrCreateLLVMFunction(name, fnType, GD,
/*ForVTable=*/false));
@@ -236,11 +236,7 @@ void CodeGenModule::EmitCXXDestructors(const CXXDestructorDecl *D) {
// The destructor used for destructing this as a most-derived class;
// call the base destructor and then destructs any virtual bases.
- if (!D->getParent()->isAbstract() || D->isVirtual()) {
- // We don't need to emit the complete ctor if the class is abstract,
- // unless the destructor is virtual and needs to be in the vtable.
- EmitGlobal(GlobalDecl(D, Dtor_Complete));
- }
+ EmitGlobal(GlobalDecl(D, Dtor_Complete));
// The destructor used for destructing this as a base class; ignores
// virtual bases.
@@ -282,13 +278,13 @@ CodeGenModule::GetAddrOfCXXDestructor(const CXXDestructorDecl *dtor,
const CGFunctionInfo *fnInfo) {
GlobalDecl GD(dtor, dtorType);
- llvm::StringRef name = getMangledName(GD);
+ StringRef name = getMangledName(GD);
if (llvm::GlobalValue *existing = GetGlobalValue(name))
return existing;
if (!fnInfo) fnInfo = &getTypes().getFunctionInfo(dtor, dtorType);
- const llvm::FunctionType *fnType =
+ llvm::FunctionType *fnType =
getTypes().GetFunctionType(*fnInfo, false);
return cast<llvm::Function>(GetOrCreateLLVMFunction(name, fnType, GD,
@@ -296,7 +292,7 @@ CodeGenModule::GetAddrOfCXXDestructor(const CXXDestructorDecl *dtor,
}
static llvm::Value *BuildVirtualCall(CodeGenFunction &CGF, uint64_t VTableIndex,
- llvm::Value *This, const llvm::Type *Ty) {
+ llvm::Value *This, llvm::Type *Ty) {
Ty = Ty->getPointerTo()->getPointerTo();
llvm::Value *VTable = CGF.GetVTablePtr(This, Ty);
@@ -307,9 +303,9 @@ static llvm::Value *BuildVirtualCall(CodeGenFunction &CGF, uint64_t VTableIndex,
llvm::Value *
CodeGenFunction::BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This,
- const llvm::Type *Ty) {
+ llvm::Type *Ty) {
MD = MD->getCanonicalDecl();
- uint64_t VTableIndex = CGM.getVTables().getMethodVTableIndex(MD);
+ uint64_t VTableIndex = CGM.getVTableContext().getMethodVTableIndex(MD);
return ::BuildVirtualCall(*this, VTableIndex, This, Ty);
}
@@ -320,7 +316,7 @@ CodeGenFunction::BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This,
llvm::Value *
CodeGenFunction::BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
NestedNameSpecifier *Qual,
- const llvm::Type *Ty) {
+ llvm::Type *Ty) {
llvm::Value *VTable = 0;
assert((Qual->getKind() == NestedNameSpecifier::TypeSpec) &&
"BuildAppleKextVirtualCall - bad Qual kind");
@@ -339,9 +335,10 @@ CodeGenFunction::BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
VTable = Builder.CreateBitCast(VTable, Ty);
assert(VTable && "BuildVirtualCall = kext vtbl pointer is null");
MD = MD->getCanonicalDecl();
- uint64_t VTableIndex = CGM.getVTables().getMethodVTableIndex(MD);
+ uint64_t VTableIndex = CGM.getVTableContext().getMethodVTableIndex(MD);
uint64_t AddressPoint =
- CGM.getVTables().getAddressPoint(BaseSubobject(RD, CharUnits::Zero()), RD);
+ CGM.getVTableContext().getVTableLayout(RD)
+ .getAddressPoint(BaseSubobject(RD, CharUnits::Zero()));
VTableIndex += AddressPoint;
llvm::Value *VFuncPtr =
Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfnkxt");
@@ -366,7 +363,7 @@ CodeGenFunction::BuildAppleKextVirtualDestructorCall(
&CGM.getTypes().getFunctionInfo(cast<CXXDestructorDecl>(MD),
Dtor_Complete);
const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
- const llvm::Type *Ty
+ llvm::Type *Ty
= CGM.getTypes().GetFunctionType(*FInfo, FPT->isVariadic());
llvm::Value *VTable = CGM.getVTables().GetAddrOfVTable(RD);
@@ -374,9 +371,10 @@ CodeGenFunction::BuildAppleKextVirtualDestructorCall(
VTable = Builder.CreateBitCast(VTable, Ty);
DD = cast<CXXDestructorDecl>(DD->getCanonicalDecl());
uint64_t VTableIndex =
- CGM.getVTables().getMethodVTableIndex(GlobalDecl(DD, Type));
+ CGM.getVTableContext().getMethodVTableIndex(GlobalDecl(DD, Type));
uint64_t AddressPoint =
- CGM.getVTables().getAddressPoint(BaseSubobject(RD, CharUnits::Zero()), RD);
+ CGM.getVTableContext().getVTableLayout(RD)
+ .getAddressPoint(BaseSubobject(RD, CharUnits::Zero()));
VTableIndex += AddressPoint;
llvm::Value *VFuncPtr =
Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfnkxt");
@@ -387,10 +385,10 @@ CodeGenFunction::BuildAppleKextVirtualDestructorCall(
llvm::Value *
CodeGenFunction::BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
- llvm::Value *This, const llvm::Type *Ty) {
+ llvm::Value *This, llvm::Type *Ty) {
DD = cast<CXXDestructorDecl>(DD->getCanonicalDecl());
uint64_t VTableIndex =
- CGM.getVTables().getMethodVTableIndex(GlobalDecl(DD, Type));
+ CGM.getVTableContext().getMethodVTableIndex(GlobalDecl(DD, Type));
return ::BuildVirtualCall(*this, VTableIndex, This, Ty);
}
diff --git a/lib/CodeGen/CGCXXABI.cpp b/lib/CodeGen/CGCXXABI.cpp
index dcc28b45cfc3..248448ccdc2e 100644
--- a/lib/CodeGen/CGCXXABI.cpp
+++ b/lib/CodeGen/CGCXXABI.cpp
@@ -20,9 +20,9 @@ using namespace CodeGen;
CGCXXABI::~CGCXXABI() { }
static void ErrorUnsupportedABI(CodeGenFunction &CGF,
- llvm::StringRef S) {
- Diagnostic &Diags = CGF.CGM.getDiags();
- unsigned DiagID = Diags.getCustomDiagID(Diagnostic::Error,
+ StringRef S) {
+ DiagnosticsEngine &Diags = CGF.CGM.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
"cannot yet compile %1 in this ABI");
Diags.Report(CGF.getContext().getFullLoc(CGF.CurCodeDecl->getLocation()),
DiagID)
@@ -49,7 +49,7 @@ llvm::Value *CGCXXABI::EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
MPT->getPointeeType()->getAs<FunctionProtoType>();
const CXXRecordDecl *RD =
cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(RD, FPT),
FPT->isVariadic());
return llvm::Constant::getNullValue(FTy->getPointerTo());
@@ -60,7 +60,7 @@ llvm::Value *CGCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF,
llvm::Value *MemPtr,
const MemberPointerType *MPT) {
ErrorUnsupportedABI(CGF, "loads of member pointers");
- const llvm::Type *Ty = CGF.ConvertType(MPT->getPointeeType())->getPointerTo();
+ llvm::Type *Ty = CGF.ConvertType(MPT->getPointeeType())->getPointerTo();
return llvm::Constant::getNullValue(Ty);
}
diff --git a/lib/CodeGen/CGCXXABI.h b/lib/CodeGen/CGCXXABI.h
index 29f299a43e37..c2abf358329c 100644
--- a/lib/CodeGen/CGCXXABI.h
+++ b/lib/CodeGen/CGCXXABI.h
@@ -15,14 +15,14 @@
#ifndef CLANG_CODEGEN_CXXABI_H
#define CLANG_CODEGEN_CXXABI_H
+#include "clang/Basic/LLVM.h"
+
#include "CodeGenFunction.h"
namespace llvm {
class Constant;
class Type;
class Value;
-
- template <class T> class SmallVectorImpl;
}
namespace clang {
@@ -151,7 +151,7 @@ public:
virtual void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
CXXCtorType T,
CanQualType &ResTy,
- llvm::SmallVectorImpl<CanQualType> &ArgTys) = 0;
+ SmallVectorImpl<CanQualType> &ArgTys) = 0;
/// Build the signature of the given destructor variant by adding
/// any required parameters. For convenience, ResTy has been
@@ -160,7 +160,7 @@ public:
virtual void BuildDestructorSignature(const CXXDestructorDecl *Dtor,
CXXDtorType T,
CanQualType &ResTy,
- llvm::SmallVectorImpl<CanQualType> &ArgTys) = 0;
+ SmallVectorImpl<CanQualType> &ArgTys) = 0;
/// Build the ABI-specific portion of the parameter list for a
/// function. This generally involves a 'this' parameter and
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index f8783ad08d00..6ae2d0c96775 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -69,14 +69,14 @@ static CanQualType GetReturnType(QualType RetTy) {
const CGFunctionInfo &
CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP) {
return getFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
- llvm::SmallVector<CanQualType, 16>(),
+ SmallVector<CanQualType, 16>(),
FTNP->getExtInfo());
}
/// \param Args - contains any initial parameters besides those
/// in the formal type
static const CGFunctionInfo &getFunctionInfo(CodeGenTypes &CGT,
- llvm::SmallVectorImpl<CanQualType> &ArgTys,
+ SmallVectorImpl<CanQualType> &ArgTys,
CanQual<FunctionProtoType> FTP) {
// FIXME: Kill copy.
for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
@@ -87,7 +87,7 @@ static const CGFunctionInfo &getFunctionInfo(CodeGenTypes &CGT,
const CGFunctionInfo &
CodeGenTypes::getFunctionInfo(CanQual<FunctionProtoType> FTP) {
- llvm::SmallVector<CanQualType, 16> ArgTys;
+ SmallVector<CanQualType, 16> ArgTys;
return ::getFunctionInfo(*this, ArgTys, FTP);
}
@@ -113,7 +113,7 @@ static CallingConv getCallingConventionForDecl(const Decl *D) {
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXRecordDecl *RD,
const FunctionProtoType *FTP) {
- llvm::SmallVector<CanQualType, 16> ArgTys;
+ SmallVector<CanQualType, 16> ArgTys;
// Add the 'this' pointer.
ArgTys.push_back(GetThisType(Context, RD));
@@ -123,7 +123,7 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXRecordDecl *RD,
}
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
- llvm::SmallVector<CanQualType, 16> ArgTys;
+ SmallVector<CanQualType, 16> ArgTys;
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
@@ -137,7 +137,7 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXConstructorDecl *D,
CXXCtorType Type) {
- llvm::SmallVector<CanQualType, 16> ArgTys;
+ SmallVector<CanQualType, 16> ArgTys;
ArgTys.push_back(GetThisType(Context, D->getParent()));
CanQualType ResTy = Context.VoidTy;
@@ -154,7 +154,7 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXConstructorDecl *D,
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXDestructorDecl *D,
CXXDtorType Type) {
- llvm::SmallVector<CanQualType, 2> ArgTys;
+ SmallVector<CanQualType, 2> ArgTys;
ArgTys.push_back(GetThisType(Context, D->getParent()));
CanQualType ResTy = Context.VoidTy;
@@ -180,11 +180,11 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
}
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
- llvm::SmallVector<CanQualType, 16> ArgTys;
+ SmallVector<CanQualType, 16> ArgTys;
ArgTys.push_back(Context.getCanonicalParamType(MD->getSelfDecl()->getType()));
ArgTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
// FIXME: Kill copy?
- for (ObjCMethodDecl::param_iterator i = MD->param_begin(),
+ for (ObjCMethodDecl::param_const_iterator i = MD->param_begin(),
e = MD->param_end(); i != e; ++i) {
ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
}
@@ -216,7 +216,7 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
const CallArgList &Args,
const FunctionType::ExtInfo &Info) {
// FIXME: Kill copy.
- llvm::SmallVector<CanQualType, 16> ArgTys;
+ SmallVector<CanQualType, 16> ArgTys;
for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
i != e; ++i)
ArgTys.push_back(Context.getCanonicalParamType(i->Ty));
@@ -227,7 +227,7 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
const FunctionArgList &Args,
const FunctionType::ExtInfo &Info) {
// FIXME: Kill copy.
- llvm::SmallVector<CanQualType, 16> ArgTys;
+ SmallVector<CanQualType, 16> ArgTys;
for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
i != e; ++i)
ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
@@ -235,15 +235,15 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
}
const CGFunctionInfo &CodeGenTypes::getNullaryFunctionInfo() {
- llvm::SmallVector<CanQualType, 1> args;
+ SmallVector<CanQualType, 1> args;
return getFunctionInfo(getContext().VoidTy, args, FunctionType::ExtInfo());
}
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
- const llvm::SmallVectorImpl<CanQualType> &ArgTys,
+ const SmallVectorImpl<CanQualType> &ArgTys,
const FunctionType::ExtInfo &Info) {
#ifndef NDEBUG
- for (llvm::SmallVectorImpl<CanQualType>::const_iterator
+ for (SmallVectorImpl<CanQualType>::const_iterator
I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I)
assert(I->isCanonicalAsParam());
#endif
@@ -312,50 +312,65 @@ CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
/***/
void CodeGenTypes::GetExpandedTypes(QualType type,
- llvm::SmallVectorImpl<llvm::Type*> &expandedTypes) {
- const RecordType *RT = type->getAsStructureType();
- assert(RT && "Can only expand structure types.");
- const RecordDecl *RD = RT->getDecl();
- assert(!RD->hasFlexibleArrayMember() &&
- "Cannot expand structure with flexible array.");
-
- for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ SmallVectorImpl<llvm::Type*> &expandedTypes) {
+ if (const ConstantArrayType *AT = Context.getAsConstantArrayType(type)) {
+ uint64_t NumElts = AT->getSize().getZExtValue();
+ for (uint64_t Elt = 0; Elt < NumElts; ++Elt)
+ GetExpandedTypes(AT->getElementType(), expandedTypes);
+ } else if (const RecordType *RT = type->getAsStructureType()) {
+ const RecordDecl *RD = RT->getDecl();
+ assert(!RD->hasFlexibleArrayMember() &&
+ "Cannot expand structure with flexible array.");
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
i != e; ++i) {
- const FieldDecl *FD = *i;
- assert(!FD->isBitField() &&
- "Cannot expand structure with bit-field members.");
-
- QualType fieldType = FD->getType();
- if (fieldType->isRecordType())
- GetExpandedTypes(fieldType, expandedTypes);
- else
- expandedTypes.push_back(ConvertType(fieldType));
- }
+ const FieldDecl *FD = *i;
+ assert(!FD->isBitField() &&
+ "Cannot expand structure with bit-field members.");
+ GetExpandedTypes(FD->getType(), expandedTypes);
+ }
+ } else if (const ComplexType *CT = type->getAs<ComplexType>()) {
+ llvm::Type *EltTy = ConvertType(CT->getElementType());
+ expandedTypes.push_back(EltTy);
+ expandedTypes.push_back(EltTy);
+ } else
+ expandedTypes.push_back(ConvertType(type));
}
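// For illustration, under the expanded rules above a record such as
//   struct S { int a; float b[2]; _Complex double c; };
// flattens to the parameter list (i32, float, float, double, double):
// constant arrays contribute one entry per element, structures recurse field
// by field, and a complex type contributes its element type twice.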
llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
llvm::Function::arg_iterator AI) {
- const RecordType *RT = Ty->getAsStructureType();
- assert(RT && "Can only expand structure types.");
-
- RecordDecl *RD = RT->getDecl();
assert(LV.isSimple() &&
"Unexpected non-simple lvalue during struct expansion.");
llvm::Value *Addr = LV.getAddress();
- for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+
+ if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
+ unsigned NumElts = AT->getSize().getZExtValue();
+ QualType EltTy = AT->getElementType();
+ for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
+ llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
+ LValue LV = MakeAddrLValue(EltAddr, EltTy);
+ AI = ExpandTypeFromArgs(EltTy, LV, AI);
+ }
+ } else if (const RecordType *RT = Ty->getAsStructureType()) {
+ RecordDecl *RD = RT->getDecl();
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
i != e; ++i) {
- FieldDecl *FD = *i;
- QualType FT = FD->getType();
+ FieldDecl *FD = *i;
+ QualType FT = FD->getType();
- // FIXME: What are the right qualifiers here?
- LValue LV = EmitLValueForField(Addr, FD, 0);
- if (CodeGenFunction::hasAggregateLLVMType(FT)) {
+ // FIXME: What are the right qualifiers here?
+ LValue LV = EmitLValueForField(Addr, FD, 0);
AI = ExpandTypeFromArgs(FT, LV, AI);
- } else {
- EmitStoreThroughLValue(RValue::get(AI), LV);
- ++AI;
}
+ } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
+ QualType EltTy = CT->getElementType();
+ llvm::Value *RealAddr = Builder.CreateStructGEP(Addr, 0, "real");
+ EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(RealAddr, EltTy));
+ llvm::Value *ImagAddr = Builder.CreateStructGEP(Addr, 0, "imag");
+ EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(ImagAddr, EltTy));
+ } else {
+ EmitStoreThroughLValue(RValue::get(AI), LV);
+ ++AI;
}
return AI;
@@ -367,12 +382,12 @@ CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
/// with an in-memory size smaller than DstSize.
static llvm::Value *
EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
- const llvm::StructType *SrcSTy,
+ llvm::StructType *SrcSTy,
uint64_t DstSize, CodeGenFunction &CGF) {
// We can't dive into a zero-element struct.
if (SrcSTy->getNumElements() == 0) return SrcPtr;
- const llvm::Type *FirstElt = SrcSTy->getElementType(0);
+ llvm::Type *FirstElt = SrcSTy->getElementType(0);
// If the first elt is at least as large as what we're looking for, or if the
// first element is the same size as the whole struct, we can enter it.
@@ -386,9 +401,9 @@ EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");
// If the first element is a struct, recurse.
- const llvm::Type *SrcTy =
+ llvm::Type *SrcTy =
cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
- if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
+ if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
return SrcPtr;
@@ -398,7 +413,7 @@ EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
- const llvm::Type *Ty,
+ llvm::Type *Ty,
CodeGenFunction &CGF) {
if (Val->getType() == Ty)
return Val;
@@ -412,7 +427,7 @@ static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
}
- const llvm::Type *DestIntTy = Ty;
+ llvm::Type *DestIntTy = Ty;
if (isa<llvm::PointerType>(DestIntTy))
DestIntTy = CGF.IntPtrTy;
@@ -433,9 +448,9 @@ static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
- const llvm::Type *Ty,
+ llvm::Type *Ty,
CodeGenFunction &CGF) {
- const llvm::Type *SrcTy =
+ llvm::Type *SrcTy =
cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
// If SrcTy and Ty are the same, just do a load.
@@ -444,7 +459,7 @@ static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);
- if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
+ if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
}
@@ -495,7 +510,7 @@ static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
llvm::Value *DestPtr, bool DestIsVolatile,
bool LowAlignment) {
// Prefer scalar stores to first-class aggregate stores.
- if (const llvm::StructType *STy =
+ if (llvm::StructType *STy =
dyn_cast<llvm::StructType>(Val->getType())) {
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i);
@@ -519,8 +534,8 @@ static void CreateCoercedStore(llvm::Value *Src,
llvm::Value *DstPtr,
bool DstIsVolatile,
CodeGenFunction &CGF) {
- const llvm::Type *SrcTy = Src->getType();
- const llvm::Type *DstTy =
+ llvm::Type *SrcTy = Src->getType();
+ llvm::Type *DstTy =
cast<llvm::PointerType>(DstPtr->getType())->getElementType();
if (SrcTy == DstTy) {
CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
@@ -529,7 +544,7 @@ static void CreateCoercedStore(llvm::Value *Src,
uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
- if (const llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
+ if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
}
@@ -584,11 +599,11 @@ bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
default:
return false;
case BuiltinType::Float:
- return getContext().Target.useObjCFPRetForRealType(TargetInfo::Float);
+ return getContext().getTargetInfo().useObjCFPRetForRealType(TargetInfo::Float);
case BuiltinType::Double:
- return getContext().Target.useObjCFPRetForRealType(TargetInfo::Double);
+ return getContext().getTargetInfo().useObjCFPRetForRealType(TargetInfo::Double);
case BuiltinType::LongDouble:
- return getContext().Target.useObjCFPRetForRealType(
+ return getContext().getTargetInfo().useObjCFPRetForRealType(
TargetInfo::LongDouble);
}
}
@@ -614,8 +629,8 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic) {
bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
assert(Inserted && "Recursively being processed?");
- llvm::SmallVector<llvm::Type*, 8> argTypes;
- const llvm::Type *resultType = 0;
+ SmallVector<llvm::Type*, 8> argTypes;
+ llvm::Type *resultType = 0;
const ABIArgInfo &retAI = FI.getReturnInfo();
switch (retAI.getKind()) {
@@ -632,7 +647,7 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic) {
resultType = llvm::Type::getVoidTy(getLLVMContext());
QualType ret = FI.getReturnType();
- const llvm::Type *ty = ConvertType(ret);
+ llvm::Type *ty = ConvertType(ret);
unsigned addressSpace = Context.getTargetAddressSpace(ret);
argTypes.push_back(llvm::PointerType::get(ty, addressSpace));
break;
@@ -653,7 +668,7 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic) {
case ABIArgInfo::Indirect: {
// indirect arguments are always on the stack, which is addr space #0.
- const llvm::Type *LTy = ConvertTypeForMem(it->type);
+ llvm::Type *LTy = ConvertTypeForMem(it->type);
argTypes.push_back(LTy->getPointerTo());
break;
}
@@ -664,7 +679,7 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic) {
// way is semantically identical, but fast-isel and the optimizer
// generally likes scalar values better than FCAs.
llvm::Type *argType = argAI.getCoerceToType();
- if (const llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) {
+ if (llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) {
for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
argTypes.push_back(st->getElementType(i));
} else {
@@ -685,7 +700,7 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic) {
return llvm::FunctionType::get(resultType, argTypes, isVariadic);
}
-const llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
+llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
@@ -714,6 +729,8 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
// FIXME: handle sseregparm someday...
if (TargetDecl) {
+ if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
+ FuncAttrs |= llvm::Attribute::ReturnsTwice;
if (TargetDecl->hasAttr<NoThrowAttr>())
FuncAttrs |= llvm::Attribute::NoUnwind;
else if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
@@ -724,10 +741,18 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
if (TargetDecl->hasAttr<NoReturnAttr>())
FuncAttrs |= llvm::Attribute::NoReturn;
- if (TargetDecl->hasAttr<ConstAttr>())
+
+ if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
+ FuncAttrs |= llvm::Attribute::ReturnsTwice;
+
+  // Functions with the 'const' or 'pure' attribute are also nounwind.
+ if (TargetDecl->hasAttr<ConstAttr>()) {
FuncAttrs |= llvm::Attribute::ReadNone;
- else if (TargetDecl->hasAttr<PureAttr>())
+ FuncAttrs |= llvm::Attribute::NoUnwind;
+ } else if (TargetDecl->hasAttr<PureAttr>()) {
FuncAttrs |= llvm::Attribute::ReadOnly;
+ FuncAttrs |= llvm::Attribute::NoUnwind;
+ }
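// Sketch of the effect: a declaration such as
//   int f(int) __attribute__((const));
// is now emitted with 'readnone nounwind' (and 'pure' with 'readonly
// nounwind'), since a function whose only observable effect is its return
// value cannot meaningfully throw.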
if (TargetDecl->hasAttr<MallocAttr>())
RetAttrs |= llvm::Attribute::NoAlias;
}
@@ -763,7 +788,7 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
break;
case ABIArgInfo::Expand:
- assert(0 && "Invalid ABI kind for return argument");
+ llvm_unreachable("Invalid ABI kind for return argument");
}
if (RetAttrs)
@@ -776,7 +801,7 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
else
RegParm = CodeGenOpts.NumRegisterParameters;
- unsigned PointerWidth = getContext().Target.getPointerWidth(0);
+ unsigned PointerWidth = getContext().getTargetInfo().getPointerWidth(0);
for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
ie = FI.arg_end(); it != ie; ++it) {
QualType ParamType = it->type;
@@ -803,7 +828,7 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
}
// FIXME: handle sseregparm someday...
- if (const llvm::StructType *STy =
+ if (llvm::StructType *STy =
dyn_cast<llvm::StructType>(AI.getCoerceToType()))
Index += STy->getNumElements()-1; // 1 will be added below.
break;
@@ -824,7 +849,7 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
continue;
case ABIArgInfo::Expand: {
- llvm::SmallVector<llvm::Type*, 8> types;
+ SmallVector<llvm::Type*, 8> types;
// FIXME: This is rather inefficient. Do we ever actually need to do
// anything here? The result should be just reconstructed on the other
// side, so extension should be a non-issue.
@@ -847,7 +872,7 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
const VarDecl *var,
llvm::Value *value) {
- const llvm::Type *varType = CGF.ConvertType(var->getType());
+ llvm::Type *varType = CGF.ConvertType(var->getType());
// This can happen with promotions that actually don't change the
// underlying type, like the enum promotions.
@@ -872,7 +897,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
if (FD->hasImplicitReturnZero()) {
QualType RetTy = FD->getResultType().getUnqualifiedType();
- const llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
+ llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
Builder.CreateStore(Zero, ReturnValue);
}
@@ -887,6 +912,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// Name the struct return argument.
if (CGM.ReturnTypeUsesSRet(FI)) {
AI->setName("agg.result");
+ AI->addAttr(llvm::Attribute::NoAlias);
++AI;
}
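// Illustrative effect: for 'struct Big f(void);' the hidden sret parameter
// now carries 'noalias', e.g. roughly
//   define void @f(%struct.Big* noalias sret %agg.result)
// telling the optimizer the return slot does not alias any other memory the
// callee can see.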
@@ -918,7 +944,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
//
// FIXME: We should have a common utility for generating an aggregate
// copy.
- const llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
+ llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
CharUnits Size = getContext().getTypeSizeInChars(Ty);
llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
@@ -954,9 +980,13 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
if (Arg->getType().isRestrictQualified())
AI->addAttr(llvm::Attribute::NoAlias);
+ // Ensure the argument is the correct type.
+ if (V->getType() != ArgI.getCoerceToType())
+ V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
+
if (isPromoted)
V = emitArgumentDemotion(*this, Arg, V);
-
+
EmitParmDecl(*Arg, V, ArgNo);
break;
}
@@ -985,13 +1015,13 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// If the coerce-to type is a first class aggregate, we flatten it and
// pass the elements. Either way is semantically identical, but fast-isel
// and the optimizer generally likes scalar values better than FCAs.
- if (const llvm::StructType *STy =
+ if (llvm::StructType *STy =
dyn_cast<llvm::StructType>(ArgI.getCoerceToType())) {
Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
assert(AI != Fn->arg_end() && "Argument mismatch!");
- AI->setName(Arg->getName() + ".coerce" + llvm::Twine(i));
+ AI->setName(Arg->getName() + ".coerce" + Twine(i));
llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
Builder.CreateStore(AI++, EltPtr);
}
@@ -1025,7 +1055,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// Name the arguments used in expansion and increment AI.
unsigned Index = 0;
for (; AI != End; ++AI, ++Index)
- AI->setName(Arg->getName() + "." + llvm::Twine(Index));
+ AI->setName(Arg->getName() + "." + Twine(Index));
continue;
}
@@ -1054,12 +1084,12 @@ static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
if (BB->empty()) return 0;
if (&BB->back() != result) return 0;
- const llvm::Type *resultType = result->getType();
+ llvm::Type *resultType = result->getType();
// result is in a BasicBlock and is therefore an Instruction.
llvm::Instruction *generator = cast<llvm::Instruction>(result);
- llvm::SmallVector<llvm::Instruction*,4> insnsToKill;
+ SmallVector<llvm::Instruction*,4> insnsToKill;
// Look for:
// %generator = bitcast %type1* %generator2 to %type2*
@@ -1112,7 +1142,7 @@ static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
}
// Delete all the unnecessary instructions, from latest to earliest.
- for (llvm::SmallVectorImpl<llvm::Instruction*>::iterator
+ for (SmallVectorImpl<llvm::Instruction*>::iterator
i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
(*i)->eraseFromParent();
@@ -1218,7 +1248,7 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
break;
case ABIArgInfo::Expand:
- assert(0 && "Invalid ABI kind for return argument");
+ llvm_unreachable("Invalid ABI kind for return argument");
}
llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
@@ -1324,7 +1354,7 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
// The dest and src types don't necessarily match in LLVM terms
// because of the crazy ObjC compatibility rules.
- const llvm::PointerType *destType =
+ llvm::PointerType *destType =
cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
// If the address is a constant null, just pass the appropriate null.
@@ -1406,9 +1436,14 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
return emitWritebackArg(*this, args, CRE);
}
- if (type->isReferenceType())
+ assert(type->isReferenceType() == E->isGLValue() &&
+ "reference binding to unmaterialized r-value!");
+
+ if (E->isGLValue()) {
+ assert(E->getObjectKind() == OK_Ordinary);
return args.add(EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0),
type);
+ }
if (hasAggregateLLVMType(type) && !E->getType()->isAnyComplexType() &&
isa<ImplicitCastExpr>(E) &&
@@ -1427,8 +1462,8 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
/// on the current state of the EH stack.
llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
- llvm::ArrayRef<llvm::Value *> Args,
- const llvm::Twine &Name) {
+ ArrayRef<llvm::Value *> Args,
+ const Twine &Name) {
llvm::BasicBlock *InvokeDest = getInvokeDest();
if (!InvokeDest)
return Builder.CreateCall(Callee, Args, Name);
@@ -1442,8 +1477,8 @@ CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
- const llvm::Twine &Name) {
- return EmitCallOrInvoke(Callee, llvm::ArrayRef<llvm::Value *>(), Name);
+ const Twine &Name) {
+ return EmitCallOrInvoke(Callee, ArrayRef<llvm::Value *>(), Name);
}
static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo,
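
The EmitCallOrInvoke overloads above pick between a plain call and an
invoke depending on whether any enclosing scope needs exception edges.
Roughly, with the body reduced to its essentials (a sketch, not the
verbatim clang code):

    // Sketch: emit `invoke` only when an EH landing pad is live.
    llvm::CallSite
    CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                      ArrayRef<llvm::Value *> Args,
                                      const Twine &Name) {
      llvm::BasicBlock *InvokeDest = getInvokeDest();
      if (!InvokeDest)                        // no active EH scope: plain call
        return Builder.CreateCall(Callee, Args, Name);

      llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
      llvm::InvokeInst *Invoke =
          Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
      EmitBlock(ContBB);                      // continue on the normal edge
      return Invoke;
    }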
@@ -1456,28 +1491,45 @@ static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo,
}
void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
- llvm::SmallVector<llvm::Value*,16> &Args,
+ SmallVector<llvm::Value*,16> &Args,
llvm::FunctionType *IRFuncTy) {
- const RecordType *RT = Ty->getAsStructureType();
- assert(RT && "Can only expand structure types.");
-
- RecordDecl *RD = RT->getDecl();
- assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
- llvm::Value *Addr = RV.getAggregateAddr();
- for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
- i != e; ++i) {
- FieldDecl *FD = *i;
- QualType FT = FD->getType();
-
- // FIXME: What are the right qualifiers here?
- LValue LV = EmitLValueForField(Addr, FD, 0);
- if (CodeGenFunction::hasAggregateLLVMType(FT)) {
- ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()),
- Args, IRFuncTy);
- continue;
+ if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
+ unsigned NumElts = AT->getSize().getZExtValue();
+ QualType EltTy = AT->getElementType();
+ llvm::Value *Addr = RV.getAggregateAddr();
+ for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
+ llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
+ LValue LV = MakeAddrLValue(EltAddr, EltTy);
+ RValue EltRV;
+ if (CodeGenFunction::hasAggregateLLVMType(EltTy))
+ EltRV = RValue::getAggregate(LV.getAddress());
+ else
+ EltRV = EmitLoadOfLValue(LV);
+ ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy);
}
+ } else if (const RecordType *RT = Ty->getAsStructureType()) {
+ RecordDecl *RD = RT->getDecl();
+ assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
+ llvm::Value *Addr = RV.getAggregateAddr();
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i) {
+ FieldDecl *FD = *i;
+ QualType FT = FD->getType();
- RValue RV = EmitLoadOfLValue(LV);
+ // FIXME: What are the right qualifiers here?
+ LValue LV = EmitLValueForField(Addr, FD, 0);
+ RValue FldRV;
+ if (CodeGenFunction::hasAggregateLLVMType(FT))
+ FldRV = RValue::getAggregate(LV.getAddress());
+ else
+ FldRV = EmitLoadOfLValue(LV);
+ ExpandTypeToArgs(FT, FldRV, Args, IRFuncTy);
+ }
+ } else if (isa<ComplexType>(Ty)) {
+ ComplexPairTy CV = RV.getComplexVal();
+ Args.push_back(CV.first);
+ Args.push_back(CV.second);
+ } else {
assert(RV.isScalar() &&
"Unexpected non-scalar rvalue during struct expansion.");
@@ -1499,7 +1551,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
const Decl *TargetDecl,
llvm::Instruction **callOrInvoke) {
// FIXME: We no longer need the types from CallArgs; lift up and simplify.
- llvm::SmallVector<llvm::Value*, 16> Args;
+ SmallVector<llvm::Value*, 16> Args;
// Handle struct-return functions by passing a pointer to the
// location that we would like to return into.
@@ -1630,7 +1682,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// If the coerce-to type is a first class aggregate, we flatten it and
// pass the elements. Either way is semantically identical, but fast-isel
// and the optimizer generally like scalar values better than FCAs.
- if (const llvm::StructType *STy =
+ if (llvm::StructType *STy =
dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
SrcPtr = Builder.CreateBitCast(SrcPtr,
llvm::PointerType::getUnqual(STy));
@@ -1668,10 +1720,10 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// with unprototyped functions.
if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
- const llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType());
- const llvm::FunctionType *CurFT =
+ llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType());
+ llvm::FunctionType *CurFT =
cast<llvm::FunctionType>(CurPT->getElementType());
- const llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
+ llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
if (CE->getOpcode() == llvm::Instruction::BitCast &&
ActualFT->getReturnType() == CurFT->getReturnType() &&
@@ -1813,11 +1865,10 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}
case ABIArgInfo::Expand:
- assert(0 && "Invalid ABI kind for return argument");
+ llvm_unreachable("Invalid ABI kind for return argument");
}
- assert(0 && "Unhandled ABIArgInfo::Kind");
- return RValue::get(0);
+ llvm_unreachable("Unhandled ABIArgInfo::Kind");
}
/* VarArg handling */
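
A recurring change in this diff is replacing `assert(0 && ...)` with
llvm_unreachable. The practical difference: assert compiles away in
NDEBUG builds, leaving control to fall off the switch (hence the old
dummy `return RValue::get(0)`), while llvm_unreachable documents the
invariant, becomes an unreachable hint to the optimizer in release
builds, and still traps with the message in asserts builds. The
preferred pattern, with hypothetical handler names:

    // Sketch: exhaustive switch with impossible cases marked unreachable.
    switch (Kind) {
    case ABIArgInfo::Direct:   return handleDirect();    // hypothetical
    case ABIArgInfo::Indirect: return handleIndirect();  // hypothetical
    case ABIArgInfo::Expand:
      llvm_unreachable("Invalid ABI kind for return argument");
    }
    llvm_unreachable("Unhandled ABIArgInfo::Kind");  // no dummy return needed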
diff --git a/lib/CodeGen/CGCall.h b/lib/CodeGen/CGCall.h
index 343b944bf6c9..24ed366dd860 100644
--- a/lib/CodeGen/CGCall.h
+++ b/lib/CodeGen/CGCall.h
@@ -42,7 +42,7 @@ namespace clang {
class VarDecl;
namespace CodeGen {
- typedef llvm::SmallVector<llvm::AttributeWithIndex, 8> AttributeListType;
+ typedef SmallVector<llvm::AttributeWithIndex, 8> AttributeListType;
struct CallArg {
RValue RV;
@@ -56,7 +56,7 @@ namespace CodeGen {
/// CallArgList - Type for representing both the value and type of
/// arguments in a call.
class CallArgList :
- public llvm::SmallVector<CallArg, 16> {
+ public SmallVector<CallArg, 16> {
public:
struct Writeback {
/// The original argument.
@@ -90,18 +90,18 @@ namespace CodeGen {
bool hasWritebacks() const { return !Writebacks.empty(); }
- typedef llvm::SmallVectorImpl<Writeback>::const_iterator writeback_iterator;
+ typedef SmallVectorImpl<Writeback>::const_iterator writeback_iterator;
writeback_iterator writeback_begin() const { return Writebacks.begin(); }
writeback_iterator writeback_end() const { return Writebacks.end(); }
private:
- llvm::SmallVector<Writeback, 1> Writebacks;
+ SmallVector<Writeback, 1> Writebacks;
};
/// FunctionArgList - Type for representing both the decl and type
/// of parameters to a function. The decl must be either a
/// ParmVarDecl or ImplicitParamDecl.
- class FunctionArgList : public llvm::SmallVector<const VarDecl*, 16> {
+ class FunctionArgList : public SmallVector<const VarDecl*, 16> {
};
/// CGFunctionInfo - Class to encapsulate the information about a
diff --git a/lib/CodeGen/CGClass.cpp b/lib/CodeGen/CGClass.cpp
index 7dbaaf85299f..c28ecc05ded6 100644
--- a/lib/CodeGen/CGClass.cpp
+++ b/lib/CodeGen/CGClass.cpp
@@ -62,7 +62,7 @@ CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
if (Offset.isZero())
return 0;
- const llvm::Type *PtrDiffTy =
+ llvm::Type *PtrDiffTy =
Types.ConvertType(getContext().getPointerDiffType());
return llvm::ConstantInt::get(PtrDiffTy, Offset.getQuantity());
@@ -95,7 +95,7 @@ CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This,
// TODO: for complete types, this should be possible with a GEP.
llvm::Value *V = This;
if (Offset.isPositive()) {
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(getLLVMContext());
+ llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(getLLVMContext());
V = Builder.CreateBitCast(V, Int8PtrTy);
V = Builder.CreateConstInBoundsGEP1_64(V, Offset.getQuantity());
}
@@ -107,7 +107,7 @@ CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This,
static llvm::Value *
ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ThisPtr,
CharUnits NonVirtual, llvm::Value *Virtual) {
- const llvm::Type *PtrDiffTy =
+ llvm::Type *PtrDiffTy =
CGF.ConvertType(CGF.getContext().getPointerDiffType());
llvm::Value *NonVirtualOffset = 0;
@@ -125,7 +125,7 @@ ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ThisPtr,
BaseOffset = NonVirtualOffset;
// Apply the base offset.
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
ThisPtr = CGF.Builder.CreateBitCast(ThisPtr, Int8PtrTy);
ThisPtr = CGF.Builder.CreateGEP(ThisPtr, BaseOffset, "add.ptr");
@@ -155,7 +155,7 @@ CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
Start, PathEnd);
// Get the base pointer type.
- const llvm::Type *BasePtrTy =
+ llvm::Type *BasePtrTy =
ConvertType((PathEnd[-1])->getType())->getPointerTo();
if (NonVirtualOffset.isZero() && !VBase) {
@@ -225,7 +225,7 @@ CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
QualType DerivedTy =
getContext().getCanonicalType(getContext().getTagDeclType(Derived));
- const llvm::Type *DerivedPtrTy = ConvertType(DerivedTy)->getPointerTo();
+ llvm::Type *DerivedPtrTy = ConvertType(DerivedTy)->getPointerTo();
llvm::Value *NonVirtualOffset =
CGM.GetNonVirtualBaseClassOffset(Derived, PathBegin, PathEnd);
@@ -398,8 +398,11 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
BaseClassDecl,
isBaseVirtual);
- AggValueSlot AggSlot = AggValueSlot::forAddr(V, Qualifiers(),
- /*Lifetime*/ true);
+ AggValueSlot AggSlot =
+ AggValueSlot::forAddr(V, Qualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
CGF.EmitAggExpr(BaseInit->getInit(), AggSlot);
@@ -436,8 +439,11 @@ static void EmitAggMemberInitializer(CodeGenFunction &CGF,
CGF.EmitComplexExprIntoAddr(MemberInit->getInit(), Dest,
LHS.isVolatileQualified());
} else {
- AggValueSlot Slot = AggValueSlot::forAddr(Dest, LHS.getQuals(),
- /*Lifetime*/ true);
+ AggValueSlot Slot =
+ AggValueSlot::forAddr(Dest, LHS.getQuals(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
CGF.EmitAggExpr(MemberInit->getInit(), Slot);
}
@@ -521,6 +527,12 @@ namespace {
}
};
}
+
+static bool hasTrivialCopyOrMoveConstructor(const CXXRecordDecl *Record,
+ bool Moving) {
+ return Moving ? Record->hasTrivialMoveConstructor() :
+ Record->hasTrivialCopyConstructor();
+}
static void EmitMemberInitializer(CodeGenFunction &CGF,
const CXXRecordDecl *ClassDecl,
@@ -547,11 +559,7 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
LHS = CGF.EmitLValueForFieldInitialization(ThisPtr, Field, 0);
}
- // FIXME: If there's no initializer and the CXXCtorInitializer
- // was implicitly generated, we shouldn't be zeroing memory.
- if (FieldType->isArrayType() && !MemberInit->getInit()) {
- CGF.EmitNullInitialization(LHS.getAddress(), Field->getType());
- } else if (!CGF.hasAggregateLLVMType(Field->getType())) {
+ if (!CGF.hasAggregateLLVMType(Field->getType())) {
if (LHS.isSimple()) {
CGF.EmitExprAsInit(MemberInit->getInit(), Field, LHS, false);
} else {
@@ -565,15 +573,15 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
llvm::Value *ArrayIndexVar = 0;
const ConstantArrayType *Array
= CGF.getContext().getAsConstantArrayType(FieldType);
- if (Array && Constructor->isImplicit() &&
- Constructor->isCopyConstructor()) {
- const llvm::Type *SizeTy
+ if (Array && Constructor->isImplicitlyDefined() &&
+ Constructor->isCopyOrMoveConstructor()) {
+ llvm::Type *SizeTy
= CGF.ConvertType(CGF.getContext().getSizeType());
// The LHS is a pointer to the first object we'll be constructing, as
// a flat array.
QualType BaseElementTy = CGF.getContext().getBaseElementType(Array);
- const llvm::Type *BasePtr = CGF.ConvertType(BaseElementTy);
+ llvm::Type *BasePtr = CGF.ConvertType(BaseElementTy);
BasePtr = llvm::PointerType::getUnqual(BasePtr);
llvm::Value *BaseAddrPtr = CGF.Builder.CreateBitCast(LHS.getAddress(),
BasePtr);
@@ -589,7 +597,8 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
// constructors, perform a single aggregate copy.
const CXXRecordDecl *Record = BaseElementTy->getAsCXXRecordDecl();
if (BaseElementTy.isPODType(CGF.getContext()) ||
- (Record && Record->hasTrivialCopyConstructor())) {
+ (Record && hasTrivialCopyOrMoveConstructor(Record,
+ Constructor->isMoveConstructor()))) {
// Find the source pointer. We know it's the last argument because
// we know we're in a copy constructor.
unsigned SrcArgIndex = Args.size() - 1;
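
When the base element type is POD, or a class whose selected copy or
move constructor is trivial, the member initializer above degenerates
to one aggregate copy instead of a per-element constructor loop.
Conceptually (DestPtr, SrcPtr, Size and Align stand in for values the
real code computes, which routes the copy through EmitAggregateCopy):

    // Sketch: a trivial copy/move lets one memcpy replace the element loop.
    if (BaseElementTy.isPODType(getContext()) ||
        (Record && hasTrivialCopyOrMoveConstructor(
                       Record, Constructor->isMoveConstructor()))) {
      Builder.CreateMemCpy(DestPtr, SrcPtr, Size, Align);
      return;
    }
    // Otherwise fall through to the constructor-per-element loop.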
@@ -684,7 +693,7 @@ void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
// delegation optimization.
if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor)) {
if (CGDebugInfo *DI = getDebugInfo())
- DI->EmitStopPoint(Builder);
+ DI->EmitLocation(Builder, Ctor->getLocEnd());
EmitDelegateCXXConstructorCall(Ctor, Ctor_Base, Args);
return;
}
@@ -729,7 +738,7 @@ void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
const CXXRecordDecl *ClassDecl = CD->getParent();
- llvm::SmallVector<CXXCtorInitializer *, 8> MemberInitializers;
+ SmallVector<CXXCtorInitializer *, 8> MemberInitializers;
for (CXXConstructorDecl::init_const_iterator B = CD->init_begin(),
E = CD->init_end();
@@ -971,6 +980,10 @@ void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
const CXXRecordDecl *ClassDecl = DD->getParent();
+ // Unions have no bases and do not call field destructors.
+ if (ClassDecl->isUnion())
+ return;
+
// The complete-destructor phase just destructs all the virtual bases.
if (DtorType == Dtor_Complete) {
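
The new union early-return encodes a language rule rather than an
optimization: a union has no base classes, and because the compiler
cannot know which member is active, destroying a union never
implicitly destroys its members. A small illustration of the rule the
early-return relies on:

    // C++ illustration (not clang source): destroying a union object
    // runs no member destructors, and there are no bases to destroy.
    union Pod {
      int i;
      float f;
    };
    void g() {
      Pod p;
      p.i = 0;
    } // ~Pod() is trivial: nothing for EnterDtorCleanups to push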
@@ -1018,7 +1031,7 @@ void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
}
// Destroy direct fields.
- llvm::SmallVector<const FieldDecl *, 16> FieldDecls;
+ SmallVector<const FieldDecl *, 16> FieldDecls;
for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(),
E = ClassDecl->field_end(); I != E; ++I) {
const FieldDecl *field = *I;
@@ -1195,7 +1208,8 @@ CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
}
assert(ArgBeg + 1 == ArgEnd && "unexpected argcount for trivial ctor");
- assert(D->isCopyConstructor() && "trivial 1-arg ctor not a copy ctor");
+ assert(D->isCopyOrMoveConstructor() &&
+ "trivial 1-arg ctor not a copy/move ctor");
const Expr *E = (*ArgBeg);
QualType Ty = E->getType();
@@ -1217,7 +1231,8 @@ CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
CallExpr::const_arg_iterator ArgEnd) {
if (D->isTrivial()) {
assert(ArgBeg + 1 == ArgEnd && "unexpected argcount for trivial ctor");
- assert(D->isCopyConstructor() && "trivial 1-arg ctor not a copy ctor");
+ assert(D->isCopyOrMoveConstructor() &&
+ "trivial 1-arg ctor not a copy/move ctor");
EmitAggregateCopy(This, Src, (*ArgBeg)->getType());
return;
}
@@ -1236,7 +1251,7 @@ CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
// Push the src ptr.
QualType QT = *(FPT->arg_type_begin());
- const llvm::Type *t = CGM.getTypes().ConvertType(QT);
+ llvm::Type *t = CGM.getTypes().ConvertType(QT);
Src = Builder.CreateBitCast(Src, t);
Args.add(RValue::get(Src), QT);
@@ -1258,10 +1273,8 @@ CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
EmitCallArg(Args, *Arg, ArgType);
}
- QualType ResultType = FPT->getResultType();
- EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args,
- FPT->getExtInfo()),
- Callee, ReturnValueSlot(), Args, D);
+ EmitCall(CGM.getTypes().getFunctionInfo(Args, FPT), Callee,
+ ReturnValueSlot(), Args, D);
}
void
@@ -1326,7 +1339,10 @@ CodeGenFunction::EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor
llvm::Value *ThisPtr = LoadCXXThis();
AggValueSlot AggSlot =
- AggValueSlot::forAddr(ThisPtr, Qualifiers(), /*Lifetime*/ true);
+ AggValueSlot::forAddr(ThisPtr, Qualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
EmitAggExpr(Ctor->init_begin()[0]->getInit(), AggSlot);
@@ -1394,12 +1410,12 @@ CodeGenFunction::GetVirtualBaseClassOffset(llvm::Value *This,
const CXXRecordDecl *BaseClassDecl) {
llvm::Value *VTablePtr = GetVTablePtr(This, Int8PtrTy);
CharUnits VBaseOffsetOffset =
- CGM.getVTables().getVirtualBaseOffsetOffset(ClassDecl, BaseClassDecl);
+ CGM.getVTableContext().getVirtualBaseOffsetOffset(ClassDecl, BaseClassDecl);
llvm::Value *VBaseOffsetPtr =
Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset.getQuantity(),
"vbase.offset.ptr");
- const llvm::Type *PtrDiffTy =
+ llvm::Type *PtrDiffTy =
ConvertType(getContext().getPointerDiffType());
VBaseOffsetPtr = Builder.CreateBitCast(VBaseOffsetPtr,
@@ -1436,7 +1452,8 @@ CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
// And load the address point from the VTT.
VTableAddressPoint = Builder.CreateLoad(VTT);
} else {
- uint64_t AddressPoint = CGM.getVTables().getAddressPoint(Base, VTableClass);
+ uint64_t AddressPoint =
+ CGM.getVTableContext().getVTableLayout(VTableClass).getAddressPoint(Base);
VTableAddressPoint =
Builder.CreateConstInBoundsGEP2_64(VTable, 0, AddressPoint);
}
@@ -1465,7 +1482,7 @@ CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
VirtualOffset);
// Finally, store the address point.
- const llvm::Type *AddressPointPtrTy =
+ llvm::Type *AddressPointPtrTy =
VTableAddressPoint->getType()->getPointerTo();
VTableField = Builder.CreateBitCast(VTableField, AddressPointPtrTy);
Builder.CreateStore(VTableAddressPoint, VTableField);
@@ -1549,7 +1566,7 @@ void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
}
llvm::Value *CodeGenFunction::GetVTablePtr(llvm::Value *This,
- const llvm::Type *Ty) {
+ llvm::Type *Ty) {
llvm::Value *VTablePtrSrc = Builder.CreateBitCast(This, Ty->getPointerTo());
return Builder.CreateLoad(VTablePtrSrc, "vtable");
}
@@ -1605,7 +1622,6 @@ static const Expr *skipNoOpCastsAndParens(const Expr *E) {
/// canDevirtualizeMemberFunctionCall - Checks whether the given virtual member
/// function call on the given expr can be devirtualized.
-/// expr can be devirtualized.
static bool canDevirtualizeMemberFunctionCall(const Expr *Base,
const CXXMethodDecl *MD) {
// If the most derived class is marked final, we know that no subclass can
@@ -1677,7 +1693,7 @@ CodeGenFunction::EmitCXXOperatorMemberCallee(const CXXOperatorCallExpr *E,
const CXXMethodDecl *MD,
llvm::Value *This) {
const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
- const llvm::Type *Ty =
+ llvm::Type *Ty =
CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
FPT->isVariadic());
diff --git a/lib/CodeGen/CGCleanup.cpp b/lib/CodeGen/CGCleanup.cpp
index 9c5dd1f23721..b2d0786cb6cd 100644
--- a/lib/CodeGen/CGCleanup.cpp
+++ b/lib/CodeGen/CGCleanup.cpp
@@ -48,7 +48,7 @@ DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) {
if (rv.isComplex()) {
CodeGenFunction::ComplexPairTy V = rv.getComplexVal();
- const llvm::Type *ComplexTy =
+ llvm::Type *ComplexTy =
llvm::StructType::get(V.first->getType(), V.second->getType(),
(void*) 0);
llvm::Value *addr = CGF.CreateTempAlloca(ComplexTy, "saved-complex");
@@ -119,16 +119,30 @@ char *EHScopeStack::allocate(size_t Size) {
}
EHScopeStack::stable_iterator
-EHScopeStack::getEnclosingEHCleanup(iterator it) const {
- assert(it != end());
- do {
- if (isa<EHCleanupScope>(*it)) {
- if (cast<EHCleanupScope>(*it).isEHCleanup())
- return stabilize(it);
- return cast<EHCleanupScope>(*it).getEnclosingEHCleanup();
+EHScopeStack::getInnermostActiveNormalCleanup() const {
+ for (stable_iterator si = getInnermostNormalCleanup(), se = stable_end();
+ si != se; ) {
+ EHCleanupScope &cleanup = cast<EHCleanupScope>(*find(si));
+ if (cleanup.isActive()) return si;
+ si = cleanup.getEnclosingNormalCleanup();
+ }
+ return stable_end();
+}
+
+EHScopeStack::stable_iterator EHScopeStack::getInnermostActiveEHScope() const {
+ for (stable_iterator si = getInnermostEHScope(), se = stable_end();
+ si != se; ) {
+ // Skip over inactive cleanups.
+ EHCleanupScope *cleanup = dyn_cast<EHCleanupScope>(&*find(si));
+ if (cleanup && !cleanup->isActive()) {
+ si = cleanup->getEnclosingEHScope();
+ continue;
}
- ++it;
- } while (it != end());
+
+ // All other scopes are always active.
+ return si;
+ }
+
return stable_end();
}
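
Both walkers above exploit the new stack layout: each scope now stores
a stable iterator to its enclosing scope, so finding the innermost
active scope is a pointer-chase through those links rather than a
linear scan of the whole stack. The shared shape of the walk, as a
standalone sketch:

    // Sketch: follow enclosing-scope links, skipping inactive cleanups.
    EHScopeStack::stable_iterator
    firstActiveScope(EHScopeStack &Stack, EHScopeStack::stable_iterator si) {
      while (si != Stack.stable_end()) {
        EHCleanupScope *cleanup = dyn_cast<EHCleanupScope>(&*Stack.find(si));
        if (!cleanup || cleanup->isActive())
          return si;                          // active cleanup, or a non-cleanup
        si = cleanup->getEnclosingEHScope();  // hop over the inactive scope
      }
      return Stack.stable_end();
    }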
@@ -146,11 +160,11 @@ void *EHScopeStack::pushCleanup(CleanupKind Kind, size_t Size) {
Size,
BranchFixups.size(),
InnermostNormalCleanup,
- InnermostEHCleanup);
+ InnermostEHScope);
if (IsNormalCleanup)
InnermostNormalCleanup = stable_begin();
if (IsEHCleanup)
- InnermostEHCleanup = stable_begin();
+ InnermostEHScope = stable_begin();
return Scope->getCleanupBuffer();
}
@@ -161,11 +175,9 @@ void EHScopeStack::popCleanup() {
assert(isa<EHCleanupScope>(*begin()));
EHCleanupScope &Cleanup = cast<EHCleanupScope>(*begin());
InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup();
- InnermostEHCleanup = Cleanup.getEnclosingEHCleanup();
+ InnermostEHScope = Cleanup.getEnclosingEHScope();
StartOfData += Cleanup.getAllocatedSize();
- if (empty()) NextEHDestIndex = FirstEHDestIndex;
-
// Destroy the cleanup.
Cleanup.~EHCleanupScope();
@@ -182,37 +194,35 @@ void EHScopeStack::popCleanup() {
}
}
-EHFilterScope *EHScopeStack::pushFilter(unsigned NumFilters) {
- char *Buffer = allocate(EHFilterScope::getSizeForNumFilters(NumFilters));
- CatchDepth++;
- return new (Buffer) EHFilterScope(NumFilters);
+EHFilterScope *EHScopeStack::pushFilter(unsigned numFilters) {
+ assert(getInnermostEHScope() == stable_end());
+ char *buffer = allocate(EHFilterScope::getSizeForNumFilters(numFilters));
+ EHFilterScope *filter = new (buffer) EHFilterScope(numFilters);
+ InnermostEHScope = stable_begin();
+ return filter;
}
void EHScopeStack::popFilter() {
assert(!empty() && "popping exception stack when not empty");
- EHFilterScope &Filter = cast<EHFilterScope>(*begin());
- StartOfData += EHFilterScope::getSizeForNumFilters(Filter.getNumFilters());
+ EHFilterScope &filter = cast<EHFilterScope>(*begin());
+ StartOfData += EHFilterScope::getSizeForNumFilters(filter.getNumFilters());
- if (empty()) NextEHDestIndex = FirstEHDestIndex;
-
- assert(CatchDepth > 0 && "mismatched filter push/pop");
- CatchDepth--;
+ InnermostEHScope = filter.getEnclosingEHScope();
}
-EHCatchScope *EHScopeStack::pushCatch(unsigned NumHandlers) {
- char *Buffer = allocate(EHCatchScope::getSizeForNumHandlers(NumHandlers));
- CatchDepth++;
- EHCatchScope *Scope = new (Buffer) EHCatchScope(NumHandlers);
- for (unsigned I = 0; I != NumHandlers; ++I)
- Scope->getHandlers()[I].Index = getNextEHDestIndex();
- return Scope;
+EHCatchScope *EHScopeStack::pushCatch(unsigned numHandlers) {
+ char *buffer = allocate(EHCatchScope::getSizeForNumHandlers(numHandlers));
+ EHCatchScope *scope =
+ new (buffer) EHCatchScope(numHandlers, InnermostEHScope);
+ InnermostEHScope = stable_begin();
+ return scope;
}
void EHScopeStack::pushTerminate() {
char *Buffer = allocate(EHTerminateScope::getSize());
- CatchDepth++;
- new (Buffer) EHTerminateScope(getNextEHDestIndex());
+ new (Buffer) EHTerminateScope(InnermostEHScope);
+ InnermostEHScope = stable_begin();
}
/// Remove any 'null' fixups on the stack. However, we can't pop more
@@ -384,17 +394,6 @@ static llvm::BasicBlock *CreateNormalEntry(CodeGenFunction &CGF,
return Entry;
}
-static llvm::BasicBlock *CreateEHEntry(CodeGenFunction &CGF,
- EHCleanupScope &Scope) {
- assert(Scope.isEHCleanup());
- llvm::BasicBlock *Entry = Scope.getEHBlock();
- if (!Entry) {
- Entry = CGF.createBasicBlock("eh.cleanup");
- Scope.setEHBlock(Entry);
- }
- return Entry;
-}
-
/// Attempts to reduce a cleanup's entry block to a fallthrough. This
/// is basically llvm::MergeBlockIntoPredecessor, except
/// simplified/optimized for the tighter constraints on cleanup blocks.
@@ -483,6 +482,49 @@ static void ForwardPrebranchedFallthrough(llvm::BasicBlock *Exit,
}
}
+/// We don't need a normal entry block for the given cleanup.
+/// Optimistic fixup branches can cause such a block to come into
+/// existence anyway; if so, destroy it.
+///
+/// The validity of this transformation is very much specific to the
+/// exact ways in which we form branches to cleanup entries.
+static void destroyOptimisticNormalEntry(CodeGenFunction &CGF,
+ EHCleanupScope &scope) {
+ llvm::BasicBlock *entry = scope.getNormalBlock();
+ if (!entry) return;
+
+ // Replace all the uses with unreachable.
+ llvm::BasicBlock *unreachableBB = CGF.getUnreachableBlock();
+ for (llvm::BasicBlock::use_iterator
+ i = entry->use_begin(), e = entry->use_end(); i != e; ) {
+ llvm::Use &use = i.getUse();
+ ++i;
+
+ use.set(unreachableBB);
+
+ // The only uses should be fixup switches.
+ llvm::SwitchInst *si = cast<llvm::SwitchInst>(use.getUser());
+ if (si->getNumCases() == 2 && si->getDefaultDest() == unreachableBB) {
+ // Replace the switch with a branch.
+ llvm::BranchInst::Create(si->getSuccessor(1), si);
+
+ // The switch operand is a load from the cleanup-dest alloca.
+ llvm::LoadInst *condition = cast<llvm::LoadInst>(si->getCondition());
+
+ // Destroy the switch.
+ si->eraseFromParent();
+
+ // Destroy the load.
+ assert(condition->getOperand(0) == CGF.NormalCleanupDest);
+ assert(condition->use_empty());
+ condition->eraseFromParent();
+ }
+ }
+
+ assert(entry->use_empty());
+ delete entry;
+}
+
/// Pops a cleanup block. If the block includes a normal cleanup, the
/// current insertion point is threaded through the cleanup, as are
/// any branch fixups on the cleanup.
@@ -501,7 +543,10 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
// Check whether we need an EH cleanup. This is only true if we've
// generated a lazy EH cleanup block.
- bool RequiresEHCleanup = Scope.hasEHBranches();
+ llvm::BasicBlock *EHEntry = Scope.getCachedEHDispatchBlock();
+ assert(Scope.hasEHBranches() == (EHEntry != 0));
+ bool RequiresEHCleanup = (EHEntry != 0);
+ EHScopeStack::stable_iterator EHParent = Scope.getEnclosingEHScope();
// Check the three conditions which might require a normal cleanup:
@@ -537,43 +582,37 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
RequiresNormalCleanup = true;
}
- EHScopeStack::Cleanup::Flags cleanupFlags;
- if (Scope.isNormalCleanup())
- cleanupFlags.setIsNormalCleanupKind();
- if (Scope.isEHCleanup())
- cleanupFlags.setIsEHCleanupKind();
-
- // Even if we don't need the normal cleanup, we might still have
- // prebranched fallthrough to worry about.
- if (Scope.isNormalCleanup() && !RequiresNormalCleanup &&
- HasPrebranchedFallthrough) {
- assert(!IsActive);
-
- llvm::BasicBlock *NormalEntry = Scope.getNormalBlock();
-
- // If we're branching through this cleanup, just forward the
- // prebranched fallthrough to the next cleanup, leaving the insert
- // point in the old block.
+ // If we have a prebranched fallthrough into an inactive normal
+ // cleanup, rewrite it so that it leads to the appropriate place.
+ if (Scope.isNormalCleanup() && HasPrebranchedFallthrough && !IsActive) {
+ llvm::BasicBlock *prebranchDest;
+
+ // If the prebranch is semantically branching through the next
+ // cleanup, just forward it to the next block, leaving the
+ // insertion point in the prebranched block.
if (FallthroughIsBranchThrough) {
- EHScope &S = *EHStack.find(Scope.getEnclosingNormalCleanup());
- llvm::BasicBlock *EnclosingEntry =
- CreateNormalEntry(*this, cast<EHCleanupScope>(S));
-
- ForwardPrebranchedFallthrough(FallthroughSource,
- NormalEntry, EnclosingEntry);
- assert(NormalEntry->use_empty() &&
- "uses of entry remain after forwarding?");
- delete NormalEntry;
+ EHScope &enclosing = *EHStack.find(Scope.getEnclosingNormalCleanup());
+ prebranchDest = CreateNormalEntry(*this, cast<EHCleanupScope>(enclosing));
- // Otherwise, we're branching out; just emit the next block.
+ // Otherwise, we need to make a new block. If the normal cleanup
+ // isn't being used at all, we could actually reuse the normal
+ // entry block, but this is simpler, and it avoids conflicts with
+ // dead optimistic fixup branches.
} else {
- EmitBlock(NormalEntry);
- SimplifyCleanupEntry(*this, NormalEntry);
+ prebranchDest = createBasicBlock("forwarded-prebranch");
+ EmitBlock(prebranchDest);
}
+
+ llvm::BasicBlock *normalEntry = Scope.getNormalBlock();
+ assert(normalEntry && !normalEntry->use_empty());
+
+ ForwardPrebranchedFallthrough(FallthroughSource,
+ normalEntry, prebranchDest);
}
// If we don't need the cleanup at all, we're done.
if (!RequiresNormalCleanup && !RequiresEHCleanup) {
+ destroyOptimisticNormalEntry(*this, Scope);
EHStack.popCleanup(); // safe because there are no fixups
assert(EHStack.getNumBranchFixups() == 0 ||
EHStack.hasNormalCleanups());
@@ -583,7 +622,7 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
// Copy the cleanup emission data out. Note that SmallVector
// guarantees maximal alignment for its buffer regardless of its
// type parameter.
- llvm::SmallVector<char, 8*sizeof(void*)> CleanupBuffer;
+ SmallVector<char, 8*sizeof(void*)> CleanupBuffer;
CleanupBuffer.reserve(Scope.getCleanupSize());
memcpy(CleanupBuffer.data(),
Scope.getCleanupBuffer(), Scope.getCleanupSize());
@@ -591,63 +630,14 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
EHScopeStack::Cleanup *Fn =
reinterpret_cast<EHScopeStack::Cleanup*>(CleanupBuffer.data());
- // We want to emit the EH cleanup after the normal cleanup, but go
- // ahead and do the setup for the EH cleanup while the scope is still
- // alive.
- llvm::BasicBlock *EHEntry = 0;
- llvm::SmallVector<llvm::Instruction*, 2> EHInstsToAppend;
- if (RequiresEHCleanup) {
- EHEntry = CreateEHEntry(*this, Scope);
-
- // Figure out the branch-through dest if necessary.
- llvm::BasicBlock *EHBranchThroughDest = 0;
- if (Scope.hasEHBranchThroughs()) {
- assert(Scope.getEnclosingEHCleanup() != EHStack.stable_end());
- EHScope &S = *EHStack.find(Scope.getEnclosingEHCleanup());
- EHBranchThroughDest = CreateEHEntry(*this, cast<EHCleanupScope>(S));
- }
-
- // If we have exactly one branch-after and no branch-throughs, we
- // can dispatch it without a switch.
- if (!Scope.hasEHBranchThroughs() &&
- Scope.getNumEHBranchAfters() == 1) {
- assert(!EHBranchThroughDest);
-
- // TODO: remove the spurious eh.cleanup.dest stores if this edge
- // never went through any switches.
- llvm::BasicBlock *BranchAfterDest = Scope.getEHBranchAfterBlock(0);
- EHInstsToAppend.push_back(llvm::BranchInst::Create(BranchAfterDest));
-
- // Otherwise, if we have any branch-afters, we need a switch.
- } else if (Scope.getNumEHBranchAfters()) {
- // The default of the switch belongs to the branch-throughs if
- // they exist.
- llvm::BasicBlock *Default =
- (EHBranchThroughDest ? EHBranchThroughDest : getUnreachableBlock());
-
- const unsigned SwitchCapacity = Scope.getNumEHBranchAfters();
-
- llvm::LoadInst *Load =
- new llvm::LoadInst(getEHCleanupDestSlot(), "cleanup.dest");
- llvm::SwitchInst *Switch =
- llvm::SwitchInst::Create(Load, Default, SwitchCapacity);
-
- EHInstsToAppend.push_back(Load);
- EHInstsToAppend.push_back(Switch);
-
- for (unsigned I = 0, E = Scope.getNumEHBranchAfters(); I != E; ++I)
- Switch->addCase(Scope.getEHBranchAfterIndex(I),
- Scope.getEHBranchAfterBlock(I));
-
- // Otherwise, we have only branch-throughs; jump to the next EH
- // cleanup.
- } else {
- assert(EHBranchThroughDest);
- EHInstsToAppend.push_back(llvm::BranchInst::Create(EHBranchThroughDest));
- }
- }
+ EHScopeStack::Cleanup::Flags cleanupFlags;
+ if (Scope.isNormalCleanup())
+ cleanupFlags.setIsNormalCleanupKind();
+ if (Scope.isEHCleanup())
+ cleanupFlags.setIsEHCleanupKind();
if (!RequiresNormalCleanup) {
+ destroyOptimisticNormalEntry(*this, Scope);
EHStack.popCleanup();
} else {
// If we have a fallthrough and no other need for the cleanup,
@@ -655,15 +645,7 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
if (HasFallthrough && !HasPrebranchedFallthrough &&
!HasFixups && !HasExistingBranches) {
- // Fixups can cause us to optimistically create a normal block,
- // only to later have no real uses for it. Just delete it in
- // this case.
- // TODO: we can potentially simplify all the uses after this.
- if (Scope.getNormalBlock()) {
- Scope.getNormalBlock()->replaceAllUsesWith(getUnreachableBlock());
- delete Scope.getNormalBlock();
- }
-
+ destroyOptimisticNormalEntry(*this, Scope);
EHStack.popCleanup();
EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);
@@ -676,18 +658,19 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
// I. Set up the fallthrough edge in.
+ CGBuilderTy::InsertPoint savedInactiveFallthroughIP;
+
// If there's a fallthrough, we need to store the cleanup
// destination index. For fall-throughs this is always zero.
if (HasFallthrough) {
if (!HasPrebranchedFallthrough)
Builder.CreateStore(Builder.getInt32(0), getNormalCleanupDestSlot());
- // Otherwise, clear the IP if we don't have fallthrough because
- // the cleanup is inactive. We don't need to save it because
- // it's still just FallthroughSource.
+ // Otherwise, save and clear the IP if we don't have fallthrough
+ // because the cleanup is inactive.
} else if (FallthroughSource) {
assert(!IsActive && "source without fallthrough for active cleanup");
- Builder.ClearInsertionPoint();
+ savedInactiveFallthroughIP = Builder.saveAndClearIP();
}
// II. Emit the entry block. This implicitly branches to it if
@@ -716,7 +699,7 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
}
llvm::BasicBlock *FallthroughDest = 0;
- llvm::SmallVector<llvm::Instruction*, 2> InstsToAppend;
+ SmallVector<llvm::Instruction*, 2> InstsToAppend;
// If there's exactly one branch-after and no other threads,
// we can route it without a switch.
@@ -800,25 +783,14 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
// V. Set up the fallthrough edge out.
- // Case 1: a fallthrough source exists but shouldn't branch to
- // the cleanup because the cleanup is inactive.
+ // Case 1: a fallthrough source exists but doesn't branch to the
+ // cleanup because the cleanup is inactive.
if (!HasFallthrough && FallthroughSource) {
+ // Prebranched fallthrough was forwarded earlier.
+ // Non-prebranched fallthrough doesn't need to be forwarded.
+ // Either way, all we need to do is restore the IP we cleared before.
assert(!IsActive);
-
- // If we have a prebranched fallthrough, that needs to be
- // forwarded to the right block.
- if (HasPrebranchedFallthrough) {
- llvm::BasicBlock *Next;
- if (FallthroughIsBranchThrough) {
- Next = BranchThroughDest;
- assert(!FallthroughDest);
- } else {
- Next = FallthroughDest;
- }
-
- ForwardPrebranchedFallthrough(FallthroughSource, NormalEntry, Next);
- }
- Builder.SetInsertPoint(FallthroughSource);
+ Builder.restoreIP(savedInactiveFallthroughIP);
// Case 2: a fallthrough source exists and should branch to the
// cleanup, but we're not supposed to branch through to the next
@@ -864,10 +836,7 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
cleanupFlags.setIsForEHCleanup();
EmitCleanup(*this, Fn, cleanupFlags, EHActiveFlag);
- // Append the prepared cleanup prologue from above.
- llvm::BasicBlock *EHExit = Builder.GetInsertBlock();
- for (unsigned I = 0, E = EHInstsToAppend.size(); I != E; ++I)
- EHExit->getInstList().push_back(EHInstsToAppend[I]);
+ Builder.CreateBr(getEHDispatchBlock(EHParent));
Builder.restoreIP(SavedIP);
@@ -979,64 +948,6 @@ void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
Builder.ClearInsertionPoint();
}
-void CodeGenFunction::EmitBranchThroughEHCleanup(UnwindDest Dest) {
- // We should never get invalid scope depths for an UnwindDest; that
- // implies that the destination wasn't set up correctly.
- assert(Dest.getScopeDepth().isValid() && "invalid scope depth on EH dest?");
-
- if (!HaveInsertPoint())
- return;
-
- // Create the branch.
- llvm::BranchInst *BI = Builder.CreateBr(Dest.getBlock());
-
- // Calculate the innermost active cleanup.
- EHScopeStack::stable_iterator
- InnermostCleanup = EHStack.getInnermostActiveEHCleanup();
-
- // If the destination is in the same EH cleanup scope as us, we
- // don't need to thread through anything.
- if (InnermostCleanup.encloses(Dest.getScopeDepth())) {
- Builder.ClearInsertionPoint();
- return;
- }
- assert(InnermostCleanup != EHStack.stable_end());
-
- // Store the index at the start.
- llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex());
- new llvm::StoreInst(Index, getEHCleanupDestSlot(), BI);
-
- // Adjust BI to point to the first cleanup block.
- {
- EHCleanupScope &Scope =
- cast<EHCleanupScope>(*EHStack.find(InnermostCleanup));
- BI->setSuccessor(0, CreateEHEntry(*this, Scope));
- }
-
- // Add this destination to all the scopes involved.
- for (EHScopeStack::stable_iterator
- I = InnermostCleanup, E = Dest.getScopeDepth(); ; ) {
- assert(E.strictlyEncloses(I));
- EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I));
- assert(Scope.isEHCleanup());
- I = Scope.getEnclosingEHCleanup();
-
- // If this is the last cleanup we're propagating through, add this
- // as a branch-after.
- if (I == E) {
- Scope.addEHBranchAfter(Index, Dest.getBlock());
- break;
- }
-
- // Otherwise, add it as a branch-through. If this isn't new
- // information, all the rest of the work has been done before.
- if (!Scope.addEHBranchThrough(Dest.getBlock()))
- break;
- }
-
- Builder.ClearInsertionPoint();
-}
-
static bool IsUsedAsNormalCleanup(EHScopeStack &EHStack,
EHScopeStack::stable_iterator C) {
// If we needed a normal block for any reason, that counts.
@@ -1057,18 +968,21 @@ static bool IsUsedAsNormalCleanup(EHScopeStack &EHStack,
}
static bool IsUsedAsEHCleanup(EHScopeStack &EHStack,
- EHScopeStack::stable_iterator C) {
+ EHScopeStack::stable_iterator cleanup) {
// If we needed an EH block for any reason, that counts.
- if (cast<EHCleanupScope>(*EHStack.find(C)).getEHBlock())
+ if (EHStack.find(cleanup)->hasEHBranches())
return true;
// Check whether any enclosed cleanups were needed.
for (EHScopeStack::stable_iterator
- I = EHStack.getInnermostEHCleanup(); I != C; ) {
- assert(C.strictlyEncloses(I));
- EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I));
- if (S.getEHBlock()) return true;
- I = S.getEnclosingEHCleanup();
+ i = EHStack.getInnermostEHScope(); i != cleanup; ) {
+ assert(cleanup.strictlyEncloses(i));
+
+ EHScope &scope = *EHStack.find(i);
+ if (scope.hasEHBranches())
+ return true;
+
+ i = scope.getEnclosingEHScope();
}
return false;
@@ -1163,10 +1077,3 @@ llvm::Value *CodeGenFunction::getNormalCleanupDestSlot() {
CreateTempAlloca(Builder.getInt32Ty(), "cleanup.dest.slot");
return NormalCleanupDest;
}
-
-llvm::Value *CodeGenFunction::getEHCleanupDestSlot() {
- if (!EHCleanupDest)
- EHCleanupDest =
- CreateTempAlloca(Builder.getInt32Ty(), "eh.cleanup.dest.slot");
- return EHCleanupDest;
-}
diff --git a/lib/CodeGen/CGCleanup.h b/lib/CodeGen/CGCleanup.h
index c93ec5bb76a9..7726e442c025 100644
--- a/lib/CodeGen/CGCleanup.h
+++ b/lib/CodeGen/CGCleanup.h
@@ -29,25 +29,102 @@ namespace CodeGen {
/// A protected scope for zero-cost EH handling.
class EHScope {
llvm::BasicBlock *CachedLandingPad;
+ llvm::BasicBlock *CachedEHDispatchBlock;
- unsigned K : 2;
+ EHScopeStack::stable_iterator EnclosingEHScope;
+
+ class CommonBitFields {
+ friend class EHScope;
+ unsigned Kind : 2;
+ };
+ enum { NumCommonBits = 2 };
protected:
- enum { BitsRemaining = 30 };
+ class CatchBitFields {
+ friend class EHCatchScope;
+ unsigned : NumCommonBits;
+
+ unsigned NumHandlers : 32 - NumCommonBits;
+ };
+
+ class CleanupBitFields {
+ friend class EHCleanupScope;
+ unsigned : NumCommonBits;
+
+ /// Whether this cleanup needs to be run along normal edges.
+ unsigned IsNormalCleanup : 1;
+
+ /// Whether this cleanup needs to be run along exception edges.
+ unsigned IsEHCleanup : 1;
+
+ /// Whether this cleanup is currently active.
+ unsigned IsActive : 1;
+
+ /// Whether the normal cleanup should test the activation flag.
+ unsigned TestFlagInNormalCleanup : 1;
+
+ /// Whether the EH cleanup should test the activation flag.
+ unsigned TestFlagInEHCleanup : 1;
+
+ /// The amount of extra storage needed by the Cleanup.
+ /// Always a multiple of the scope-stack alignment.
+ unsigned CleanupSize : 12;
+
+ /// The number of fixups required by enclosing scopes (not including
+ /// this one). If this is the top cleanup scope, all the fixups
+ /// from this index onwards belong to this scope.
+ unsigned FixupDepth : 32 - 17 - NumCommonBits; // currently 13
+ };
+
+ class FilterBitFields {
+ friend class EHFilterScope;
+ unsigned : NumCommonBits;
+
+ unsigned NumFilters : 32 - NumCommonBits;
+ };
+
+ union {
+ CommonBitFields CommonBits;
+ CatchBitFields CatchBits;
+ CleanupBitFields CleanupBits;
+ FilterBitFields FilterBits;
+ };
public:
enum Kind { Cleanup, Catch, Terminate, Filter };
- EHScope(Kind K) : CachedLandingPad(0), K(K) {}
+ EHScope(Kind kind, EHScopeStack::stable_iterator enclosingEHScope)
+ : CachedLandingPad(0), CachedEHDispatchBlock(0),
+ EnclosingEHScope(enclosingEHScope) {
+ CommonBits.Kind = kind;
+ }
- Kind getKind() const { return static_cast<Kind>(K); }
+ Kind getKind() const { return static_cast<Kind>(CommonBits.Kind); }
llvm::BasicBlock *getCachedLandingPad() const {
return CachedLandingPad;
}
- void setCachedLandingPad(llvm::BasicBlock *Block) {
- CachedLandingPad = Block;
+ void setCachedLandingPad(llvm::BasicBlock *block) {
+ CachedLandingPad = block;
+ }
+
+ llvm::BasicBlock *getCachedEHDispatchBlock() const {
+ return CachedEHDispatchBlock;
+ }
+
+ void setCachedEHDispatchBlock(llvm::BasicBlock *block) {
+ CachedEHDispatchBlock = block;
+ }
+
+ bool hasEHBranches() const {
+ if (llvm::BasicBlock *block = getCachedEHDispatchBlock())
+ return !block->use_empty();
+ return false;
+ }
+
+ EHScopeStack::stable_iterator getEnclosingEHScope() const {
+ return EnclosingEHScope;
}
};
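
The layout change above is worth spelling out: the old code gave every
subclass ad-hoc bitfields packed next to a 2-bit kind, while the new
code overlays all per-kind bitfield structs in one anonymous union,
each struct skipping the bits the base class owns. A stripped-down
sketch of the pattern, with hypothetical names:

    // Sketch: per-kind bitfields sharing one 32-bit word with a kind tag.
    class Scope {
      class CommonBitFields { friend class Scope; unsigned Kind : 2; };
      enum { NumCommonBits = 2 };
      class CatchLikeBitFields {
        friend class Scope;
        unsigned : NumCommonBits;                  // reserved for Kind
        unsigned NumHandlers : 32 - NumCommonBits;
      };
      union {                                      // all overlay one word
        CommonBitFields CommonBits;
        CatchLikeBitFields CatchBits;
      };
    public:
      explicit Scope(unsigned kind) { CommonBits.Kind = kind; }
      unsigned getKind() const { return CommonBits.Kind; }
      void setNumHandlers(unsigned n) { CatchBits.NumHandlers = n; }
    };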
@@ -57,8 +134,6 @@ public:
/// Objective C @finally blocks are represented using a cleanup scope
/// after the catch scope.
class EHCatchScope : public EHScope {
- unsigned NumHandlers : BitsRemaining;
-
// In effect, we have a flexible array member
// Handler Handlers[0];
// But that's only standard in C99, not C++, so we have to do
@@ -73,8 +148,7 @@ public:
/// The catch handler for this type.
llvm::BasicBlock *Block;
- /// The unwind destination index for this handler.
- unsigned Index;
+ bool isCatchAll() const { return Type == 0; }
};
private:
@@ -93,12 +167,14 @@ public:
return sizeof(EHCatchScope) + N * sizeof(Handler);
}
- EHCatchScope(unsigned NumHandlers)
- : EHScope(Catch), NumHandlers(NumHandlers) {
+ EHCatchScope(unsigned numHandlers,
+ EHScopeStack::stable_iterator enclosingEHScope)
+ : EHScope(Catch, enclosingEHScope) {
+ CatchBits.NumHandlers = numHandlers;
}
unsigned getNumHandlers() const {
- return NumHandlers;
+ return CatchBits.NumHandlers;
}
void setCatchAllHandler(unsigned I, llvm::BasicBlock *Block) {
@@ -127,44 +203,16 @@ public:
/// A cleanup scope which generates the cleanup blocks lazily.
class EHCleanupScope : public EHScope {
- /// Whether this cleanup needs to be run along normal edges.
- bool IsNormalCleanup : 1;
-
- /// Whether this cleanup needs to be run along exception edges.
- bool IsEHCleanup : 1;
-
- /// Whether this cleanup is currently active.
- bool IsActive : 1;
-
- /// Whether the normal cleanup should test the activation flag.
- bool TestFlagInNormalCleanup : 1;
-
- /// Whether the EH cleanup should test the activation flag.
- bool TestFlagInEHCleanup : 1;
-
- /// The amount of extra storage needed by the Cleanup.
- /// Always a multiple of the scope-stack alignment.
- unsigned CleanupSize : 12;
-
- /// The number of fixups required by enclosing scopes (not including
- /// this one). If this is the top cleanup scope, all the fixups
- /// from this index onwards belong to this scope.
- unsigned FixupDepth : BitsRemaining - 17; // currently 13
-
/// The nearest normal cleanup scope enclosing this one.
EHScopeStack::stable_iterator EnclosingNormal;
- /// The nearest EH cleanup scope enclosing this one.
+ /// The nearest EH scope enclosing this one.
EHScopeStack::stable_iterator EnclosingEH;
/// The dual entry/exit block along the normal edge. This is lazily
/// created if needed before the cleanup is popped.
llvm::BasicBlock *NormalBlock;
- /// The dual entry/exit block along the EH edge. This is lazily
- /// created if needed before the cleanup is popped.
- llvm::BasicBlock *EHBlock;
-
/// An optional i1 variable indicating whether this cleanup has been
/// activated yet.
llvm::AllocaInst *ActiveFlag;
@@ -178,17 +226,8 @@ class EHCleanupScope : public EHScope {
llvm::SmallPtrSet<llvm::BasicBlock*, 4> Branches;
/// Normal branch-afters.
- llvm::SmallVector<std::pair<llvm::BasicBlock*,llvm::ConstantInt*>, 4>
+ SmallVector<std::pair<llvm::BasicBlock*,llvm::ConstantInt*>, 4>
BranchAfters;
-
- /// The destinations of EH branch-afters and branch-throughs.
- /// TODO: optimize for the extremely common case of a single
- /// branch-through.
- llvm::SmallPtrSet<llvm::BasicBlock*, 4> EHBranches;
-
- /// EH branch-afters.
- llvm::SmallVector<std::pair<llvm::BasicBlock*,llvm::ConstantInt*>, 4>
- EHBranchAfters;
};
mutable struct ExtInfo *ExtInfo;
@@ -210,56 +249,64 @@ public:
}
size_t getAllocatedSize() const {
- return sizeof(EHCleanupScope) + CleanupSize;
+ return sizeof(EHCleanupScope) + CleanupBits.CleanupSize;
}
- EHCleanupScope(bool IsNormal, bool IsEH, bool IsActive,
- unsigned CleanupSize, unsigned FixupDepth,
- EHScopeStack::stable_iterator EnclosingNormal,
- EHScopeStack::stable_iterator EnclosingEH)
- : EHScope(EHScope::Cleanup),
- IsNormalCleanup(IsNormal), IsEHCleanup(IsEH), IsActive(IsActive),
- TestFlagInNormalCleanup(false), TestFlagInEHCleanup(false),
- CleanupSize(CleanupSize), FixupDepth(FixupDepth),
- EnclosingNormal(EnclosingNormal), EnclosingEH(EnclosingEH),
- NormalBlock(0), EHBlock(0), ActiveFlag(0), ExtInfo(0)
- {
- assert(this->CleanupSize == CleanupSize && "cleanup size overflow");
+ EHCleanupScope(bool isNormal, bool isEH, bool isActive,
+ unsigned cleanupSize, unsigned fixupDepth,
+ EHScopeStack::stable_iterator enclosingNormal,
+ EHScopeStack::stable_iterator enclosingEH)
+ : EHScope(EHScope::Cleanup, enclosingEH), EnclosingNormal(enclosingNormal),
+ NormalBlock(0), ActiveFlag(0), ExtInfo(0) {
+ CleanupBits.IsNormalCleanup = isNormal;
+ CleanupBits.IsEHCleanup = isEH;
+ CleanupBits.IsActive = isActive;
+ CleanupBits.TestFlagInNormalCleanup = false;
+ CleanupBits.TestFlagInEHCleanup = false;
+ CleanupBits.CleanupSize = cleanupSize;
+ CleanupBits.FixupDepth = fixupDepth;
+
+ assert(CleanupBits.CleanupSize == cleanupSize && "cleanup size overflow");
}
~EHCleanupScope() {
delete ExtInfo;
}
- bool isNormalCleanup() const { return IsNormalCleanup; }
+ bool isNormalCleanup() const { return CleanupBits.IsNormalCleanup; }
llvm::BasicBlock *getNormalBlock() const { return NormalBlock; }
void setNormalBlock(llvm::BasicBlock *BB) { NormalBlock = BB; }
- bool isEHCleanup() const { return IsEHCleanup; }
- llvm::BasicBlock *getEHBlock() const { return EHBlock; }
- void setEHBlock(llvm::BasicBlock *BB) { EHBlock = BB; }
+ bool isEHCleanup() const { return CleanupBits.IsEHCleanup; }
+ llvm::BasicBlock *getEHBlock() const { return getCachedEHDispatchBlock(); }
+ void setEHBlock(llvm::BasicBlock *BB) { setCachedEHDispatchBlock(BB); }
- bool isActive() const { return IsActive; }
- void setActive(bool A) { IsActive = A; }
+ bool isActive() const { return CleanupBits.IsActive; }
+ void setActive(bool A) { CleanupBits.IsActive = A; }
llvm::AllocaInst *getActiveFlag() const { return ActiveFlag; }
void setActiveFlag(llvm::AllocaInst *Var) { ActiveFlag = Var; }
- void setTestFlagInNormalCleanup() { TestFlagInNormalCleanup = true; }
- bool shouldTestFlagInNormalCleanup() const { return TestFlagInNormalCleanup; }
+ void setTestFlagInNormalCleanup() {
+ CleanupBits.TestFlagInNormalCleanup = true;
+ }
+ bool shouldTestFlagInNormalCleanup() const {
+ return CleanupBits.TestFlagInNormalCleanup;
+ }
- void setTestFlagInEHCleanup() { TestFlagInEHCleanup = true; }
- bool shouldTestFlagInEHCleanup() const { return TestFlagInEHCleanup; }
+ void setTestFlagInEHCleanup() {
+ CleanupBits.TestFlagInEHCleanup = true;
+ }
+ bool shouldTestFlagInEHCleanup() const {
+ return CleanupBits.TestFlagInEHCleanup;
+ }
- unsigned getFixupDepth() const { return FixupDepth; }
+ unsigned getFixupDepth() const { return CleanupBits.FixupDepth; }
EHScopeStack::stable_iterator getEnclosingNormalCleanup() const {
return EnclosingNormal;
}
- EHScopeStack::stable_iterator getEnclosingEHCleanup() const {
- return EnclosingEH;
- }
- size_t getCleanupSize() const { return CleanupSize; }
+ size_t getCleanupSize() const { return CleanupBits.CleanupSize; }
void *getCleanupBuffer() { return this + 1; }
EHScopeStack::Cleanup *getCleanup() {
@@ -327,41 +374,6 @@ public:
return (ExtInfo->BranchAfters.size() != ExtInfo->Branches.size());
}
- // Same stuff, only for EH branches instead of normal branches.
- // It's quite possible that we could find a better representation
- // for this.
-
- bool hasEHBranches() const { return ExtInfo && !ExtInfo->EHBranches.empty(); }
- void addEHBranchAfter(llvm::ConstantInt *Index,
- llvm::BasicBlock *Block) {
- struct ExtInfo &ExtInfo = getExtInfo();
- if (ExtInfo.EHBranches.insert(Block))
- ExtInfo.EHBranchAfters.push_back(std::make_pair(Block, Index));
- }
-
- unsigned getNumEHBranchAfters() const {
- return ExtInfo ? ExtInfo->EHBranchAfters.size() : 0;
- }
-
- llvm::BasicBlock *getEHBranchAfterBlock(unsigned I) const {
- assert(I < getNumEHBranchAfters());
- return ExtInfo->EHBranchAfters[I].first;
- }
-
- llvm::ConstantInt *getEHBranchAfterIndex(unsigned I) const {
- assert(I < getNumEHBranchAfters());
- return ExtInfo->EHBranchAfters[I].second;
- }
-
- bool addEHBranchThrough(llvm::BasicBlock *Block) {
- return getExtInfo().EHBranches.insert(Block);
- }
-
- bool hasEHBranchThroughs() const {
- if (!ExtInfo) return false;
- return (ExtInfo->EHBranchAfters.size() != ExtInfo->EHBranches.size());
- }
-
static bool classof(const EHScope *Scope) {
return (Scope->getKind() == Cleanup);
}
@@ -373,8 +385,6 @@ public:
///
/// This is used to implement C++ exception specifications.
class EHFilterScope : public EHScope {
- unsigned NumFilters : BitsRemaining;
-
// Essentially ends in a flexible array member:
// llvm::Value *FilterTypes[0];
@@ -387,42 +397,42 @@ class EHFilterScope : public EHScope {
}
public:
- EHFilterScope(unsigned NumFilters) :
- EHScope(Filter), NumFilters(NumFilters) {}
+ EHFilterScope(unsigned numFilters)
+ : EHScope(Filter, EHScopeStack::stable_end()) {
+ FilterBits.NumFilters = numFilters;
+ }
- static size_t getSizeForNumFilters(unsigned NumFilters) {
- return sizeof(EHFilterScope) + NumFilters * sizeof(llvm::Value*);
+ static size_t getSizeForNumFilters(unsigned numFilters) {
+ return sizeof(EHFilterScope) + numFilters * sizeof(llvm::Value*);
}
- unsigned getNumFilters() const { return NumFilters; }
+ unsigned getNumFilters() const { return FilterBits.NumFilters; }
- void setFilter(unsigned I, llvm::Value *FilterValue) {
- assert(I < getNumFilters());
- getFilters()[I] = FilterValue;
+ void setFilter(unsigned i, llvm::Value *filterValue) {
+ assert(i < getNumFilters());
+ getFilters()[i] = filterValue;
}
- llvm::Value *getFilter(unsigned I) const {
- assert(I < getNumFilters());
- return getFilters()[I];
+ llvm::Value *getFilter(unsigned i) const {
+ assert(i < getNumFilters());
+ return getFilters()[i];
}
- static bool classof(const EHScope *Scope) {
- return Scope->getKind() == Filter;
+ static bool classof(const EHScope *scope) {
+ return scope->getKind() == Filter;
}
};
/// An exceptions scope which calls std::terminate if any exception
/// reaches it.
class EHTerminateScope : public EHScope {
- unsigned DestIndex : BitsRemaining;
public:
- EHTerminateScope(unsigned Index) : EHScope(Terminate), DestIndex(Index) {}
+ EHTerminateScope(EHScopeStack::stable_iterator enclosingEHScope)
+ : EHScope(Terminate, enclosingEHScope) {}
static size_t getSize() { return sizeof(EHTerminateScope); }
- unsigned getDestIndex() const { return DestIndex; }
-
- static bool classof(const EHScope *Scope) {
- return Scope->getKind() == Terminate;
+ static bool classof(const EHScope *scope) {
+ return scope->getKind() == Terminate;
}
};
@@ -498,26 +508,17 @@ inline EHScopeStack::iterator EHScopeStack::end() const {
inline void EHScopeStack::popCatch() {
assert(!empty() && "popping exception stack when not empty");
- assert(isa<EHCatchScope>(*begin()));
- StartOfData += EHCatchScope::getSizeForNumHandlers(
- cast<EHCatchScope>(*begin()).getNumHandlers());
-
- if (empty()) NextEHDestIndex = FirstEHDestIndex;
-
- assert(CatchDepth > 0 && "mismatched catch/terminate push/pop");
- CatchDepth--;
+ EHCatchScope &scope = cast<EHCatchScope>(*begin());
+ InnermostEHScope = scope.getEnclosingEHScope();
+ StartOfData += EHCatchScope::getSizeForNumHandlers(scope.getNumHandlers());
}
inline void EHScopeStack::popTerminate() {
assert(!empty() && "popping exception stack when not empty");
- assert(isa<EHTerminateScope>(*begin()));
+ EHTerminateScope &scope = cast<EHTerminateScope>(*begin());
+ InnermostEHScope = scope.getEnclosingEHScope();
StartOfData += EHTerminateScope::getSize();
-
- if (empty()) NextEHDestIndex = FirstEHDestIndex;
-
- assert(CatchDepth > 0 && "mismatched catch/terminate push/pop");
- CatchDepth--;
}
inline EHScopeStack::iterator EHScopeStack::find(stable_iterator sp) const {
@@ -532,28 +533,6 @@ EHScopeStack::stabilize(iterator ir) const {
return stable_iterator(EndOfBuffer - ir.Ptr);
}
-inline EHScopeStack::stable_iterator
-EHScopeStack::getInnermostActiveNormalCleanup() const {
- for (EHScopeStack::stable_iterator
- I = getInnermostNormalCleanup(), E = stable_end(); I != E; ) {
- EHCleanupScope &S = cast<EHCleanupScope>(*find(I));
- if (S.isActive()) return I;
- I = S.getEnclosingNormalCleanup();
- }
- return stable_end();
-}
-
-inline EHScopeStack::stable_iterator
-EHScopeStack::getInnermostActiveEHCleanup() const {
- for (EHScopeStack::stable_iterator
- I = getInnermostEHCleanup(), E = stable_end(); I != E; ) {
- EHCleanupScope &S = cast<EHCleanupScope>(*find(I));
- if (S.isActive()) return I;
- I = S.getEnclosingEHCleanup();
- }
- return stable_end();
-}
-
}
}
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
index 4c1244591743..c7a9b407d264 100644
--- a/lib/CodeGen/CGDebugInfo.cpp
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -33,7 +33,7 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Dwarf.h"
-#include "llvm/Support/Path.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
using namespace clang;
@@ -46,12 +46,46 @@ CGDebugInfo::CGDebugInfo(CodeGenModule &CGM)
}
CGDebugInfo::~CGDebugInfo() {
- assert(RegionStack.empty() && "Region stack mismatch, stack not empty!");
+ assert(LexicalBlockStack.empty() &&
+ "Region stack mismatch, stack not empty!");
}
void CGDebugInfo::setLocation(SourceLocation Loc) {
- if (Loc.isValid())
- CurLoc = CGM.getContext().getSourceManager().getInstantiationLoc(Loc);
+ // If the new location isn't valid, return.
+ if (!Loc.isValid()) return;
+
+ CurLoc = CGM.getContext().getSourceManager().getExpansionLoc(Loc);
+
+ // If we've changed files in the middle of a lexical scope go ahead
+ // and create a new lexical scope with file node if it's different
+ // from the one in the scope.
+ if (LexicalBlockStack.empty()) return;
+
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ PresumedLoc PCLoc = SM.getPresumedLoc(CurLoc);
+ PresumedLoc PPLoc = SM.getPresumedLoc(PrevLoc);
+
+ if (PCLoc.isInvalid() || PPLoc.isInvalid() ||
+ !strcmp(PPLoc.getFilename(), PCLoc.getFilename()))
+ return;
+
+ llvm::MDNode *LB = LexicalBlockStack.back();
+ llvm::DIScope Scope = llvm::DIScope(LB);
+ if (Scope.isLexicalBlockFile()) {
+ llvm::DILexicalBlockFile LBF = llvm::DILexicalBlockFile(LB);
+ llvm::DIDescriptor D
+ = DBuilder.createLexicalBlockFile(LBF.getScope(),
+ getOrCreateFile(CurLoc));
+ llvm::MDNode *N = D;
+ LexicalBlockStack.pop_back();
+ LexicalBlockStack.push_back(N);
+ } else if (Scope.isLexicalBlock()) {
+ llvm::DIDescriptor D
+ = DBuilder.createLexicalBlockFile(Scope, getOrCreateFile(CurLoc));
+ llvm::MDNode *N = D;
+ LexicalBlockStack.pop_back();
+ LexicalBlockStack.push_back(N);
+ }
}
/// getContextDescriptor - Get context info for the decl.
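
The new tail of setLocation handles a file change that happens while a
lexical scope is still open: instead of popping the scope, it replaces
the top of LexicalBlockStack with a DILexicalBlockFile that wraps the
same scope but points at the new file. The kind of input that
exercises this path (a hypothetical example):

    // inc.inc and the function body share one lexical scope, but their
    // debug locations must name different files.
    void f() {
      int x = 0;
    #include "inc.inc"   // lines from another file, same lexical block
      (void)x;
    }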
@@ -81,7 +115,7 @@ llvm::DIDescriptor CGDebugInfo::getContextDescriptor(const Decl *Context) {
/// getFunctionName - Get function name for the given FunctionDecl. If the
/// name is constructed on demand (e.g. C++ destructor), then the name
/// is stored on the side.
-llvm::StringRef CGDebugInfo::getFunctionName(const FunctionDecl *FD) {
+StringRef CGDebugInfo::getFunctionName(const FunctionDecl *FD) {
assert (FD && "Invalid FunctionDecl!");
IdentifierInfo *FII = FD->getIdentifier();
if (FII)
@@ -93,10 +127,10 @@ llvm::StringRef CGDebugInfo::getFunctionName(const FunctionDecl *FD) {
// Copy this name on the side and use its reference.
char *StrPtr = DebugInfoNames.Allocate<char>(NS.length());
memcpy(StrPtr, NS.data(), NS.length());
- return llvm::StringRef(StrPtr, NS.length());
+ return StringRef(StrPtr, NS.length());
}
-llvm::StringRef CGDebugInfo::getObjCMethodName(const ObjCMethodDecl *OMD) {
+StringRef CGDebugInfo::getObjCMethodName(const ObjCMethodDecl *OMD) {
llvm::SmallString<256> MethodName;
llvm::raw_svector_ostream OS(MethodName);
OS << (OMD->isInstanceMethod() ? '-' : '+') << '[';
@@ -116,22 +150,20 @@ llvm::StringRef CGDebugInfo::getObjCMethodName(const ObjCMethodDecl *OMD) {
char *StrPtr = DebugInfoNames.Allocate<char>(OS.tell());
memcpy(StrPtr, MethodName.begin(), OS.tell());
- return llvm::StringRef(StrPtr, OS.tell());
+ return StringRef(StrPtr, OS.tell());
}
/// getSelectorName - Return selector name. This is used for debugging
/// info.
-llvm::StringRef CGDebugInfo::getSelectorName(Selector S) {
- llvm::SmallString<256> SName;
- llvm::raw_svector_ostream OS(SName);
- OS << S.getAsString();
- char *StrPtr = DebugInfoNames.Allocate<char>(OS.tell());
- memcpy(StrPtr, SName.begin(), OS.tell());
- return llvm::StringRef(StrPtr, OS.tell());
+StringRef CGDebugInfo::getSelectorName(Selector S) {
+ const std::string &SName = S.getAsString();
+ char *StrPtr = DebugInfoNames.Allocate<char>(SName.size());
+ memcpy(StrPtr, SName.data(), SName.size());
+ return StringRef(StrPtr, SName.size());
}
/// getClassName - Get class name including template argument list.
-llvm::StringRef
+StringRef
CGDebugInfo::getClassName(RecordDecl *RD) {
ClassTemplateSpecializationDecl *Spec
= dyn_cast<ClassTemplateSpecializationDecl>(RD);
@@ -160,7 +192,7 @@ CGDebugInfo::getClassName(RecordDecl *RD) {
// Copy this name on the side and use its reference.
char *StrPtr = DebugInfoNames.Allocate<char>(Buffer.length());
memcpy(StrPtr, Buffer.data(), Buffer.length());
- return llvm::StringRef(StrPtr, Buffer.length());
+ return StringRef(StrPtr, Buffer.length());
}
/// getOrCreateFile - Get the file debug info descriptor for the input location.
@@ -172,7 +204,7 @@ llvm::DIFile CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
SourceManager &SM = CGM.getContext().getSourceManager();
PresumedLoc PLoc = SM.getPresumedLoc(Loc);
- if (PLoc.isInvalid() || llvm::StringRef(PLoc.getFilename()).empty())
+ if (PLoc.isInvalid() || StringRef(PLoc.getFilename()).empty())
// If the location is not valid then use main input file.
return DBuilder.createFile(TheCU.getFilename(), TheCU.getDirectory());
@@ -202,7 +234,7 @@ llvm::DIFile CGDebugInfo::getOrCreateMainFile() {
/// getLineNumber - Get line number for the location. If location is invalid
/// then use current location.
unsigned CGDebugInfo::getLineNumber(SourceLocation Loc) {
- assert (CurLoc.isValid() && "Invalid current location!");
+ assert((Loc.isValid() || CurLoc.isValid()) && "Invalid current location!");
SourceManager &SM = CGM.getContext().getSourceManager();
PresumedLoc PLoc = SM.getPresumedLoc(Loc.isValid() ? Loc : CurLoc);
return PLoc.isValid()? PLoc.getLine() : 0;
@@ -211,20 +243,20 @@ unsigned CGDebugInfo::getLineNumber(SourceLocation Loc) {
/// getColumnNumber - Get column number for the location. If location is
/// invalid then use current location.
unsigned CGDebugInfo::getColumnNumber(SourceLocation Loc) {
- assert (CurLoc.isValid() && "Invalid current location!");
+ assert((Loc.isValid() || CurLoc.isValid()) && "Invalid current location!");
SourceManager &SM = CGM.getContext().getSourceManager();
PresumedLoc PLoc = SM.getPresumedLoc(Loc.isValid() ? Loc : CurLoc);
return PLoc.isValid()? PLoc.getColumn() : 0;
}
-llvm::StringRef CGDebugInfo::getCurrentDirname() {
+StringRef CGDebugInfo::getCurrentDirname() {
if (!CWDName.empty())
return CWDName;
- char *CompDirnamePtr = NULL;
- llvm::sys::Path CWD = llvm::sys::Path::GetCurrentDirectory();
- CompDirnamePtr = DebugInfoNames.Allocate<char>(CWD.size());
- memcpy(CompDirnamePtr, CWD.c_str(), CWD.size());
- return CWDName = llvm::StringRef(CompDirnamePtr, CWD.size());
+ llvm::SmallString<256> CWD;
+ llvm::sys::fs::current_path(CWD);
+ char *CompDirnamePtr = DebugInfoNames.Allocate<char>(CWD.size());
+ memcpy(CompDirnamePtr, CWD.data(), CWD.size());
+ return CWDName = StringRef(CompDirnamePtr, CWD.size());
}
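
getCurrentDirname now asks llvm::sys::fs::current_path for the working directory once and caches the result in CWDName, so later calls return the stored string rather than touching the filesystem again. The same memoization shape in standard C++, with std::filesystem standing in for the LLVM call:

    #include <filesystem>
    #include <string>

    // One-shot cache: fetch the working directory on first use, then
    // hand back the stored copy, mirroring the CWDName member above.
    const std::string &currentDirname() {
      static std::string CWDName;
      if (CWDName.empty())
        CWDName = std::filesystem::current_path().string();
      return CWDName;
    }
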
/// CreateCompileUnit - Create new compile unit.
@@ -250,7 +282,7 @@ void CGDebugInfo::CreateCompileUnit() {
// Save filename string.
char *FilenamePtr = DebugInfoNames.Allocate<char>(MainFileName.length());
memcpy(FilenamePtr, MainFileName.c_str(), MainFileName.length());
- llvm::StringRef Filename(FilenamePtr, MainFileName.length());
+ StringRef Filename(FilenamePtr, MainFileName.length());
unsigned LangTag;
const LangOptions &LO = CGM.getLangOptions();
@@ -289,7 +321,17 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
unsigned Encoding = 0;
const char *BTName = NULL;
switch (BT->getKind()) {
- default:
+ case BuiltinType::Dependent:
+ llvm_unreachable("Unexpected builtin type Dependent");
+ case BuiltinType::Overload:
+ llvm_unreachable("Unexpected builtin type Overload");
+ case BuiltinType::BoundMember:
+ llvm_unreachable("Unexpected builtin type BoundMember");
+ case BuiltinType::UnknownAny:
+ llvm_unreachable("Unexpected builtin type UnknownAny");
+ case BuiltinType::NullPtr:
+ return DBuilder.
+ createNullPtrType(BT->getName(CGM.getContext().getLangOptions()));
case BuiltinType::Void:
return llvm::DIType();
case BuiltinType::ObjCClass:
@@ -312,7 +354,7 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
llvm::DIType ISATy = DBuilder.createPointerType(OCTy, Size);
- llvm::SmallVector<llvm::Value *, 16> EltTys;
+ SmallVector<llvm::Value *, 16> EltTys;
llvm::DIType FieldTy =
DBuilder.createMemberType(getOrCreateMainFile(), "isa",
getOrCreateMainFile(), 0, Size,
@@ -334,17 +376,22 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
case BuiltinType::Char_U: Encoding = llvm::dwarf::DW_ATE_unsigned_char; break;
case BuiltinType::Char_S:
case BuiltinType::SChar: Encoding = llvm::dwarf::DW_ATE_signed_char; break;
+ case BuiltinType::Char16:
+ case BuiltinType::Char32: Encoding = llvm::dwarf::DW_ATE_UTF; break;
case BuiltinType::UShort:
case BuiltinType::UInt:
case BuiltinType::UInt128:
case BuiltinType::ULong:
+ case BuiltinType::WChar_U:
case BuiltinType::ULongLong: Encoding = llvm::dwarf::DW_ATE_unsigned; break;
case BuiltinType::Short:
case BuiltinType::Int:
case BuiltinType::Int128:
case BuiltinType::Long:
+ case BuiltinType::WChar_S:
case BuiltinType::LongLong: Encoding = llvm::dwarf::DW_ATE_signed; break;
case BuiltinType::Bool: Encoding = llvm::dwarf::DW_ATE_boolean; break;
+ case BuiltinType::Half:
case BuiltinType::Float:
case BuiltinType::LongDouble:
case BuiltinType::Double: Encoding = llvm::dwarf::DW_ATE_float; break;
@@ -432,7 +479,7 @@ llvm::DIType CGDebugInfo::CreateType(const PointerType *Ty,
Ty->getPointeeType(), Unit);
}
-/// CreatePointeeType - Create PointTee type. If Pointee is a record
+/// CreatePointeeType - Create Pointee type. If Pointee is a record
/// then emit record's fwd if debug info size reduction is enabled.
llvm::DIType CGDebugInfo::CreatePointeeType(QualType PointeeTy,
llvm::DIFile Unit) {
@@ -477,7 +524,7 @@ llvm::DIType CGDebugInfo::CreatePointerLikeType(unsigned Tag,
// Size is always the size of a pointer. We can't use getTypeSize here
// because that does not return the correct value for references.
unsigned AS = CGM.getContext().getTargetAddressSpace(PointeeTy);
- uint64_t Size = CGM.getContext().Target.getPointerWidth(AS);
+ uint64_t Size = CGM.getContext().getTargetInfo().getPointerWidth(AS);
uint64_t Align = CGM.getContext().getTypeAlign(Ty);
return
@@ -489,7 +536,7 @@ llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
if (BlockLiteralGenericSet)
return BlockLiteralGeneric;
- llvm::SmallVector<llvm::Value *, 8> EltTys;
+ SmallVector<llvm::Value *, 8> EltTys;
llvm::DIType FieldTy;
QualType FType;
uint64_t FieldSize, FieldOffset;
@@ -567,7 +614,7 @@ llvm::DIType CGDebugInfo::CreateType(const TypedefType *Ty,
llvm::DIType CGDebugInfo::CreateType(const FunctionType *Ty,
llvm::DIFile Unit) {
- llvm::SmallVector<llvm::Value *, 16> EltTys;
+ SmallVector<llvm::Value *, 16> EltTys;
// Add the result type at least.
EltTys.push_back(getOrCreateType(Ty->getResultType(), Unit));
@@ -587,9 +634,9 @@ llvm::DIType CGDebugInfo::CreateType(const FunctionType *Ty,
return DbgTy;
}
-llvm::DIType CGDebugInfo::createFieldType(llvm::StringRef name,
+llvm::DIType CGDebugInfo::createFieldType(StringRef name,
QualType type,
- Expr *bitWidth,
+ uint64_t sizeInBitsOverride,
SourceLocation loc,
AccessSpecifier AS,
uint64_t offsetInBits,
@@ -606,8 +653,8 @@ llvm::DIType CGDebugInfo::createFieldType(llvm::StringRef name,
if (!type->isIncompleteArrayType()) {
llvm::tie(sizeInBits, alignInBits) = CGM.getContext().getTypeInfo(type);
- if (bitWidth)
- sizeInBits = bitWidth->EvaluateAsInt(CGM.getContext()).getZExtValue();
+ if (sizeInBitsOverride)
+ sizeInBits = sizeInBitsOverride;
}
unsigned flags = 0;
@@ -624,7 +671,7 @@ llvm::DIType CGDebugInfo::createFieldType(llvm::StringRef name,
/// record fields. This is used while creating debug info entry for a Record.
void CGDebugInfo::
CollectRecordFields(const RecordDecl *record, llvm::DIFile tunit,
- llvm::SmallVectorImpl<llvm::Value *> &elements,
+ SmallVectorImpl<llvm::Value *> &elements,
llvm::DIType RecordTy) {
unsigned fieldNo = 0;
const FieldDecl *LastFD = 0;
@@ -644,7 +691,7 @@ CollectRecordFields(const RecordDecl *record, llvm::DIFile tunit,
LastFD = field;
}
- llvm::StringRef name = field->getName();
+ StringRef name = field->getName();
QualType type = field->getType();
// Ignore unnamed fields unless they're anonymous structs/unions.
@@ -653,8 +700,14 @@ CollectRecordFields(const RecordDecl *record, llvm::DIFile tunit,
continue;
}
+ uint64_t SizeInBitsOverride = 0;
+ if (field->isBitField()) {
+ SizeInBitsOverride = field->getBitWidthValue(CGM.getContext());
+ assert(SizeInBitsOverride && "found named 0-width bitfield");
+ }
+
llvm::DIType fieldType
- = createFieldType(name, type, field->getBitWidth(),
+ = createFieldType(name, type, SizeInBitsOverride,
field->getLocation(), field->getAccess(),
layout.getFieldOffset(fieldNo), tunit, RecordTy);
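
Since createFieldType no longer takes an Expr for the bit width, the caller resolves a bitfield's width to a plain integer up front, with zero meaning "use the type's own size". A small illustration of the two sizes being distinguished (layout assumes a typical 32-bit storage unit):

    #include <cstdint>
    #include <iostream>

    struct Flags {
      uint32_t enabled : 1;   // storage type is 32 bits wide, but the
      uint32_t mode    : 3;   // debug-info size recorded for each member
      uint32_t unused  : 28;  // is its declared bit width instead
    };

    int main() {
      // sizeof reports the storage unit; the per-field sizeInBits the
      // patch would pass are 1, 3 and 28 respectively.
      std::cout << sizeof(Flags) * 8 << " bits of storage\n";  // 32
    }
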
@@ -674,25 +727,23 @@ CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
Unit);
// Add "this" pointer.
-
llvm::DIArray Args = llvm::DICompositeType(FnTy).getTypeArray();
assert (Args.getNumElements() && "Invalid number of arguments!");
- llvm::SmallVector<llvm::Value *, 16> Elts;
+ SmallVector<llvm::Value *, 16> Elts;
// First element is always return type. For 'void' functions it is NULL.
Elts.push_back(Args.getElement(0));
- if (!Method->isStatic())
- {
- // "this" pointer is always first argument.
- QualType ThisPtr = Method->getThisType(CGM.getContext());
- llvm::DIType ThisPtrType =
- DBuilder.createArtificialType(getOrCreateType(ThisPtr, Unit));
-
- TypeCache[ThisPtr.getAsOpaquePtr()] = ThisPtrType;
- Elts.push_back(ThisPtrType);
- }
+ if (!Method->isStatic()) {
+ // "this" pointer is always first argument.
+ QualType ThisPtr = Method->getThisType(CGM.getContext());
+ llvm::DIType ThisPtrType =
+ DBuilder.createArtificialType(getOrCreateType(ThisPtr, Unit));
+
+ TypeCache[ThisPtr.getAsOpaquePtr()] = ThisPtrType;
+ Elts.push_back(ThisPtrType);
+ }
// Copy rest of the arguments.
for (unsigned i = 1, e = Args.getNumElements(); i != e; ++i)
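
The reindented block keeps the invariant the comments describe: slot 0 of a method's debug type array is the return type, slot 1 (for non-static methods) is an artificial `this` pointer, and the source-level parameters follow. A sketch of that ordering with placeholder types, not the real DIBuilder API:

    #include <string>
    #include <vector>

    // Hypothetical stand-in for a debug type reference.
    using DbgType = std::string;

    std::vector<DbgType> buildMethodTypeArray(bool IsStatic, DbgType RetTy,
                                              const std::vector<DbgType> &Params) {
      std::vector<DbgType> Elts;
      Elts.push_back(RetTy);          // slot 0: return type (NULL for void)
      if (!IsStatic)
        Elts.push_back("this*");      // slot 1: artificial 'this'
      Elts.insert(Elts.end(), Params.begin(), Params.end());
      return Elts;
    }
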
@@ -723,12 +774,12 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
bool IsCtorOrDtor =
isa<CXXConstructorDecl>(Method) || isa<CXXDestructorDecl>(Method);
- llvm::StringRef MethodName = getFunctionName(Method);
+ StringRef MethodName = getFunctionName(Method);
llvm::DIType MethodTy = getOrCreateMethodType(Method, Unit);
// Since a single ctor/dtor corresponds to multiple functions, it doesn't
// make sense to give a single ctor/dtor a linkage name.
- llvm::StringRef MethodLinkageName;
+ StringRef MethodLinkageName;
if (!IsCtorOrDtor && !isFunctionLocalClass(Method->getParent()))
MethodLinkageName = CGM.getMangledName(Method);
@@ -750,7 +801,7 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
// It doesn't make sense to give a virtual destructor a vtable index,
// since a single destructor has two entries in the vtable.
if (!isa<CXXDestructorDecl>(Method))
- VIndex = CGM.getVTables().getMethodVTableIndex(Method);
+ VIndex = CGM.getVTableContext().getMethodVTableIndex(Method);
ContainingType = RecordTy;
}
@@ -774,7 +825,7 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
Flags |= llvm::DIDescriptor::FlagPrototyped;
llvm::DISubprogram SP =
- DBuilder.createMethod(RecordTy , MethodName, MethodLinkageName,
+ DBuilder.createMethod(RecordTy, MethodName, MethodLinkageName,
MethodDefUnit, MethodLine,
MethodTy, /*isLocalToUnit=*/false,
/* isDefinition=*/ false,
@@ -791,7 +842,7 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
/// a Record.
void CGDebugInfo::
CollectCXXMemberFunctions(const CXXRecordDecl *RD, llvm::DIFile Unit,
- llvm::SmallVectorImpl<llvm::Value *> &EltTys,
+ SmallVectorImpl<llvm::Value *> &EltTys,
llvm::DIType RecordTy) {
for(CXXRecordDecl::method_iterator I = RD->method_begin(),
E = RD->method_end(); I != E; ++I) {
@@ -809,11 +860,12 @@ CollectCXXMemberFunctions(const CXXRecordDecl *RD, llvm::DIFile Unit,
/// a Record.
void CGDebugInfo::
CollectCXXFriends(const CXXRecordDecl *RD, llvm::DIFile Unit,
- llvm::SmallVectorImpl<llvm::Value *> &EltTys,
+ SmallVectorImpl<llvm::Value *> &EltTys,
llvm::DIType RecordTy) {
-
for (CXXRecordDecl::friend_iterator BI = RD->friend_begin(),
BE = RD->friend_end(); BI != BE; ++BI) {
+ if ((*BI)->isUnsupportedFriend())
+ continue;
if (TypeSourceInfo *TInfo = (*BI)->getFriendType())
EltTys.push_back(DBuilder.createFriend(RecordTy,
getOrCreateType(TInfo->getType(),
@@ -826,7 +878,7 @@ CollectCXXFriends(const CXXRecordDecl *RD, llvm::DIFile Unit,
/// a Record.
void CGDebugInfo::
CollectCXXBases(const CXXRecordDecl *RD, llvm::DIFile Unit,
- llvm::SmallVectorImpl<llvm::Value *> &EltTys,
+ SmallVectorImpl<llvm::Value *> &EltTys,
llvm::DIType RecordTy) {
const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
@@ -842,7 +894,8 @@ CollectCXXBases(const CXXRecordDecl *RD, llvm::DIFile Unit,
     // virtual base offset offset is -ve. The code generator emits a dwarf
     // expression where it expects a +ve number.
BaseOffset =
- 0 - CGM.getVTables().getVirtualBaseOffsetOffset(RD, Base).getQuantity();
+ 0 - CGM.getVTableContext()
+ .getVirtualBaseOffsetOffset(RD, Base).getQuantity();
BFlags = llvm::DIDescriptor::FlagVirtual;
} else
BaseOffset = RL.getBaseClassOffsetInBits(Base);
@@ -868,7 +921,7 @@ llvm::DIArray CGDebugInfo::
CollectTemplateParams(const TemplateParameterList *TPList,
const TemplateArgumentList &TAList,
llvm::DIFile Unit) {
- llvm::SmallVector<llvm::Value *, 16> TemplateParams;
+ SmallVector<llvm::Value *, 16> TemplateParams;
for (unsigned i = 0, e = TAList.size(); i != e; ++i) {
const TemplateArgument &TA = TAList[i];
const NamedDecl *ND = TPList->getParam(i);
@@ -892,9 +945,11 @@ CollectTemplateParams(const TemplateParameterList *TPList,
/// info for function template parameters.
llvm::DIArray CGDebugInfo::
CollectFunctionTemplateParams(const FunctionDecl *FD, llvm::DIFile Unit) {
- if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplateSpecialization){
+ if (FD->getTemplatedKind() ==
+ FunctionDecl::TK_FunctionTemplateSpecialization) {
const TemplateParameterList *TList =
- FD->getTemplateSpecializationInfo()->getTemplate()->getTemplateParameters();
+ FD->getTemplateSpecializationInfo()->getTemplate()
+ ->getTemplateParameters();
return
CollectTemplateParams(TList, *FD->getTemplateSpecializationArgs(), Unit);
}
@@ -936,14 +991,14 @@ llvm::DIType CGDebugInfo::getOrCreateVTablePtrType(llvm::DIFile Unit) {
}
/// getVTableName - Get vtable name for the given Class.
-llvm::StringRef CGDebugInfo::getVTableName(const CXXRecordDecl *RD) {
+StringRef CGDebugInfo::getVTableName(const CXXRecordDecl *RD) {
 // Otherwise construct a gdb-compatible name.
std::string Name = "_vptr$" + RD->getNameAsString();
// Copy this name on the side and use its reference.
char *StrPtr = DebugInfoNames.Allocate<char>(Name.length());
memcpy(StrPtr, Name.data(), Name.length());
- return llvm::StringRef(StrPtr, Name.length());
+ return StringRef(StrPtr, Name.length());
}
@@ -951,7 +1006,7 @@ llvm::StringRef CGDebugInfo::getVTableName(const CXXRecordDecl *RD) {
/// debug info entry in EltTys vector.
void CGDebugInfo::
CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile Unit,
- llvm::SmallVectorImpl<llvm::Value *> &EltTys) {
+ SmallVectorImpl<llvm::Value *> &EltTys) {
const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
// If there is a primary base then it will hold vtable info.
@@ -1016,11 +1071,11 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty) {
// it.
TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = FwdDecl;
// Push the struct on region stack.
- RegionStack.push_back(FwdDeclNode);
+ LexicalBlockStack.push_back(FwdDeclNode);
RegionMap[Ty->getDecl()] = llvm::WeakVH(FwdDecl);
// Convert all the elements.
- llvm::SmallVector<llvm::Value *, 16> EltTys;
+ SmallVector<llvm::Value *, 16> EltTys;
const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(RD);
if (CXXDecl) {
@@ -1040,7 +1095,7 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty) {
// Create the descriptor for static variable.
llvm::DIFile VUnit = getOrCreateFile(V->getLocation());
- llvm::StringRef VName = V->getName();
+ StringRef VName = V->getName();
llvm::DIType VTy = getOrCreateType(V->getType(), VUnit);
// Do not use DIGlobalVariable for enums.
if (VTy.getTag() != llvm::dwarf::DW_TAG_enumeration_type) {
@@ -1062,7 +1117,7 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty) {
TParamsArray = CollectCXXTemplateParams(TSpecial, Unit);
}
- RegionStack.pop_back();
+ LexicalBlockStack.pop_back();
llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator RI =
RegionMap.find(Ty->getDecl());
if (RI != RegionMap.end())
@@ -1070,7 +1125,7 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty) {
llvm::DIDescriptor RDContext =
getContextDescriptor(cast<Decl>(RD->getDeclContext()));
- llvm::StringRef RDName = RD->getName();
+ StringRef RDName = RD->getName();
uint64_t Size = CGM.getContext().getTypeSize(Ty);
uint64_t Align = CGM.getContext().getTypeAlign(Ty);
llvm::DIArray Elements = DBuilder.getOrCreateArray(EltTys);
@@ -1134,8 +1189,8 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
unsigned Line = getLineNumber(ID->getLocation());
unsigned RuntimeLang = TheCU.getLanguage();
- // If this is just a forward declaration, return a special forward-declaration
- // debug type.
+  // If this is just a forward declaration, return a special
+  // forward-declaration debug type, since we won't be able to lay out
+  // the entire type.
if (ID->isForwardDecl()) {
llvm::DIType FwdDecl =
DBuilder.createStructType(Unit, ID->getName(),
@@ -1144,12 +1199,12 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
return FwdDecl;
}
- // To handle recursive interface, we
- // first generate a debug descriptor for the struct as a forward declaration.
- // Then (if it is a definition) we go through and get debug info for all of
- // its members. Finally, we create a descriptor for the complete type (which
- // may refer to the forward decl if the struct is recursive) and replace all
- // uses of the forward declaration with the final definition.
+ // To handle a recursive interface, we first generate a debug descriptor
+ // for the struct as a forward declaration. Then (if it is a definition)
+ // we go through and get debug info for all of its members. Finally, we
+ // create a descriptor for the complete type (which may refer to the
+ // forward decl if the struct is recursive) and replace all uses of the
+ // forward declaration with the final definition.
llvm::DIType FwdDecl = DBuilder.createTemporaryType(DefUnit);
llvm::MDNode *MN = FwdDecl;
@@ -1158,11 +1213,11 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
// it.
TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = FwdDecl;
// Push the struct on region stack.
- RegionStack.push_back(FwdDeclNode);
+ LexicalBlockStack.push_back(FwdDeclNode);
RegionMap[Ty->getDecl()] = llvm::WeakVH(FwdDecl);
// Convert all the elements.
- llvm::SmallVector<llvm::Value *, 16> EltTys;
+ SmallVector<llvm::Value *, 16> EltTys;
ObjCInterfaceDecl *SClass = ID->getSuperClass();
if (SClass) {
@@ -1177,7 +1232,7 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
}
const ASTRecordLayout &RL = CGM.getContext().getASTObjCInterfaceLayout(ID);
-
+ ObjCImplementationDecl *ImpD = ID->getImplementation();
unsigned FieldNo = 0;
for (ObjCIvarDecl *Field = ID->all_declared_ivar_begin(); Field;
Field = Field->getNextIvar(), ++FieldNo) {
@@ -1185,7 +1240,7 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
if (!FieldTy.isValid())
return llvm::DIType();
- llvm::StringRef FieldName = Field->getName();
+ StringRef FieldName = Field->getName();
// Ignore unnamed fields.
if (FieldName.empty())
@@ -1201,15 +1256,18 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
if (!FType->isIncompleteArrayType()) {
// Bit size, align and offset of the type.
- FieldSize = CGM.getContext().getTypeSize(FType);
- Expr *BitWidth = Field->getBitWidth();
- if (BitWidth)
- FieldSize = BitWidth->EvaluateAsInt(CGM.getContext()).getZExtValue();
-
- FieldAlign = CGM.getContext().getTypeAlign(FType);
+ FieldSize = Field->isBitField()
+ ? Field->getBitWidthValue(CGM.getContext())
+ : CGM.getContext().getTypeSize(FType);
+ FieldAlign = CGM.getContext().getTypeAlign(FType);
}
- uint64_t FieldOffset = RL.getFieldOffset(FieldNo);
+  // We can't know the offset of our ivar in the structure if we're using
+  // the non-fragile ABI, and the debugger should ignore the value anyway.
+  // Record it as FieldNo+1 because of how debuggers use the information,
+  // e.g. negating the value when it needs a lookup in the dynamic table.
+ uint64_t FieldOffset = CGM.getLangOptions().ObjCNonFragileABI ? FieldNo+1
+ : RL.getFieldOffset(FieldNo);
unsigned Flags = 0;
if (Field->getAccessControl() == ObjCIvarDecl::Protected)
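
To spell out the convention the new comment hints at: under the non-fragile ABI the recorded ivar "offset" is really a 1-based field number, a sentinel a cooperating debugger can decode (the comment mentions negation) before doing its dynamic-table lookup. A toy version of the encoding side, assumptions and all:

    #include <cassert>
    #include <cstdint>

    // Assumed scheme: with the non-fragile ABI, store FieldNo+1 instead
    // of a byte offset, so 0 never collides with a real field number.
    uint64_t encodeIvarOffset(bool NonFragileABI, unsigned FieldNo,
                              uint64_t StaticOffset) {
      return NonFragileABI ? FieldNo + 1 : StaticOffset;
    }

    int main() {
      // A consumer that knows the convention recovers the field index.
      assert(encodeIvarOffset(true, /*FieldNo=*/2, /*StaticOffset=*/0) == 3);
    }
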
@@ -1217,17 +1275,21 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
else if (Field->getAccessControl() == ObjCIvarDecl::Private)
Flags = llvm::DIDescriptor::FlagPrivate;
- llvm::StringRef PropertyName;
- llvm::StringRef PropertyGetter;
- llvm::StringRef PropertySetter;
+ StringRef PropertyName;
+ StringRef PropertyGetter;
+ StringRef PropertySetter;
unsigned PropertyAttributes = 0;
- if (ObjCPropertyDecl *PD =
- ID->FindPropertyVisibleInPrimaryClass(Field->getIdentifier())) {
+ ObjCPropertyDecl *PD = NULL;
+ if (ImpD)
+ if (ObjCPropertyImplDecl *PImpD =
+ ImpD->FindPropertyImplIvarDecl(Field->getIdentifier()))
+ PD = PImpD->getPropertyDecl();
+ if (PD) {
PropertyName = PD->getName();
PropertyGetter = getSelectorName(PD->getGetterName());
PropertySetter = getSelectorName(PD->getSetterName());
PropertyAttributes = PD->getPropertyAttributes();
- }
+ }
FieldTy = DBuilder.createObjCIVar(FieldName, FieldDefUnit,
FieldLine, FieldSize, FieldAlign,
FieldOffset, Flags, FieldTy,
@@ -1238,7 +1300,7 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
llvm::DIArray Elements = DBuilder.getOrCreateArray(EltTys);
- RegionStack.pop_back();
+ LexicalBlockStack.pop_back();
llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator RI =
RegionMap.find(Ty->getDecl());
if (RI != RegionMap.end())
@@ -1322,7 +1384,7 @@ llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty,
// Add the dimensions of the array. FIXME: This loses CV qualifiers from
// interior arrays, do we care? Why aren't nested arrays represented the
// obvious/recursive way?
- llvm::SmallVector<llvm::Value *, 8> Subscripts;
+ SmallVector<llvm::Value *, 8> Subscripts;
QualType EltTy(Ty, 0);
if (Ty->isIncompleteArrayType())
EltTy = Ty->getElementType();
@@ -1339,7 +1401,8 @@ llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty,
LowerBound = 1;
// FIXME: Verify this is right for VLAs.
- Subscripts.push_back(DBuilder.getOrCreateSubrange(LowerBound, UpperBound));
+ Subscripts.push_back(DBuilder.getOrCreateSubrange(LowerBound,
+ UpperBound));
EltTy = Ty->getElementType();
}
}
@@ -1395,15 +1458,22 @@ llvm::DIType CGDebugInfo::CreateType(const MemberPointerType *Ty,
llvm::DIArray Elements = DBuilder.getOrCreateArray(ElementTypes);
- return DBuilder.createStructType(U, llvm::StringRef("test"),
+ return DBuilder.createStructType(U, StringRef("test"),
U, 0, FieldOffset,
0, 0, Elements);
}
+llvm::DIType CGDebugInfo::CreateType(const AtomicType *Ty,
+ llvm::DIFile U) {
+  // Ignore the atomic wrapping.
+ // FIXME: What is the correct representation?
+ return getOrCreateType(Ty->getValueType(), U);
+}
+
/// CreateEnumType - get enumeration type.
llvm::DIType CGDebugInfo::CreateEnumType(const EnumDecl *ED) {
llvm::DIFile Unit = getOrCreateFile(ED->getLocation());
- llvm::SmallVector<llvm::Value *, 16> Enumerators;
+ SmallVector<llvm::Value *, 16> Enumerators;
// Create DIEnumerator elements for each enumerator.
for (EnumDecl::enumerator_iterator
@@ -1522,7 +1592,7 @@ llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty,
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.def"
- assert(false && "Dependent types cannot show up in debug information");
+ llvm_unreachable("Dependent types cannot show up in debug information");
case Type::ExtVector:
case Type::Vector:
@@ -1558,6 +1628,9 @@ llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty,
case Type::MemberPointer:
return CreateType(cast<MemberPointerType>(Ty), Unit);
+ case Type::Atomic:
+ return CreateType(cast<AtomicType>(Ty), Unit);
+
case Type::Attributed:
case Type::TemplateSpecialization:
case Type::Elaborated:
@@ -1573,7 +1646,7 @@ llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty,
}
assert(Diag && "Fall through without a diagnostic?");
- unsigned DiagID = CGM.getDiags().getCustomDiagID(Diagnostic::Error,
+ unsigned DiagID = CGM.getDiags().getCustomDiagID(DiagnosticsEngine::Error,
"debug information for %0 is not yet supported");
CGM.getDiags().Report(DiagID)
<< Diag;
@@ -1582,7 +1655,7 @@ llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty,
/// CreateMemberType - Create new member and increase Offset by FType's size.
llvm::DIType CGDebugInfo::CreateMemberType(llvm::DIFile Unit, QualType FType,
- llvm::StringRef Name,
+ StringRef Name,
uint64_t *Offset) {
llvm::DIType FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
uint64_t FieldSize = CGM.getContext().getTypeSize(FType);
@@ -1627,13 +1700,14 @@ llvm::DISubprogram CGDebugInfo::getFunctionDeclaration(const Decl *D) {
// getOrCreateFunctionType - Construct DIType. If it is a c++ method, include
// implicit parameter "this".
-llvm::DIType CGDebugInfo::getOrCreateFunctionType(const Decl * D, QualType FnType,
+llvm::DIType CGDebugInfo::getOrCreateFunctionType(const Decl * D,
+ QualType FnType,
llvm::DIFile F) {
if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
return getOrCreateMethodType(Method, F);
else if (const ObjCMethodDecl *OMethod = dyn_cast<ObjCMethodDecl>(D)) {
// Add "self" and "_cmd"
- llvm::SmallVector<llvm::Value *, 16> Elts;
+ SmallVector<llvm::Value *, 16> Elts;
// First element is always return type. For 'void' functions it is NULL.
Elts.push_back(getOrCreateType(OMethod->getResultType(), F));
@@ -1642,7 +1716,7 @@ llvm::DIType CGDebugInfo::getOrCreateFunctionType(const Decl * D, QualType FnTyp
// "cmd" pointer is always second argument.
Elts.push_back(getOrCreateType(OMethod->getCmdDecl()->getType(), F));
// Get rest of the arguments.
- for (ObjCMethodDecl::param_iterator PI = OMethod->param_begin(),
+ for (ObjCMethodDecl::param_const_iterator PI = OMethod->param_begin(),
PE = OMethod->param_end(); PI != PE; ++PI)
Elts.push_back(getOrCreateType((*PI)->getType(), F));
@@ -1658,13 +1732,13 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
llvm::Function *Fn,
CGBuilderTy &Builder) {
- llvm::StringRef Name;
- llvm::StringRef LinkageName;
+ StringRef Name;
+ StringRef LinkageName;
- FnBeginRegionCount.push_back(RegionStack.size());
+ FnBeginRegionCount.push_back(LexicalBlockStack.size());
const Decl *D = GD.getDecl();
-
+
unsigned Flags = 0;
llvm::DIFile Unit = getOrCreateFile(CurLoc);
llvm::DIDescriptor FDContext(Unit);
@@ -1677,7 +1751,7 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
llvm::DIDescriptor SP(dyn_cast_or_null<llvm::MDNode>(&*FI->second));
if (SP.isSubprogram() && llvm::DISubprogram(SP).isDefinition()) {
llvm::MDNode *SPN = SP;
- RegionStack.push_back(SPN);
+ LexicalBlockStack.push_back(SPN);
RegionMap[D] = llvm::WeakVH(SP);
return;
}
@@ -1687,7 +1761,7 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
if (!Fn->hasInternalLinkage())
LinkageName = CGM.getMangledName(GD);
if (LinkageName == Name)
- LinkageName = llvm::StringRef();
+ LinkageName = StringRef();
if (FD->hasPrototype())
Flags |= llvm::DIDescriptor::FlagPrototyped;
if (const NamespaceDecl *NSDecl =
@@ -1726,121 +1800,85 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
// Push function on region stack.
llvm::MDNode *SPN = SP;
- RegionStack.push_back(SPN);
+ LexicalBlockStack.push_back(SPN);
RegionMap[D] = llvm::WeakVH(SP);
-
- // Clear stack used to keep track of #line directives.
- LineDirectiveFiles.clear();
}
+/// EmitLocation - Emit metadata to indicate a change in line/column
+/// information in the source file.
+void CGDebugInfo::EmitLocation(CGBuilderTy &Builder, SourceLocation Loc) {
+  // Update our current location.
+ setLocation(Loc);
-void CGDebugInfo::EmitStopPoint(CGBuilderTy &Builder) {
if (CurLoc.isInvalid() || CurLoc.isMacroID()) return;
// Don't bother if things are the same as last time.
SourceManager &SM = CGM.getContext().getSourceManager();
- if (CurLoc == PrevLoc
- || (SM.getInstantiationLineNumber(CurLoc) ==
- SM.getInstantiationLineNumber(PrevLoc)
- && SM.isFromSameFile(CurLoc, PrevLoc)))
+ if (CurLoc == PrevLoc ||
+ SM.getExpansionLoc(CurLoc) == SM.getExpansionLoc(PrevLoc))
// New Builder may not be in sync with CGDebugInfo.
if (!Builder.getCurrentDebugLocation().isUnknown())
return;
-
+
// Update last state.
PrevLoc = CurLoc;
- llvm::MDNode *Scope = RegionStack.back();
+ llvm::MDNode *Scope = LexicalBlockStack.back();
Builder.SetCurrentDebugLocation(llvm::DebugLoc::get(getLineNumber(CurLoc),
getColumnNumber(CurLoc),
Scope));
}
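
EmitLocation's early-out has two parts: the position must be unchanged since the last emission, and the IR builder must already carry a debug location, because a freshly created builder may not be in sync even when CurLoc equals PrevLoc. Reduced to a predicate (a sketch, not the clang types):

    #include <string>

    struct Pos {
      std::string File;
      unsigned Line;
      bool operator==(const Pos &O) const {
        return File == O.File && Line == O.Line;
      }
    };

    // Re-emit a line-table entry unless nothing changed *and* the
    // builder is already carrying a location.
    bool shouldEmitLocation(const Pos &Cur, const Pos &Prev,
                            bool BuilderHasDebugLoc) {
      return !(Cur == Prev && BuilderHasDebugLoc);
    }
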
-/// UpdateLineDirectiveRegion - Update region stack only if #line directive
-/// has introduced scope change.
-void CGDebugInfo::UpdateLineDirectiveRegion(CGBuilderTy &Builder) {
- if (CurLoc.isInvalid() || CurLoc.isMacroID() ||
- PrevLoc.isInvalid() || PrevLoc.isMacroID())
- return;
- SourceManager &SM = CGM.getContext().getSourceManager();
- PresumedLoc PCLoc = SM.getPresumedLoc(CurLoc);
- PresumedLoc PPLoc = SM.getPresumedLoc(PrevLoc);
-
- if (PCLoc.isInvalid() || PPLoc.isInvalid() ||
- !strcmp(PPLoc.getFilename(), PCLoc.getFilename()))
- return;
-
- // If #line directive stack is empty then we are entering a new scope.
- if (LineDirectiveFiles.empty()) {
- EmitRegionStart(Builder);
- LineDirectiveFiles.push_back(PCLoc.getFilename());
- return;
- }
-
- assert (RegionStack.size() >= LineDirectiveFiles.size()
- && "error handling #line regions!");
-
- bool SeenThisFile = false;
- // Chek if current file is already seen earlier.
- for(std::vector<const char *>::iterator I = LineDirectiveFiles.begin(),
- E = LineDirectiveFiles.end(); I != E; ++I)
- if (!strcmp(PCLoc.getFilename(), *I)) {
- SeenThisFile = true;
- break;
- }
+/// CreateLexicalBlock - Creates a new lexical block node and pushes it on
+/// the stack.
+void CGDebugInfo::CreateLexicalBlock(SourceLocation Loc) {
+ llvm::DIDescriptor D =
+ DBuilder.createLexicalBlock(LexicalBlockStack.empty() ?
+ llvm::DIDescriptor() :
+ llvm::DIDescriptor(LexicalBlockStack.back()),
+ getOrCreateFile(CurLoc),
+ getLineNumber(CurLoc),
+ getColumnNumber(CurLoc));
+ llvm::MDNode *DN = D;
+ LexicalBlockStack.push_back(DN);
+}
- // If #line for this file is seen earlier then pop out #line regions.
- if (SeenThisFile) {
- while (!LineDirectiveFiles.empty()) {
- const char *LastFile = LineDirectiveFiles.back();
- RegionStack.pop_back();
- LineDirectiveFiles.pop_back();
- if (!strcmp(PPLoc.getFilename(), LastFile))
- break;
- }
- return;
- }
+/// EmitLexicalBlockStart - Constructs the debug code for entering a declarative
+/// region - beginning of a DW_TAG_lexical_block.
+void CGDebugInfo::EmitLexicalBlockStart(CGBuilderTy &Builder, SourceLocation Loc) {
+ // Set our current location.
+ setLocation(Loc);
- // .. otherwise insert new #line region.
- EmitRegionStart(Builder);
- LineDirectiveFiles.push_back(PCLoc.getFilename());
+ // Create a new lexical block and push it on the stack.
+ CreateLexicalBlock(Loc);
- return;
-}
-/// EmitRegionStart- Constructs the debug code for entering a declarative
-/// region - "llvm.dbg.region.start.".
-void CGDebugInfo::EmitRegionStart(CGBuilderTy &Builder) {
- llvm::DIDescriptor D =
- DBuilder.createLexicalBlock(RegionStack.empty() ?
- llvm::DIDescriptor() :
- llvm::DIDescriptor(RegionStack.back()),
- getOrCreateFile(CurLoc),
- getLineNumber(CurLoc),
- getColumnNumber(CurLoc));
- llvm::MDNode *DN = D;
- RegionStack.push_back(DN);
+ // Emit a line table change for the current location inside the new scope.
+ Builder.SetCurrentDebugLocation(llvm::DebugLoc::get(getLineNumber(Loc),
+ getColumnNumber(Loc),
+ LexicalBlockStack.back()));
}
-/// EmitRegionEnd - Constructs the debug code for exiting a declarative
-/// region - "llvm.dbg.region.end."
-void CGDebugInfo::EmitRegionEnd(CGBuilderTy &Builder) {
- assert(!RegionStack.empty() && "Region stack mismatch, stack empty!");
+/// EmitLexicalBlockEnd - Constructs the debug code for exiting a declarative
+/// region - end of a DW_TAG_lexical_block.
+void CGDebugInfo::EmitLexicalBlockEnd(CGBuilderTy &Builder, SourceLocation Loc) {
+ assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
- // Provide an region stop point.
- EmitStopPoint(Builder);
+ // Provide an entry in the line table for the end of the block.
+ EmitLocation(Builder, Loc);
- RegionStack.pop_back();
+ LexicalBlockStack.pop_back();
}
/// EmitFunctionEnd - Constructs the debug code for exiting a function.
void CGDebugInfo::EmitFunctionEnd(CGBuilderTy &Builder) {
- assert(!RegionStack.empty() && "Region stack mismatch, stack empty!");
+ assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
unsigned RCount = FnBeginRegionCount.back();
- assert(RCount <= RegionStack.size() && "Region stack mismatch");
+ assert(RCount <= LexicalBlockStack.size() && "Region stack mismatch");
// Pop all regions for this function.
- while (RegionStack.size() != RCount)
- EmitRegionEnd(Builder);
+ while (LexicalBlockStack.size() != RCount)
+ EmitLexicalBlockEnd(Builder, CurLoc);
FnBeginRegionCount.pop_back();
}
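
EmitFunctionEnd is the safety net for unbalanced scopes: FnBeginRegionCount records the stack depth at function entry, and everything pushed above that depth is popped when the function ends. The pairing in miniature, using a plain vector for the scope stack:

    #include <vector>

    struct FnScopes {
      std::vector<int> LexicalBlockStack;
      std::vector<unsigned> FnBeginRegionCount;

      void beginFunction() {
        // Remember how deep the stack was when this function started.
        FnBeginRegionCount.push_back(LexicalBlockStack.size());
      }
      void endFunction() {
        // Pop whatever blocks the body left open back down to the
        // depth recorded at entry.
        unsigned RCount = FnBeginRegionCount.back();
        while (LexicalBlockStack.size() != RCount)
          LexicalBlockStack.pop_back();
        FnBeginRegionCount.pop_back();
      }
    };
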
@@ -1849,7 +1887,7 @@ void CGDebugInfo::EmitFunctionEnd(CGBuilderTy &Builder) {
llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const ValueDecl *VD,
uint64_t *XOffset) {
- llvm::SmallVector<llvm::Value *, 5> EltTys;
+ SmallVector<llvm::Value *, 5> EltTys;
QualType FType;
uint64_t FieldSize, FieldOffset;
unsigned FieldAlign;
@@ -1876,7 +1914,7 @@ llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const ValueDecl *VD,
CharUnits Align = CGM.getContext().getDeclAlign(VD);
if (Align > CGM.getContext().toCharUnitsFromBits(
- CGM.getContext().Target.getPointerAlign(0))) {
+ CGM.getContext().getTargetInfo().getPointerAlign(0))) {
CharUnits FieldOffsetInBytes
= CGM.getContext().toCharUnitsFromBits(FieldOffset);
CharUnits AlignedOffsetInBytes
@@ -1916,7 +1954,7 @@ llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const ValueDecl *VD,
void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
llvm::Value *Storage,
unsigned ArgNo, CGBuilderTy &Builder) {
- assert(!RegionStack.empty() && "Region stack mismatch, stack empty!");
+ assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
llvm::DIFile Unit = getOrCreateFile(VD->getLocation());
llvm::DIType Ty;
@@ -1940,7 +1978,8 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
// If an aggregate variable has non trivial destructor or non trivial copy
// constructor than it is pass indirectly. Let debug info know about this
// by using reference of the aggregate type as a argument type.
- if (!Record->hasTrivialCopyConstructor() || !Record->hasTrivialDestructor())
+ if (!Record->hasTrivialCopyConstructor() ||
+ !Record->hasTrivialDestructor())
Ty = DBuilder.createReferenceType(Ty);
}
}
@@ -1951,18 +1990,18 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
unsigned Flags = 0;
if (VD->isImplicit())
Flags |= llvm::DIDescriptor::FlagArtificial;
- llvm::MDNode *Scope = RegionStack.back();
+ llvm::MDNode *Scope = LexicalBlockStack.back();
- llvm::StringRef Name = VD->getName();
+ StringRef Name = VD->getName();
if (!Name.empty()) {
if (VD->hasAttr<BlocksAttr>()) {
CharUnits offset = CharUnits::fromQuantity(32);
- llvm::SmallVector<llvm::Value *, 9> addr;
- const llvm::Type *Int64Ty = llvm::Type::getInt64Ty(CGM.getLLVMContext());
+ SmallVector<llvm::Value *, 9> addr;
+ llvm::Type *Int64Ty = llvm::Type::getInt64Ty(CGM.getLLVMContext());
addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
// offset of __forwarding field
offset = CGM.getContext().toCharUnitsFromBits(
- CGM.getContext().Target.getPointerWidth(0));
+ CGM.getContext().getTargetInfo().getPointerWidth(0));
addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpDeref));
addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
@@ -1973,14 +2012,14 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
// Create the descriptor for the variable.
llvm::DIVariable D =
DBuilder.createComplexVariable(Tag,
- llvm::DIDescriptor(RegionStack.back()),
+ llvm::DIDescriptor(Scope),
VD->getName(), Unit, Line, Ty,
addr, ArgNo);
// Insert an llvm.dbg.declare into the current block.
llvm::Instruction *Call =
DBuilder.insertDeclare(Storage, D, Builder.GetInsertBlock());
-
Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
return;
}
@@ -1993,7 +2032,6 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
// Insert an llvm.dbg.declare into the current block.
llvm::Instruction *Call =
DBuilder.insertDeclare(Storage, D, Builder.GetInsertBlock());
-
Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
return;
}
@@ -2008,7 +2046,7 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
I != E; ++I) {
FieldDecl *Field = *I;
llvm::DIType FieldTy = getOrCreateType(Field->getType(), Unit);
- llvm::StringRef FieldName = Field->getName();
+ StringRef FieldName = Field->getName();
// Ignore unnamed fields. Do not ignore unnamed records.
if (FieldName.empty() && !isa<RecordType>(Field->getType()))
@@ -2024,7 +2062,6 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
// Insert an llvm.dbg.declare into the current block.
llvm::Instruction *Call =
DBuilder.insertDeclare(Storage, D, Builder.GetInsertBlock());
-
Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
}
}
@@ -2040,7 +2077,7 @@ void CGDebugInfo::EmitDeclareOfAutoVariable(const VarDecl *VD,
void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
const VarDecl *VD, llvm::Value *Storage, CGBuilderTy &Builder,
const CGBlockInfo &blockInfo) {
- assert(!RegionStack.empty() && "Region stack mismatch, stack empty!");
+ assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
if (Builder.GetInsertBlock() == 0)
return;
@@ -2065,15 +2102,16 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
target.getStructLayout(blockInfo.StructureType)
->getElementOffset(blockInfo.getCapture(VD).getIndex()));
- llvm::SmallVector<llvm::Value *, 9> addr;
- const llvm::Type *Int64Ty = llvm::Type::getInt64Ty(CGM.getLLVMContext());
+ SmallVector<llvm::Value *, 9> addr;
+ llvm::Type *Int64Ty = llvm::Type::getInt64Ty(CGM.getLLVMContext());
addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
if (isByRef) {
addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpDeref));
addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
// offset of __forwarding field
- offset = CGM.getContext().toCharUnitsFromBits(target.getPointerSizeInBits());
+ offset = CGM.getContext()
+ .toCharUnitsFromBits(target.getPointerSizeInBits());
addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpDeref));
addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
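
The addr vector being built is a complex-address expression: an alternating run of operators and operands telling the debugger how to walk from the raw storage to the variable, and for a __block variable that walk goes through the __forwarding pointer so it always lands on the live copy. A sketch of the sequence's shape (operator encodings are placeholders, not the real DIBuilder values):

    #include <cstdint>
    #include <vector>

    // Placeholder encodings; the real ones come from llvm::DIBuilder.
    enum : uint64_t { OpPlus = 1, OpDeref = 2 };

    // Step to the byref struct inside the block literal, chase
    // __forwarding, then step again; the caller appends the final
    // field offset after the trailing OpPlus.
    std::vector<uint64_t> byrefAddressExpr(uint64_t ByrefFieldOffset,
                                           uint64_t ForwardingOffset) {
      std::vector<uint64_t> addr;
      addr.push_back(OpPlus);  addr.push_back(ByrefFieldOffset);
      addr.push_back(OpDeref);
      addr.push_back(OpPlus);  addr.push_back(ForwardingOffset);
      addr.push_back(OpDeref);
      addr.push_back(OpPlus);
      return addr;
    }
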
@@ -2085,14 +2123,13 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
// Create the descriptor for the variable.
llvm::DIVariable D =
DBuilder.createComplexVariable(llvm::dwarf::DW_TAG_auto_variable,
- llvm::DIDescriptor(RegionStack.back()),
+ llvm::DIDescriptor(LexicalBlockStack.back()),
VD->getName(), Unit, Line, Ty, addr);
// Insert an llvm.dbg.declare into the current block.
- llvm::Instruction *Call =
+ llvm::Instruction *Call =
DBuilder.insertDeclare(Storage, D, Builder.GetInsertPoint());
-
- llvm::MDNode *Scope = RegionStack.back();
- Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
+ Call->setDebugLoc(llvm::DebugLoc::get(Line, Column,
+ LexicalBlockStack.back()));
}
/// EmitDeclareOfArgVariable - Emit call to llvm.dbg.declare for an argument
@@ -2131,7 +2168,7 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
const llvm::StructLayout *blockLayout =
CGM.getTargetData().getStructLayout(block.StructureType);
- llvm::SmallVector<llvm::Value*, 16> fields;
+ SmallVector<llvm::Value*, 16> fields;
fields.push_back(createFieldType("__isa", C.VoidPtrTy, 0, loc, AS_public,
blockLayout->getElementOffsetInBits(0),
tunit, tunit));
@@ -2154,7 +2191,7 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
// We want to sort the captures by offset, not because DWARF
// requires this, but because we're paranoid about debuggers.
- llvm::SmallVector<BlockLayoutChunk, 8> chunks;
+ SmallVector<BlockLayoutChunk, 8> chunks;
// 'this' capture.
if (blockDecl->capturesCXXThis()) {
@@ -2187,7 +2224,7 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
// Sort by offset.
llvm::array_pod_sort(chunks.begin(), chunks.end());
- for (llvm::SmallVectorImpl<BlockLayoutChunk>::iterator
+ for (SmallVectorImpl<BlockLayoutChunk>::iterator
i = chunks.begin(), e = chunks.end(); i != e; ++i) {
uint64_t offsetInBits = i->OffsetInBits;
const BlockDecl::Capture *capture = i->Capture;
@@ -2204,7 +2241,7 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
}
const VarDecl *variable = capture->getVariable();
- llvm::StringRef name = variable->getName();
+ StringRef name = variable->getName();
llvm::DIType fieldType;
if (capture->isByRef()) {
@@ -2239,8 +2276,8 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
// Get overall information about the block.
unsigned flags = llvm::DIDescriptor::FlagArtificial;
- llvm::MDNode *scope = RegionStack.back();
- llvm::StringRef name = ".block_descriptor";
+ llvm::MDNode *scope = LexicalBlockStack.back();
+ StringRef name = ".block_descriptor";
// Create the descriptor for the parameter.
llvm::DIVariable debugVar =
@@ -2265,6 +2302,8 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
llvm::DIFile Unit = getOrCreateFile(D->getLocation());
unsigned LineNo = getLineNumber(D->getLocation());
+ setLocation(D->getLocation());
+
QualType T = D->getType();
if (T->isIncompleteArrayType()) {
@@ -2277,13 +2316,13 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
T = CGM.getContext().getConstantArrayType(ET, ConstVal,
ArrayType::Normal, 0);
}
- llvm::StringRef DeclName = D->getName();
- llvm::StringRef LinkageName;
+ StringRef DeclName = D->getName();
+ StringRef LinkageName;
if (D->getDeclContext() && !isa<FunctionDecl>(D->getDeclContext())
&& !isa<ObjCMethodDecl>(D->getDeclContext()))
LinkageName = Var->getName();
if (LinkageName == DeclName)
- LinkageName = llvm::StringRef();
+ LinkageName = StringRef();
llvm::DIDescriptor DContext =
getContextDescriptor(dyn_cast<Decl>(D->getDeclContext()));
DBuilder.createStaticVariable(DContext, DeclName, LinkageName,
@@ -2298,7 +2337,7 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
llvm::DIFile Unit = getOrCreateFile(ID->getLocation());
unsigned LineNo = getLineNumber(ID->getLocation());
- llvm::StringRef Name = ID->getName();
+ StringRef Name = ID->getName();
QualType T = CGM.getContext().getObjCInterfaceType(ID);
if (T->isIncompleteArrayType()) {
@@ -2323,7 +2362,7 @@ void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD,
llvm::Constant *Init) {
// Create the descriptor for the variable.
llvm::DIFile Unit = getOrCreateFile(VD->getLocation());
- llvm::StringRef Name = VD->getName();
+ StringRef Name = VD->getName();
llvm::DIType Ty = getOrCreateType(VD->getType(), Unit);
if (const EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(VD)) {
if (const EnumDecl *ED = dyn_cast<EnumDecl>(ECD->getDeclContext()))
diff --git a/lib/CodeGen/CGDebugInfo.h b/lib/CodeGen/CGDebugInfo.h
index f87d0072e323..a4533a83d579 100644
--- a/lib/CodeGen/CGDebugInfo.h
+++ b/lib/CodeGen/CGDebugInfo.h
@@ -56,21 +56,18 @@ class CGDebugInfo {
bool BlockLiteralGenericSet;
llvm::DIType BlockLiteralGeneric;
- std::vector<llvm::TrackingVH<llvm::MDNode> > RegionStack;
+ // LexicalBlockStack - Keep track of our current nested lexical block.
+ std::vector<llvm::TrackingVH<llvm::MDNode> > LexicalBlockStack;
llvm::DenseMap<const Decl *, llvm::WeakVH> RegionMap;
- // FnBeginRegionCount - Keep track of RegionStack counter at the beginning
- // of a function. This is used to pop unbalanced regions at the end of a
- // function.
+ // FnBeginRegionCount - Keep track of LexicalBlockStack counter at the
+ // beginning of a function. This is used to pop unbalanced regions at
+ // the end of a function.
std::vector<unsigned> FnBeginRegionCount;
- /// LineDirectiveFiles - This stack is used to keep track of
- /// scopes introduced by #line directives.
- std::vector<const char *> LineDirectiveFiles;
-
/// DebugInfoNames - This is a storage for names that are
 /// constructed on demand. For example, C++ destructors, C++ operators, etc.
llvm::BumpPtrAllocator DebugInfoNames;
- llvm::StringRef CWDName;
+ StringRef CWDName;
llvm::DenseMap<const char *, llvm::WeakVH> DIFileCache;
llvm::DenseMap<const FunctionDecl *, llvm::WeakVH> SPCache;
@@ -95,6 +92,7 @@ class CGDebugInfo {
llvm::DIType CreateType(const LValueReferenceType *Ty, llvm::DIFile F);
llvm::DIType CreateType(const RValueReferenceType *Ty, llvm::DIFile Unit);
llvm::DIType CreateType(const MemberPointerType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const AtomicType *Ty, llvm::DIFile F);
llvm::DIType CreateEnumType(const EnumDecl *ED);
llvm::DIType getOrCreateMethodType(const CXXMethodDecl *Method,
llvm::DIFile F);
@@ -113,17 +111,17 @@ class CGDebugInfo {
void CollectCXXMemberFunctions(const CXXRecordDecl *Decl,
llvm::DIFile F,
- llvm::SmallVectorImpl<llvm::Value *> &E,
+ SmallVectorImpl<llvm::Value *> &E,
llvm::DIType T);
void CollectCXXFriends(const CXXRecordDecl *Decl,
llvm::DIFile F,
- llvm::SmallVectorImpl<llvm::Value *> &EltTys,
+ SmallVectorImpl<llvm::Value *> &EltTys,
llvm::DIType RecordTy);
void CollectCXXBases(const CXXRecordDecl *Decl,
llvm::DIFile F,
- llvm::SmallVectorImpl<llvm::Value *> &EltTys,
+ SmallVectorImpl<llvm::Value *> &EltTys,
llvm::DIType RecordTy);
llvm::DIArray
@@ -136,30 +134,35 @@ class CGDebugInfo {
CollectCXXTemplateParams(const ClassTemplateSpecializationDecl *TS,
llvm::DIFile F);
- llvm::DIType createFieldType(llvm::StringRef name, QualType type,
- Expr *bitWidth, SourceLocation loc,
+ llvm::DIType createFieldType(StringRef name, QualType type,
+ uint64_t sizeInBitsOverride, SourceLocation loc,
AccessSpecifier AS, uint64_t offsetInBits,
llvm::DIFile tunit,
llvm::DIDescriptor scope);
void CollectRecordFields(const RecordDecl *Decl, llvm::DIFile F,
- llvm::SmallVectorImpl<llvm::Value *> &E,
+ SmallVectorImpl<llvm::Value *> &E,
llvm::DIType RecordTy);
void CollectVTableInfo(const CXXRecordDecl *Decl,
llvm::DIFile F,
- llvm::SmallVectorImpl<llvm::Value *> &EltTys);
+ SmallVectorImpl<llvm::Value *> &EltTys);
+ // CreateLexicalBlock - Create a new lexical block node and push it on
+ // the stack.
+ void CreateLexicalBlock(SourceLocation Loc);
+
public:
CGDebugInfo(CodeGenModule &CGM);
~CGDebugInfo();
+ void finalize() { DBuilder.finalize(); }
/// setLocation - Update the current source location. If \arg loc is
/// invalid it is ignored.
void setLocation(SourceLocation Loc);
- /// EmitStopPoint - Emit a call to llvm.dbg.stoppoint to indicate a change of
- /// source line.
- void EmitStopPoint(CGBuilderTy &Builder);
+ /// EmitLocation - Emit metadata to indicate a change in line/column
+ /// information in the source file.
+ void EmitLocation(CGBuilderTy &Builder, SourceLocation Loc);
/// EmitFunctionStart - Emit a call to llvm.dbg.function.start to indicate
/// start of a new function.
@@ -169,21 +172,17 @@ public:
/// EmitFunctionEnd - Constructs the debug code for exiting a function.
void EmitFunctionEnd(CGBuilderTy &Builder);
- /// UpdateLineDirectiveRegion - Update region stack only if #line directive
- /// has introduced scope change.
- void UpdateLineDirectiveRegion(CGBuilderTy &Builder);
-
/// UpdateCompletedType - Update type cache because the type is now
/// translated.
void UpdateCompletedType(const TagDecl *TD);
- /// EmitRegionStart - Emit a call to llvm.dbg.region.start to indicate start
- /// of a new block.
- void EmitRegionStart(CGBuilderTy &Builder);
+ /// EmitLexicalBlockStart - Emit metadata to indicate the beginning of a
+ /// new lexical block and push the block onto the stack.
+ void EmitLexicalBlockStart(CGBuilderTy &Builder, SourceLocation Loc);
- /// EmitRegionEnd - Emit call to llvm.dbg.region.end to indicate end of a
- /// block.
- void EmitRegionEnd(CGBuilderTy &Builder);
+  /// EmitLexicalBlockEnd - Emit metadata to indicate the end of a lexical
+  /// block and pop the current block.
+ void EmitLexicalBlockEnd(CGBuilderTy &Builder, SourceLocation Loc);
/// EmitDeclareOfAutoVariable - Emit call to llvm.dbg.declare for an automatic
/// variable declaration.
@@ -234,7 +233,7 @@ private:
llvm::DIDescriptor getContextDescriptor(const Decl *Decl);
/// getCurrentDirname - Return current directory name.
- llvm::StringRef getCurrentDirname();
+ StringRef getCurrentDirname();
/// CreateCompileUnit - Create new compile unit.
void CreateCompileUnit();
@@ -255,7 +254,7 @@ private:
/// CreateMemberType - Create new member and increase Offset by FType's size.
llvm::DIType CreateMemberType(llvm::DIFile Unit, QualType FType,
- llvm::StringRef Name, uint64_t *Offset);
+ StringRef Name, uint64_t *Offset);
/// getFunctionDeclaration - Return debug info descriptor to describe method
/// declaration for the given method definition.
@@ -264,21 +263,21 @@ private:
/// getFunctionName - Get function name for the given FunctionDecl. If the
 /// name is constructed on demand (e.g. C++ destructor) then the name
/// is stored on the side.
- llvm::StringRef getFunctionName(const FunctionDecl *FD);
+ StringRef getFunctionName(const FunctionDecl *FD);
/// getObjCMethodName - Returns the unmangled name of an Objective-C method.
/// This is the display name for the debugging info.
- llvm::StringRef getObjCMethodName(const ObjCMethodDecl *FD);
+ StringRef getObjCMethodName(const ObjCMethodDecl *FD);
/// getSelectorName - Return selector name. This is used for debugging
/// info.
- llvm::StringRef getSelectorName(Selector S);
+ StringRef getSelectorName(Selector S);
/// getClassName - Get class name including template argument list.
- llvm::StringRef getClassName(RecordDecl *RD);
+ StringRef getClassName(RecordDecl *RD);
/// getVTableName - Get vtable name for the given Class.
- llvm::StringRef getVTableName(const CXXRecordDecl *Decl);
+ StringRef getVTableName(const CXXRecordDecl *Decl);
/// getLineNumber - Get line number for the location. If location is invalid
/// then use current location.
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
index 62c3a9791d0f..a6147ea7658b 100644
--- a/lib/CodeGen/CGDecl.cpp
+++ b/lib/CodeGen/CGDecl.cpp
@@ -14,6 +14,7 @@
#include "CGDebugInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
+#include "CGOpenCLRuntime.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
@@ -46,7 +47,7 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::Field:
case Decl::IndirectField:
case Decl::ObjCIvar:
- case Decl::ObjCAtDefsField:
+ case Decl::ObjCAtDefsField:
case Decl::ParmVar:
case Decl::ImplicitParam:
case Decl::ClassTemplate:
@@ -70,7 +71,8 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::Friend:
case Decl::FriendTemplate:
case Decl::Block:
- assert(0 && "Declaration should not be in declstmts!");
+ case Decl::ClassScopeFunctionSpecialization:
+ llvm_unreachable("Declaration should not be in declstmts!");
case Decl::Function: // void X();
case Decl::Record: // struct/union/class X;
case Decl::Enum: // enum X;
@@ -112,7 +114,7 @@ void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
case SC_Register:
return EmitAutoVarDecl(D);
case SC_Static: {
- llvm::GlobalValue::LinkageTypes Linkage =
+ llvm::GlobalValue::LinkageTypes Linkage =
llvm::GlobalValue::InternalLinkage;
// If the function definition has some sort of weak linkage, its
@@ -123,26 +125,28 @@ void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
if (getContext().getLangOptions().CPlusPlus)
if (llvm::GlobalValue::isWeakForLinker(CurFn->getLinkage()))
Linkage = CurFn->getLinkage();
-
+
return EmitStaticVarDecl(D, Linkage);
}
case SC_Extern:
case SC_PrivateExtern:
// Don't emit it now, allow it to be emitted lazily on its first use.
return;
+ case SC_OpenCLWorkGroupLocal:
+ return CGM.getOpenCLRuntime().EmitWorkGroupLocalVarDecl(*this, D);
}
- assert(0 && "Unknown storage class");
+ llvm_unreachable("Unknown storage class");
}
static std::string GetStaticDeclName(CodeGenFunction &CGF, const VarDecl &D,
const char *Separator) {
CodeGenModule &CGM = CGF.CGM;
if (CGF.getContext().getLangOptions().CPlusPlus) {
- llvm::StringRef Name = CGM.getMangledName(&D);
+ StringRef Name = CGM.getMangledName(&D);
return Name.str();
}
-
+
std::string ContextName;
if (!CGF.CurFuncDecl) {
// Better be in a block declared in global scope.
@@ -154,15 +158,15 @@ static std::string GetStaticDeclName(CodeGenFunction &CGF, const VarDecl &D,
ContextName = Name.getString();
}
else
- assert(0 && "Unknown context for block static var decl");
+ llvm_unreachable("Unknown context for block static var decl");
} else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CGF.CurFuncDecl)) {
- llvm::StringRef Name = CGM.getMangledName(FD);
+ StringRef Name = CGM.getMangledName(FD);
ContextName = Name.str();
} else if (isa<ObjCMethodDecl>(CGF.CurFuncDecl))
ContextName = CGF.CurFn->getName();
else
- assert(0 && "Unknown context for static var decl");
-
+ llvm_unreachable("Unknown context for static var decl");
+
return ContextName + Separator + D.getNameAsString();
}
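
GetStaticDeclName makes a function-local static globally unique by joining the enclosing context's (possibly mangled) name, a separator, and the variable's own name. The scheme, reduced to strings (separator shown as "." purely for illustration):

    #include <iostream>
    #include <string>

    std::string staticDeclName(const std::string &Context,
                               const std::string &Sep,
                               const std::string &Var) {
      return Context + Sep + Var;
    }

    int main() {
      // Two statics named 'counter' in different functions stay distinct.
      std::cout << staticDeclName("foo", ".", "counter") << '\n';
      std::cout << staticDeclName("bar", ".", "counter") << '\n';
    }
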
@@ -175,7 +179,7 @@ CodeGenFunction::CreateStaticVarDecl(const VarDecl &D,
std::string Name = GetStaticDeclName(*this, D, Separator);
- const llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(Ty);
+ llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(Ty);
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(CGM.getModule(), LTy,
Ty.isConstant(getContext()), Linkage,
@@ -203,7 +207,7 @@ CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
if (!getContext().getLangOptions().CPlusPlus)
CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
else if (Builder.GetInsertBlock()) {
- // Since we have a static initializer, this global variable can't
+ // Since we have a static initializer, this global variable can't
// be constant.
GV->setConstant(false);
@@ -218,7 +222,7 @@ CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
// in the LLVM type system.)
if (GV->getType()->getElementType() != Init->getType()) {
llvm::GlobalVariable *OldGV = GV;
-
+
GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
OldGV->isConstant(),
OldGV->getLinkage(), Init, "",
@@ -226,19 +230,19 @@ CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
D.isThreadSpecified(),
CGM.getContext().getTargetAddressSpace(D.getType()));
GV->setVisibility(OldGV->getVisibility());
-
+
// Steal the name of the old global
GV->takeName(OldGV);
-
+
// Replace all uses of the old global with the new global
llvm::Constant *NewPtrForOldDecl =
llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
OldGV->replaceAllUsesWith(NewPtrForOldDecl);
-
+
// Erase the old global, since it is no longer used.
OldGV->eraseFromParent();
}
-
+
GV->setInitializer(Init);
return GV;
}
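
A minimal standalone sketch of the replace-the-global dance in the hunk above, against the LLVM 3.0-era C++ API (the helper name is illustrative): when the initializer's LLVM type disagrees with the forward-declared global's type, a fresh global is built, every use of the old one is rewritten through a bitcast, and the old global is erased.

    #include "llvm/Module.h"           // llvm/IR/Module.h on modern trees
    #include "llvm/GlobalVariable.h"   // llvm/IR/GlobalVariable.h
    #include "llvm/Constants.h"        // llvm/IR/Constants.h

    llvm::GlobalVariable *replaceGlobal(llvm::GlobalVariable *OldGV,
                                        llvm::Constant *Init) {
      llvm::Module &M = *OldGV->getParent();
      llvm::GlobalVariable *GV =
          new llvm::GlobalVariable(M, Init->getType(), OldGV->isConstant(),
                                   OldGV->getLinkage(), Init, "");
      GV->setVisibility(OldGV->getVisibility());
      GV->takeName(OldGV);                   // steal the old global's name
      llvm::Constant *NewPtr =
          llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
      OldGV->replaceAllUsesWith(NewPtr);     // patch every use
      OldGV->eraseFromParent();              // the old global is now dead
      return GV;
    }
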
@@ -259,7 +263,7 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
// Make sure to evaluate VLA bounds now so that we have them for later.
if (D.getType()->isVariablyModifiedType())
EmitVariablyModifiedType(D.getType());
-
+
// Local static block variables must be treated as globals as they may be
   // referenced in their RHS initializer block-literal expression.
CGM.setStaticLocalDeclAddress(&D, GV);
@@ -270,14 +274,8 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());
- // FIXME: Merge attribute handling.
- if (const AnnotateAttr *AA = D.getAttr<AnnotateAttr>()) {
- SourceManager &SM = CGM.getContext().getSourceManager();
- llvm::Constant *Ann =
- CGM.EmitAnnotateAttr(GV, AA,
- SM.getInstantiationLineNumber(D.getLocation()));
- CGM.AddAnnotation(Ann);
- }
+ if (D.hasAttr<AnnotateAttr>())
+ CGM.AddGlobalAnnotations(&D, GV);
if (const SectionAttr *SA = D.getAttr<SectionAttr>())
GV->setSection(SA->getName());
@@ -290,8 +288,8 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
//
// FIXME: It is really dangerous to store this in the map; if anyone
// RAUW's the GV uses of this constant will be invalid.
- const llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(D.getType());
- const llvm::Type *LPtrTy =
+ llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(D.getType());
+ llvm::Type *LPtrTy =
LTy->getPointerTo(CGM.getContext().getTargetAddressSpace(D.getType()));
DMEntry = llvm::ConstantExpr::getBitCast(GV, LPtrTy);
@@ -348,7 +346,7 @@ namespace {
CGF.Builder.CreateCondBr(DidNRVO, SkipDtorBB, RunDtorBB);
CGF.EmitBlock(RunDtorBB);
}
-
+
CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
/*ForVirtualBase=*/false, Loc);
@@ -360,7 +358,7 @@ namespace {
llvm::Value *Stack;
CallStackRestore(llvm::Value *Stack) : Stack(Stack) {}
void Emit(CodeGenFunction &CGF, Flags flags) {
- llvm::Value *V = CGF.Builder.CreateLoad(Stack, "tmp");
+ llvm::Value *V = CGF.Builder.CreateLoad(Stack);
llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
CGF.Builder.CreateCall(F, V);
}
@@ -384,7 +382,7 @@ namespace {
llvm::Constant *CleanupFn;
const CGFunctionInfo &FnInfo;
const VarDecl &Var;
-
+
CallCleanupFunction(llvm::Constant *CleanupFn, const CGFunctionInfo *Info,
const VarDecl *Var)
: CleanupFn(CleanupFn), FnInfo(*Info), Var(*Var) {}
@@ -441,7 +439,7 @@ static void EmitAutoVarWithLifetime(CodeGenFunction &CGF, const VarDecl &var,
case Qualifiers::OCL_Autoreleasing:
// nothing to do
break;
-
+
case Qualifiers::OCL_Weak:
// __weak objects always get EH cleanups; otherwise, exceptions
// could cause really nasty crashes instead of mere leaks.
@@ -508,7 +506,7 @@ void CodeGenFunction::EmitScalarInit(const Expr *init,
// actually perform the initialization with an assign.
bool accessedByInit = false;
if (lifetime != Qualifiers::OCL_ExplicitNone)
- accessedByInit = isAccessedBy(D, init);
+ accessedByInit = (capturedByInit || isAccessedBy(D, init));
if (accessedByInit) {
LValue tempLV = lvalue;
// Drill down to the __block object if necessary.
@@ -519,12 +517,12 @@ void CodeGenFunction::EmitScalarInit(const Expr *init,
getByRefValueLLVMField(cast<VarDecl>(D))));
}
- const llvm::PointerType *ty
+ llvm::PointerType *ty
= cast<llvm::PointerType>(tempLV.getAddress()->getType());
ty = cast<llvm::PointerType>(ty->getElementType());
llvm::Value *zero = llvm::ConstantPointerNull::get(ty);
-
+
// If __weak, we want to use a barrier under certain conditions.
if (lifetime == Qualifiers::OCL_Weak)
EmitARCInitWeak(tempLV.getAddress(), zero);
@@ -613,7 +611,7 @@ void CodeGenFunction::EmitScalarInit(llvm::Value *init, LValue lvalue) {
break;
}
- EmitStoreOfScalar(init, lvalue);
+ EmitStoreOfScalar(init, lvalue);
}
/// canEmitInitWithFewStoresAfterMemset - Decide whether we can emit the
@@ -640,7 +638,7 @@ static bool canEmitInitWithFewStoresAfterMemset(llvm::Constant *Init,
}
return true;
}
-
+
// Anything else is hard and scary.
return false;
}
@@ -655,7 +653,7 @@ static void emitStoresForInitAfterMemset(llvm::Constant *Init, llvm::Value *Loc,
isa<llvm::ConstantPointerNull>(Init) ||
isa<llvm::UndefValue>(Init))
return;
-
+
if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
isa<llvm::ConstantExpr>(Init)) {
@@ -663,14 +661,14 @@ static void emitStoresForInitAfterMemset(llvm::Constant *Init, llvm::Value *Loc,
Builder.CreateStore(Init, Loc, isVolatile);
return;
}
-
+
assert((isa<llvm::ConstantStruct>(Init) || isa<llvm::ConstantArray>(Init)) &&
"Unknown value type!");
-
+
for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
if (Elt->isNullValue()) continue;
-
+
// Otherwise, get a pointer to the element and emit it.
emitStoresForInitAfterMemset(Elt, Builder.CreateConstGEP2_32(Loc, 0, i),
isVolatile, Builder);
@@ -694,8 +692,8 @@ static bool shouldUseMemSetPlusStoresToInitialize(llvm::Constant *Init,
// plopping in more stores.
unsigned StoreBudget = 6;
uint64_t SizeLimit = 32;
-
- return GlobalSize > SizeLimit &&
+
+ return GlobalSize > SizeLimit &&
canEmitInitWithFewStoresAfterMemset(Init, StoreBudget);
}
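
The two constants above encode the whole policy: the memset-plus-stores lowering is used only when the object is larger than 32 bytes and at most six scalar stores are needed to patch the non-zero fields afterwards. A worked example, with the expected lowering noted in comments:

    // 400-byte local with one non-zero element: big enough for the memset
    // path, and only a single follow-up store is required.
    void example() {
      int arr[100] = { 1 };   // => memset(arr, 0, 400); then arr[0] = 1;
      (void)arr;
    }
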
@@ -730,7 +728,7 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
llvm::Value *DeclPtr;
if (Ty->isConstantSizeType()) {
if (!Target.useGlobalsForAutomaticVariables()) {
- bool NRVO = getContext().getLangOptions().ElideConstructors &&
+ bool NRVO = getContext().getLangOptions().ElideConstructors &&
D.isNRVOVariable();
// If this value is a POD array or struct with a statically
@@ -740,7 +738,7 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// arrays as long as the initialization is trivial (e.g. if they
// have a non-trivial destructor, but not a non-trivial constructor).
if (D.getInit() &&
- (Ty->isArrayType() || Ty->isRecordType()) &&
+ (Ty->isArrayType() || Ty->isRecordType()) &&
(Ty.isPODType(getContext()) ||
getContext().getBaseElementType(Ty)->isObjCObjectPointerType()) &&
D.getInit()->isConstantInitializer(getContext(), false)) {
@@ -759,27 +757,27 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// Otherwise, tell the initialization code that we're in this case.
emission.IsConstantAggregate = true;
}
-
+
// A normal fixed sized variable becomes an alloca in the entry block,
// unless it's an NRVO variable.
- const llvm::Type *LTy = ConvertTypeForMem(Ty);
-
+ llvm::Type *LTy = ConvertTypeForMem(Ty);
+
if (NRVO) {
// The named return value optimization: allocate this variable in the
// return slot, so that we can elide the copy when returning this
// variable (C++0x [class.copy]p34).
DeclPtr = ReturnValue;
-
+
if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
if (!cast<CXXRecordDecl>(RecordTy->getDecl())->hasTrivialDestructor()) {
// Create a flag that is used to indicate when the NRVO was applied
- // to this variable. Set it to zero to indicate that NRVO was not
+ // to this variable. Set it to zero to indicate that NRVO was not
// applied.
llvm::Value *Zero = Builder.getFalse();
llvm::Value *NRVOFlag = CreateTempAlloca(Zero->getType(), "nrvo");
EnsureInsertPoint();
Builder.CreateStore(Zero, NRVOFlag);
-
+
// Record the NRVO flag for this variable.
NRVOFlags[&D] = NRVOFlag;
emission.NRVOFlag = NRVOFlag;
@@ -788,13 +786,13 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
} else {
if (isByRef)
LTy = BuildByRefType(&D);
-
+
llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
Alloc->setName(D.getNameAsString());
CharUnits allocaAlignment = alignment;
if (isByRef)
- allocaAlignment = std::max(allocaAlignment,
+ allocaAlignment = std::max(allocaAlignment,
getContext().toCharUnitsFromBits(Target.getPointerAlign(0)));
Alloc->setAlignment(allocaAlignment.getQuantity());
DeclPtr = Alloc;
@@ -829,7 +827,7 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
QualType elementType;
llvm::tie(elementCount, elementType) = getVLASize(Ty);
- const llvm::Type *llvmTy = ConvertTypeForMem(elementType);
+ llvm::Type *llvmTy = ConvertTypeForMem(elementType);
// Allocate memory for the array.
llvm::AllocaInst *vla = Builder.CreateAlloca(llvmTy, elementCount, "vla");
@@ -853,6 +851,9 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder);
}
+ if (D.hasAttr<AnnotateAttr>())
+ EmitVarAnnotations(&D, emission.Address);
+
return emission;
}
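
The added hunk routes annotated automatic variables through EmitVarAnnotations, mirroring what EmitStaticVarDecl now does via AddGlobalAnnotations. A source-level trigger, with an illustrative tag string:

    void annotated() {
      // Clang's annotate attribute; the string ends up in the
      // llvm.var.annotation intrinsic emitted for this alloca.
      int counter __attribute__((annotate("my_tag"))) = 0;
      (void)counter;
    }
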
@@ -875,6 +876,32 @@ static bool isCapturedBy(const VarDecl &var, const Expr *e) {
return false;
}
+ if (const StmtExpr *SE = dyn_cast<StmtExpr>(e)) {
+ const CompoundStmt *CS = SE->getSubStmt();
+ for (CompoundStmt::const_body_iterator BI = CS->body_begin(),
+ BE = CS->body_end(); BI != BE; ++BI)
+ if (Expr *E = dyn_cast<Expr>((*BI))) {
+ if (isCapturedBy(var, E))
+ return true;
+ }
+ else if (DeclStmt *DS = dyn_cast<DeclStmt>((*BI))) {
+        // Special-case declarations.
+ for (DeclStmt::decl_iterator I = DS->decl_begin(), E = DS->decl_end();
+ I != E; ++I) {
+ if (VarDecl *VD = dyn_cast<VarDecl>((*I))) {
+ Expr *Init = VD->getInit();
+ if (Init && isCapturedBy(var, Init))
+ return true;
+ }
+ }
+ }
+ else
+        // FIXME: Conservatively assume that arbitrary statements cause
+        // capturing; later, provide code to poke into statements for
+        // capture analysis.
+ return true;
+ return false;
+ }
+
for (Stmt::const_child_range children = e->children(); children; ++children)
if (isCapturedBy(var, cast<Expr>(*children)))
return true;
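
The new StmtExpr case matters for GNU statement expressions whose bodies capture the variable in a block before its initialization completes. A minimal trigger, assuming -fblocks (the helper name is illustrative):

    extern void use(void (^)(void));

    void f() {
      // The initializer's statement expression captures 'x' in a block, so
      // isCapturedBy() must return true and the store must go through the
      // __block machinery rather than a plain initialization.
      __block int x = ({ use(^{ (void)x; }); 42; });
      (void)x;
    }
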
@@ -887,14 +914,14 @@ static bool isCapturedBy(const VarDecl &var, const Expr *e) {
static bool isTrivialInitializer(const Expr *Init) {
if (!Init)
return true;
-
+
if (const CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Init))
if (CXXConstructorDecl *Constructor = Construct->getConstructor())
if (Constructor->isTrivial() &&
Constructor->isDefaultConstructor() &&
!Construct->requiresZeroInitialization())
return true;
-
+
return false;
}
void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
@@ -922,7 +949,6 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
if (isTrivialInitializer(Init))
return;
-
CharUnits alignment = emission.Alignment;
@@ -950,16 +976,16 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
assert(constant != 0 && "Wasn't a simple constant init?");
llvm::Value *SizeVal =
- llvm::ConstantInt::get(IntPtrTy,
+ llvm::ConstantInt::get(IntPtrTy,
getContext().getTypeSizeInChars(type).getQuantity());
- const llvm::Type *BP = Int8PtrTy;
+ llvm::Type *BP = Int8PtrTy;
if (Loc->getType() != BP)
- Loc = Builder.CreateBitCast(Loc, BP, "tmp");
+ Loc = Builder.CreateBitCast(Loc, BP);
// If the initializer is all or mostly zeros, codegen with memset then do
// a few stores afterward.
- if (shouldUseMemSetPlusStoresToInitialize(constant,
+ if (shouldUseMemSetPlusStoresToInitialize(constant,
CGM.getTargetData().getTypeAllocSize(constant->getType()))) {
Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0), SizeVal,
alignment.getQuantity(), isVolatile);
@@ -968,19 +994,19 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
emitStoresForInitAfterMemset(constant, Loc, isVolatile, Builder);
}
} else {
- // Otherwise, create a temporary global with the initializer then
+ // Otherwise, create a temporary global with the initializer then
// memcpy from the global to the alloca.
std::string Name = GetStaticDeclName(*this, D, ".");
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(CGM.getModule(), constant->getType(), true,
- llvm::GlobalValue::InternalLinkage,
+ llvm::GlobalValue::PrivateLinkage,
constant, Name, 0, false, 0);
GV->setAlignment(alignment.getQuantity());
GV->setUnnamedAddr(true);
-
+
llvm::Value *SrcPtr = GV;
if (SrcPtr->getType() != BP)
- SrcPtr = Builder.CreateBitCast(SrcPtr, BP, "tmp");
+ SrcPtr = Builder.CreateBitCast(SrcPtr, BP);
Builder.CreateMemCpy(Loc, SrcPtr, SizeVal, alignment.getQuantity(),
isVolatile);
@@ -1007,7 +1033,7 @@ void CodeGenFunction::EmitExprAsInit(const Expr *init,
if (type->isReferenceType()) {
RValue rvalue = EmitReferenceBindingToExpr(init, D);
- if (capturedByInit)
+ if (capturedByInit)
drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
EmitStoreThroughLValue(rvalue, lvalue);
} else if (!hasAggregateLLVMType(type)) {
@@ -1019,7 +1045,10 @@ void CodeGenFunction::EmitExprAsInit(const Expr *init,
StoreComplexToAddr(complex, lvalue.getAddress(), lvalue.isVolatile());
} else {
// TODO: how can we delay here if D is captured by its initializer?
- EmitAggExpr(init, AggValueSlot::forLValue(lvalue, true, false));
+ EmitAggExpr(init, AggValueSlot::forLValue(lvalue,
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased));
}
}
@@ -1058,7 +1087,7 @@ void CodeGenFunction::emitAutoVarTypeCleanup(
case QualType::DK_objc_strong_lifetime:
// Suppress cleanups for pseudo-strong variables.
if (var->isARCPseudoStrong()) return;
-
+
// Otherwise, consider whether to use an EH cleanup or not.
cleanupKind = getARCCleanupKind();
@@ -1094,7 +1123,7 @@ void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
emitAutoVarTypeCleanup(emission, dtorKind);
// In GC mode, honor objc_precise_lifetime.
- if (getLangOptions().getGCMode() != LangOptions::NonGC &&
+ if (getLangOptions().getGC() != LangOptions::NonGC &&
D.hasAttr<ObjCPreciseLifetimeAttr>()) {
EHStack.pushCleanup<ExtendGCLifetime>(NormalCleanup, &D);
}
@@ -1267,11 +1296,9 @@ static void emitPartialArrayDestroy(CodeGenFunction &CGF,
if (arrayDepth) {
llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, arrayDepth+1);
- llvm::SmallVector<llvm::Value*,4> gepIndices(arrayDepth, zero);
- begin = CGF.Builder.CreateInBoundsGEP(begin, gepIndices.begin(),
- gepIndices.end(), "pad.arraybegin");
- end = CGF.Builder.CreateInBoundsGEP(end, gepIndices.begin(),
- gepIndices.end(), "pad.arrayend");
+ SmallVector<llvm::Value*,4> gepIndices(arrayDepth, zero);
+ begin = CGF.Builder.CreateInBoundsGEP(begin, gepIndices, "pad.arraybegin");
+ end = CGF.Builder.CreateInBoundsGEP(end, gepIndices, "pad.arrayend");
}
// Destroy the array. We don't ever need an EH cleanup because we
@@ -1330,7 +1357,7 @@ namespace {
/// pushIrregularPartialArrayCleanup - Push an EH cleanup to destroy
/// already-constructed elements of the given array. The cleanup
/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
-///
+///
/// \param elementType - the immediate element type of the array;
/// possibly still an array type
/// \param array - a value of type elementType*
@@ -1349,7 +1376,7 @@ void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
/// pushRegularPartialArrayCleanup - Push an EH cleanup to destroy
/// already-constructed elements of the given array. The cleanup
/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
-///
+///
/// \param elementType - the immediate element type of the array;
/// possibly still an array type
/// \param array - a value of type elementType*
@@ -1476,4 +1503,7 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
// Emit debug info for param declaration.
if (CGDebugInfo *DI = getDebugInfo())
DI->EmitDeclareOfArgVariable(&D, DeclPtr, ArgNo, Builder);
+
+ if (D.hasAttr<AnnotateAttr>())
+ EmitVarAnnotations(&D, DeclPtr);
}
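
A pattern applied throughout this patch: assert(0 && "msg") compiles away in NDEBUG builds and leaves control falling off the end of the switch, whereas llvm_unreachable("msg") still traps in asserts builds and marks the path unreachable for the optimizer. A minimal sketch:

    #include "llvm/Support/ErrorHandling.h"

    int classify(int kind) {
      switch (kind) {
      case 0: return 1;
      case 1: return 2;
      }
      llvm_unreachable("unknown kind");  // also silences 'control reaches
    }                                    // end of non-void function'
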
diff --git a/lib/CodeGen/CGDeclCXX.cpp b/lib/CodeGen/CGDeclCXX.cpp
index 0ae6a3d2ee90..3b8f830278b2 100644
--- a/lib/CodeGen/CGDeclCXX.cpp
+++ b/lib/CodeGen/CGDeclCXX.cpp
@@ -46,7 +46,9 @@ static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
} else if (type->isAnyComplexType()) {
CGF.EmitComplexExprIntoAddr(Init, DeclPtr, lv.isVolatile());
} else {
- CGF.EmitAggExpr(Init, AggValueSlot::forLValue(lv, true));
+ CGF.EmitAggExpr(Init, AggValueSlot::forLValue(lv,AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased));
}
}
@@ -126,17 +128,16 @@ CodeGenFunction::EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn,
}
// Get the destructor function type
- llvm::Type *ArgTys[] = { Int8PtrTy };
llvm::Type *DtorFnTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
- ArgTys, false);
+ Int8PtrTy, false);
DtorFnTy = llvm::PointerType::getUnqual(DtorFnTy);
llvm::Type *Params[] = { DtorFnTy, Int8PtrTy, Int8PtrTy };
// Get the __cxa_atexit function type
// extern "C" int __cxa_atexit ( void (*f)(void *), void *p, void *d );
- const llvm::FunctionType *AtExitFnTy =
+ llvm::FunctionType *AtExitFnTy =
llvm::FunctionType::get(ConvertType(getContext().IntTy), Params, false);
llvm::Constant *AtExitFn = CGM.CreateRuntimeFunction(AtExitFnTy,
@@ -167,15 +168,15 @@ void CodeGenFunction::EmitCXXGuardedInit(const VarDecl &D,
static llvm::Function *
CreateGlobalInitOrDestructFunction(CodeGenModule &CGM,
- const llvm::FunctionType *FTy,
- llvm::StringRef Name) {
+ llvm::FunctionType *FTy,
+ StringRef Name) {
llvm::Function *Fn =
llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
Name, &CGM.getModule());
if (!CGM.getContext().getLangOptions().AppleKext) {
// Set the section if needed.
if (const char *Section =
- CGM.getContext().Target.getStaticInitSectionSpecifier())
+ CGM.getContext().getTargetInfo().getStaticInitSectionSpecifier())
Fn->setSection(Section);
}
@@ -188,7 +189,7 @@ CreateGlobalInitOrDestructFunction(CodeGenModule &CGM,
void
CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
llvm::GlobalVariable *Addr) {
- const llvm::FunctionType *FTy
+ llvm::FunctionType *FTy
= llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
false);
@@ -225,7 +226,7 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
if (CXXGlobalInits.empty() && PrioritizedCXXGlobalInits.empty())
return;
- const llvm::FunctionType *FTy
+ llvm::FunctionType *FTy
= llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
false);
@@ -234,7 +235,7 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
CreateGlobalInitOrDestructFunction(*this, FTy, "_GLOBAL__I_a");
if (!PrioritizedCXXGlobalInits.empty()) {
- llvm::SmallVector<llvm::Constant*, 8> LocalCXXGlobalInits;
+ SmallVector<llvm::Constant*, 8> LocalCXXGlobalInits;
llvm::array_pod_sort(PrioritizedCXXGlobalInits.begin(),
PrioritizedCXXGlobalInits.end());
for (unsigned i = 0; i < PrioritizedCXXGlobalInits.size(); i++) {
@@ -259,7 +260,7 @@ void CodeGenModule::EmitCXXGlobalDtorFunc() {
if (CXXGlobalDtors.empty())
return;
- const llvm::FunctionType *FTy
+ llvm::FunctionType *FTy
= llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
false);
@@ -351,7 +352,7 @@ CodeGenFunction::generateDestroyHelper(llvm::Constant *addr,
const CGFunctionInfo &FI =
CGM.getTypes().getFunctionInfo(getContext().VoidTy, args,
FunctionType::ExtInfo());
- const llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI, false);
+ llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI, false);
llvm::Function *fn =
CreateGlobalInitOrDestructFunction(CGM, FTy, "__cxx_global_array_dtor");
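
Several hunks above delete the one-element ArgTys arrays; this works because llvm::ArrayRef has an implicit single-element constructor, so a lone llvm::Type* converts directly at the FunctionType::get call site. A sketch against the same era's API:

    #include "llvm/DerivedTypes.h"   // llvm/IR/DerivedTypes.h on modern trees

    llvm::FunctionType *voidOfPtr(llvm::LLVMContext &Ctx) {
      llvm::Type *VoidTy = llvm::Type::getVoidTy(Ctx);
      llvm::Type *I8Ptr  = llvm::Type::getInt8PtrTy(Ctx);
      // Equivalent to FunctionType::get(VoidTy, ArrayRef<Type*>(I8Ptr), false).
      return llvm::FunctionType::get(VoidTy, I8Ptr, /*isVarArg=*/false);
    }
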
diff --git a/lib/CodeGen/CGException.cpp b/lib/CodeGen/CGException.cpp
index 418bea6ee402..5e4fb9881937 100644
--- a/lib/CodeGen/CGException.cpp
+++ b/lib/CodeGen/CGException.cpp
@@ -29,9 +29,8 @@ using namespace CodeGen;
static llvm::Constant *getAllocateExceptionFn(CodeGenFunction &CGF) {
// void *__cxa_allocate_exception(size_t thrown_size);
- llvm::Type *ArgTys[] = { CGF.SizeTy };
- const llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGF.Int8PtrTy, ArgTys, /*IsVarArgs=*/false);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGF.Int8PtrTy, CGF.SizeTy, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
}
@@ -39,9 +38,8 @@ static llvm::Constant *getAllocateExceptionFn(CodeGenFunction &CGF) {
static llvm::Constant *getFreeExceptionFn(CodeGenFunction &CGF) {
// void __cxa_free_exception(void *thrown_exception);
- llvm::Type *ArgTys[] = { CGF.Int8PtrTy };
- const llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGF.VoidTy, ArgTys, /*IsVarArgs=*/false);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_free_exception");
}
@@ -51,7 +49,7 @@ static llvm::Constant *getThrowFn(CodeGenFunction &CGF) {
// void (*dest) (void *));
llvm::Type *Args[3] = { CGF.Int8PtrTy, CGF.Int8PtrTy, CGF.Int8PtrTy };
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
llvm::FunctionType::get(CGF.VoidTy, Args, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
@@ -60,7 +58,7 @@ static llvm::Constant *getThrowFn(CodeGenFunction &CGF) {
static llvm::Constant *getReThrowFn(CodeGenFunction &CGF) {
// void __cxa_rethrow();
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
llvm::FunctionType::get(CGF.VoidTy, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
@@ -69,9 +67,8 @@ static llvm::Constant *getReThrowFn(CodeGenFunction &CGF) {
static llvm::Constant *getGetExceptionPtrFn(CodeGenFunction &CGF) {
// void *__cxa_get_exception_ptr(void*);
- llvm::Type *ArgTys[] = { CGF.Int8PtrTy };
- const llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGF.Int8PtrTy, ArgTys, /*IsVarArgs=*/false);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGF.Int8PtrTy, CGF.Int8PtrTy, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
}
@@ -79,9 +76,8 @@ static llvm::Constant *getGetExceptionPtrFn(CodeGenFunction &CGF) {
static llvm::Constant *getBeginCatchFn(CodeGenFunction &CGF) {
// void *__cxa_begin_catch(void*);
- llvm::Type *ArgTys[] = { CGF.Int8PtrTy };
- const llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGF.Int8PtrTy, ArgTys, /*IsVarArgs=*/false);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGF.Int8PtrTy, CGF.Int8PtrTy, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
}
@@ -89,7 +85,7 @@ static llvm::Constant *getBeginCatchFn(CodeGenFunction &CGF) {
static llvm::Constant *getEndCatchFn(CodeGenFunction &CGF) {
// void __cxa_end_catch();
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
llvm::FunctionType::get(CGF.VoidTy, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
@@ -98,17 +94,15 @@ static llvm::Constant *getEndCatchFn(CodeGenFunction &CGF) {
static llvm::Constant *getUnexpectedFn(CodeGenFunction &CGF) {
  // void __cxa_call_unexpected(void *thrown_exception);
- llvm::Type *ArgTys[] = { CGF.Int8PtrTy };
- const llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGF.VoidTy, ArgTys, /*IsVarArgs=*/false);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_call_unexpected");
}
llvm::Constant *CodeGenFunction::getUnwindResumeFn() {
- llvm::Type *ArgTys[] = { Int8PtrTy };
- const llvm::FunctionType *FTy =
- llvm::FunctionType::get(VoidTy, ArgTys, /*IsVarArgs=*/false);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(VoidTy, Int8PtrTy, /*IsVarArgs=*/false);
if (CGM.getLangOptions().SjLjExceptions)
return CGM.CreateRuntimeFunction(FTy, "_Unwind_SjLj_Resume");
@@ -116,9 +110,8 @@ llvm::Constant *CodeGenFunction::getUnwindResumeFn() {
}
llvm::Constant *CodeGenFunction::getUnwindResumeOrRethrowFn() {
- llvm::Type *ArgTys[] = { Int8PtrTy };
- const llvm::FunctionType *FTy =
- llvm::FunctionType::get(VoidTy, ArgTys, /*IsVarArgs=*/false);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(VoidTy, Int8PtrTy, /*IsVarArgs=*/false);
if (CGM.getLangOptions().SjLjExceptions)
return CGM.CreateRuntimeFunction(FTy, "_Unwind_SjLj_Resume_or_Rethrow");
@@ -128,10 +121,10 @@ llvm::Constant *CodeGenFunction::getUnwindResumeOrRethrowFn() {
static llvm::Constant *getTerminateFn(CodeGenFunction &CGF) {
// void __terminate();
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
llvm::FunctionType::get(CGF.VoidTy, /*IsVarArgs=*/false);
- llvm::StringRef name;
+ StringRef name;
// In C++, use std::terminate().
if (CGF.getLangOptions().CPlusPlus)
@@ -145,10 +138,9 @@ static llvm::Constant *getTerminateFn(CodeGenFunction &CGF) {
}
static llvm::Constant *getCatchallRethrowFn(CodeGenFunction &CGF,
- llvm::StringRef Name) {
- llvm::Type *ArgTys[] = { CGF.Int8PtrTy };
- const llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGF.VoidTy, ArgTys, /*IsVarArgs=*/false);
+ StringRef Name) {
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, /*IsVarArgs=*/false);
return CGF.CGM.CreateRuntimeFunction(FTy, Name);
}
@@ -247,21 +239,34 @@ static bool PersonalityHasOnlyCXXUses(llvm::Constant *Fn) {
continue;
}
- // Otherwise, it has to be a selector call.
- if (!isa<llvm::EHSelectorInst>(User)) return false;
+ // Otherwise, it has to be a landingpad instruction.
+ llvm::LandingPadInst *LPI = dyn_cast<llvm::LandingPadInst>(User);
+ if (!LPI) return false;
- llvm::EHSelectorInst *Selector = cast<llvm::EHSelectorInst>(User);
- for (unsigned I = 2, E = Selector->getNumArgOperands(); I != E; ++I) {
+ for (unsigned I = 0, E = LPI->getNumClauses(); I != E; ++I) {
// Look for something that would've been returned by the ObjC
// runtime's GetEHType() method.
- llvm::GlobalVariable *GV
- = dyn_cast<llvm::GlobalVariable>(Selector->getArgOperand(I));
- if (!GV) continue;
-
- // ObjC EH selector entries are always global variables with
- // names starting like this.
- if (GV->getName().startswith("OBJC_EHTYPE"))
- return false;
+ llvm::Value *Val = LPI->getClause(I)->stripPointerCasts();
+ if (LPI->isCatch(I)) {
+ // Check if the catch value has the ObjC prefix.
+ if (llvm::GlobalVariable *GV = dyn_cast<llvm::GlobalVariable>(Val))
+ // ObjC EH selector entries are always global variables with
+ // names starting like this.
+ if (GV->getName().startswith("OBJC_EHTYPE"))
+ return false;
+ } else {
+ // Check if any of the filter values have the ObjC prefix.
+ llvm::Constant *CVal = cast<llvm::Constant>(Val);
+ for (llvm::User::op_iterator
+ II = CVal->op_begin(), IE = CVal->op_end(); II != IE; ++II) {
+ if (llvm::GlobalVariable *GV =
+ cast<llvm::GlobalVariable>((*II)->stripPointerCasts()))
+ // ObjC EH selector entries are always global variables with
+ // names starting like this.
+ if (GV->getName().startswith("OBJC_EHTYPE"))
+ return false;
+ }
+ }
}
}
@@ -274,7 +279,7 @@ static bool PersonalityHasOnlyCXXUses(llvm::Constant *Fn) {
/// when it really needs it.
void CodeGenModule::SimplifyPersonality() {
// For now, this is really a Darwin-specific operation.
- if (!Context.Target.getTriple().isOSDarwin())
+ if (!Context.getTargetInfo().getTriple().isOSDarwin())
return;
// If we're not in ObjC++ -fexceptions, there's nothing to do.
@@ -314,12 +319,6 @@ static llvm::Constant *getCatchAllValue(CodeGenFunction &CGF) {
return llvm::ConstantPointerNull::get(CGF.Int8PtrTy);
}
-/// Returns the value to inject into a selector to indicate the
-/// presence of a cleanup.
-static llvm::Constant *getCleanupValue(CodeGenFunction &CGF) {
- return llvm::ConstantInt::get(CGF.Builder.getInt32Ty(), 0);
-}
-
namespace {
/// A cleanup to free the exception object if its initialization
/// throws.
@@ -346,7 +345,7 @@ static void EmitAnyExprToExn(CodeGenFunction &CGF, const Expr *e,
// __cxa_allocate_exception returns a void*; we need to cast this
// to the appropriate type for the object.
- const llvm::Type *ty = CGF.ConvertTypeForMem(e->getType())->getPointerTo();
+ llvm::Type *ty = CGF.ConvertTypeForMem(e->getType())->getPointerTo();
llvm::Value *typedAddr = CGF.Builder.CreateBitCast(addr, ty);
// FIXME: this isn't quite right! If there's a final unelided call
@@ -375,6 +374,14 @@ llvm::Value *CodeGenFunction::getEHSelectorSlot() {
return EHSelectorSlot;
}
+llvm::Value *CodeGenFunction::getExceptionFromSlot() {
+ return Builder.CreateLoad(getExceptionSlot(), "exn");
+}
+
+llvm::Value *CodeGenFunction::getSelectorFromSlot() {
+ return Builder.CreateLoad(getEHSelectorSlot(), "sel");
+}
+
void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) {
if (!E->getSubExpr()) {
if (getInvokeDest()) {
@@ -397,7 +404,7 @@ void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) {
QualType ThrowType = E->getSubExpr()->getType();
// Now allocate the exception object.
- const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+ llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();
llvm::Constant *AllocExceptionFn = getAllocateExceptionFn(*this);
@@ -475,6 +482,43 @@ void CodeGenFunction::EmitStartEHSpec(const Decl *D) {
}
}
+/// Emit the dispatch block for a filter scope if necessary.
+static void emitFilterDispatchBlock(CodeGenFunction &CGF,
+ EHFilterScope &filterScope) {
+ llvm::BasicBlock *dispatchBlock = filterScope.getCachedEHDispatchBlock();
+ if (!dispatchBlock) return;
+ if (dispatchBlock->use_empty()) {
+ delete dispatchBlock;
+ return;
+ }
+
+ CGF.EmitBlockAfterUses(dispatchBlock);
+
+ // If this isn't a catch-all filter, we need to check whether we got
+ // here because the filter triggered.
+ if (filterScope.getNumFilters()) {
+ // Load the selector value.
+ llvm::Value *selector = CGF.getSelectorFromSlot();
+ llvm::BasicBlock *unexpectedBB = CGF.createBasicBlock("ehspec.unexpected");
+
+ llvm::Value *zero = CGF.Builder.getInt32(0);
+ llvm::Value *failsFilter =
+ CGF.Builder.CreateICmpSLT(selector, zero, "ehspec.fails");
+ CGF.Builder.CreateCondBr(failsFilter, unexpectedBB, CGF.getEHResumeBlock());
+
+ CGF.EmitBlock(unexpectedBB);
+ }
+
+ // Call __cxa_call_unexpected. This doesn't need to be an invoke
+ // because __cxa_call_unexpected magically filters exceptions
+ // according to the last landing pad the exception was thrown
+ // into. Seriously.
+ llvm::Value *exn = CGF.getExceptionFromSlot();
+ CGF.Builder.CreateCall(getUnexpectedFn(CGF), exn)
+ ->setDoesNotReturn();
+ CGF.Builder.CreateUnreachable();
+}
+
void CodeGenFunction::EmitEndEHSpec(const Decl *D) {
if (!CGM.getLangOptions().CXXExceptions)
return;
@@ -492,6 +536,8 @@ void CodeGenFunction::EmitEndEHSpec(const Decl *D) {
EHStack.popTerminate();
}
} else if (EST == EST_Dynamic || EST == EST_DynamicNone) {
+ EHFilterScope &filterScope = cast<EHFilterScope>(*EHStack.begin());
+ emitFilterDispatchBlock(*this, filterScope);
EHStack.popFilter();
}
}
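
At the source level the filter dispatch corresponds to a dynamic exception specification: when the thrown type fails the filter list, the personality routine leaves a negative selector, which the CreateICmpSLT above routes to __cxa_call_unexpected. For example:

    struct A {};
    struct B {};
    void g();

    void f() throw(A) {  // landingpad carries the filter { &typeid(A) }
      g();               // a thrown B fails the filter: the selector goes
    }                    // negative and control reaches ehspec.unexpected
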
@@ -533,6 +579,50 @@ void CodeGenFunction::EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
}
}
+llvm::BasicBlock *
+CodeGenFunction::getEHDispatchBlock(EHScopeStack::stable_iterator si) {
+ // The dispatch block for the end of the scope chain is a block that
+ // just resumes unwinding.
+ if (si == EHStack.stable_end())
+ return getEHResumeBlock();
+
+ // Otherwise, we should look at the actual scope.
+ EHScope &scope = *EHStack.find(si);
+
+ llvm::BasicBlock *dispatchBlock = scope.getCachedEHDispatchBlock();
+ if (!dispatchBlock) {
+ switch (scope.getKind()) {
+ case EHScope::Catch: {
+ // Apply a special case to a single catch-all.
+ EHCatchScope &catchScope = cast<EHCatchScope>(scope);
+ if (catchScope.getNumHandlers() == 1 &&
+ catchScope.getHandler(0).isCatchAll()) {
+ dispatchBlock = catchScope.getHandler(0).Block;
+
+ // Otherwise, make a dispatch block.
+ } else {
+ dispatchBlock = createBasicBlock("catch.dispatch");
+ }
+ break;
+ }
+
+ case EHScope::Cleanup:
+ dispatchBlock = createBasicBlock("ehcleanup");
+ break;
+
+ case EHScope::Filter:
+ dispatchBlock = createBasicBlock("filter.dispatch");
+ break;
+
+ case EHScope::Terminate:
+ dispatchBlock = getTerminateHandler();
+ break;
+ }
+ scope.setCachedEHDispatchBlock(dispatchBlock);
+ }
+ return dispatchBlock;
+}
+
/// Check whether this is a non-EH scope, i.e. a scope which doesn't
/// affect exception handling. Currently, the only non-EH scopes are
/// normal-only cleanup scopes.
@@ -629,280 +719,143 @@ const CleanupHackLevel_t CleanupHackLevel = CHL_MandatoryCleanup;
llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
assert(EHStack.requiresLandingPad());
- for (EHScopeStack::iterator ir = EHStack.begin(); ; ) {
- assert(ir != EHStack.end() &&
- "stack requiring landing pad is nothing but non-EH scopes?");
-
- // If this is a terminate scope, just use the singleton terminate
- // landing pad.
- if (isa<EHTerminateScope>(*ir))
- return getTerminateLandingPad();
-
- // If this isn't an EH scope, iterate; otherwise break out.
- if (!isNonEHScope(*ir)) break;
- ++ir;
+ EHScope &innermostEHScope = *EHStack.find(EHStack.getInnermostEHScope());
+ switch (innermostEHScope.getKind()) {
+ case EHScope::Terminate:
+ return getTerminateLandingPad();
- // We haven't checked this scope for a cached landing pad yet.
- if (llvm::BasicBlock *LP = ir->getCachedLandingPad())
- return LP;
+ case EHScope::Catch:
+ case EHScope::Cleanup:
+ case EHScope::Filter:
+ if (llvm::BasicBlock *lpad = innermostEHScope.getCachedLandingPad())
+ return lpad;
}
// Save the current IR generation state.
- CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+ CGBuilderTy::InsertPoint savedIP = Builder.saveAndClearIP();
- const EHPersonality &Personality = EHPersonality::get(getLangOptions());
+ const EHPersonality &personality = EHPersonality::get(getLangOptions());
// Create and configure the landing pad.
- llvm::BasicBlock *LP = createBasicBlock("lpad");
- EmitBlock(LP);
+ llvm::BasicBlock *lpad = createBasicBlock("lpad");
+ EmitBlock(lpad);
+
+ llvm::LandingPadInst *LPadInst =
+ Builder.CreateLandingPad(llvm::StructType::get(Int8PtrTy, Int32Ty, NULL),
+ getOpaquePersonalityFn(CGM, personality), 0);
+
+ llvm::Value *LPadExn = Builder.CreateExtractValue(LPadInst, 0);
+ Builder.CreateStore(LPadExn, getExceptionSlot());
+ llvm::Value *LPadSel = Builder.CreateExtractValue(LPadInst, 1);
+ Builder.CreateStore(LPadSel, getEHSelectorSlot());
// Save the exception pointer. It's safe to use a single exception
// pointer per function because EH cleanups can never have nested
// try/catches.
- llvm::CallInst *Exn =
- Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::eh_exception), "exn");
- Exn->setDoesNotThrow();
- Builder.CreateStore(Exn, getExceptionSlot());
-
- // Build the selector arguments.
- llvm::SmallVector<llvm::Value*, 8> EHSelector;
- EHSelector.push_back(Exn);
- EHSelector.push_back(getOpaquePersonalityFn(CGM, Personality));
+ // Build the landingpad instruction.
// Accumulate all the handlers in scope.
- llvm::DenseMap<llvm::Value*, UnwindDest> EHHandlers;
- UnwindDest CatchAll;
- bool HasEHCleanup = false;
- bool HasEHFilter = false;
- llvm::SmallVector<llvm::Value*, 8> EHFilters;
+ bool hasCatchAll = false;
+ bool hasCleanup = false;
+ bool hasFilter = false;
+ SmallVector<llvm::Value*, 4> filterTypes;
+ llvm::SmallPtrSet<llvm::Value*, 4> catchTypes;
for (EHScopeStack::iterator I = EHStack.begin(), E = EHStack.end();
I != E; ++I) {
switch (I->getKind()) {
case EHScope::Cleanup:
- if (!HasEHCleanup)
- HasEHCleanup = cast<EHCleanupScope>(*I).isEHCleanup();
- // We otherwise don't care about cleanups.
+ // If we have a cleanup, remember that.
+ hasCleanup = (hasCleanup || cast<EHCleanupScope>(*I).isEHCleanup());
continue;
case EHScope::Filter: {
assert(I.next() == EHStack.end() && "EH filter is not end of EH stack");
- assert(!CatchAll.isValid() && "EH filter reached after catch-all");
-
- // Filter scopes get added to the selector in weird ways.
- EHFilterScope &Filter = cast<EHFilterScope>(*I);
- HasEHFilter = true;
-
- // Add all the filter values which we aren't already explicitly
- // catching.
- for (unsigned I = 0, E = Filter.getNumFilters(); I != E; ++I) {
- llvm::Value *FV = Filter.getFilter(I);
- if (!EHHandlers.count(FV))
- EHFilters.push_back(FV);
- }
+ assert(!hasCatchAll && "EH filter reached after catch-all");
+
+ // Filter scopes get added to the landingpad in weird ways.
+ EHFilterScope &filter = cast<EHFilterScope>(*I);
+ hasFilter = true;
+
+ // Add all the filter values.
+ for (unsigned i = 0, e = filter.getNumFilters(); i != e; ++i)
+ filterTypes.push_back(filter.getFilter(i));
goto done;
}
case EHScope::Terminate:
// Terminate scopes are basically catch-alls.
- assert(!CatchAll.isValid());
- CatchAll = UnwindDest(getTerminateHandler(),
- EHStack.getEnclosingEHCleanup(I),
- cast<EHTerminateScope>(*I).getDestIndex());
+ assert(!hasCatchAll);
+ hasCatchAll = true;
goto done;
case EHScope::Catch:
break;
}
- EHCatchScope &Catch = cast<EHCatchScope>(*I);
- for (unsigned HI = 0, HE = Catch.getNumHandlers(); HI != HE; ++HI) {
- EHCatchScope::Handler Handler = Catch.getHandler(HI);
-
- // Catch-all. We should only have one of these per catch.
- if (!Handler.Type) {
- assert(!CatchAll.isValid());
- CatchAll = UnwindDest(Handler.Block,
- EHStack.getEnclosingEHCleanup(I),
- Handler.Index);
- continue;
+ EHCatchScope &catchScope = cast<EHCatchScope>(*I);
+ for (unsigned hi = 0, he = catchScope.getNumHandlers(); hi != he; ++hi) {
+ EHCatchScope::Handler handler = catchScope.getHandler(hi);
+
+ // If this is a catch-all, register that and abort.
+ if (!handler.Type) {
+ assert(!hasCatchAll);
+ hasCatchAll = true;
+ goto done;
}
// Check whether we already have a handler for this type.
- UnwindDest &Dest = EHHandlers[Handler.Type];
- if (Dest.isValid()) continue;
-
- EHSelector.push_back(Handler.Type);
- Dest = UnwindDest(Handler.Block,
- EHStack.getEnclosingEHCleanup(I),
- Handler.Index);
+ if (catchTypes.insert(handler.Type))
+ // If not, add it directly to the landingpad.
+ LPadInst->addClause(handler.Type);
}
-
- // Stop if we found a catch-all.
- if (CatchAll.isValid()) break;
}
done:
- unsigned LastToEmitInLoop = EHSelector.size();
-
- // If we have a catch-all, add null to the selector.
- if (CatchAll.isValid()) {
- EHSelector.push_back(getCatchAllValue(*this));
+ // If we have a catch-all, add null to the landingpad.
+ assert(!(hasCatchAll && hasFilter));
+ if (hasCatchAll) {
+ LPadInst->addClause(getCatchAllValue(*this));
// If we have an EH filter, we need to add those handlers in the
- // right place in the selector, which is to say, at the end.
- } else if (HasEHFilter) {
- // Create a filter expression: an integer constant saying how many
- // filters there are (+1 to avoid ambiguity with 0 for cleanup),
- // followed by the filter types. The personality routine only
- // lands here if the filter doesn't match.
- EHSelector.push_back(llvm::ConstantInt::get(Builder.getInt32Ty(),
- EHFilters.size() + 1));
- EHSelector.append(EHFilters.begin(), EHFilters.end());
+ // right place in the landingpad, which is to say, at the end.
+ } else if (hasFilter) {
+ // Create a filter expression: a constant array indicating which filter
+ // types there are. The personality routine only lands here if the filter
+ // doesn't match.
+ llvm::SmallVector<llvm::Constant*, 8> Filters;
+ llvm::ArrayType *AType =
+ llvm::ArrayType::get(!filterTypes.empty() ?
+ filterTypes[0]->getType() : Int8PtrTy,
+ filterTypes.size());
+
+ for (unsigned i = 0, e = filterTypes.size(); i != e; ++i)
+ Filters.push_back(cast<llvm::Constant>(filterTypes[i]));
+ llvm::Constant *FilterArray = llvm::ConstantArray::get(AType, Filters);
+ LPadInst->addClause(FilterArray);
// Also check whether we need a cleanup.
- if (CleanupHackLevel == CHL_MandatoryCatchall || HasEHCleanup)
- EHSelector.push_back(CleanupHackLevel == CHL_MandatoryCatchall
- ? getCatchAllValue(*this)
- : getCleanupValue(*this));
+ if (hasCleanup)
+ LPadInst->setCleanup(true);
// Otherwise, signal that we at least have cleanups.
- } else if (CleanupHackLevel == CHL_MandatoryCatchall || HasEHCleanup) {
- EHSelector.push_back(CleanupHackLevel == CHL_MandatoryCatchall
- ? getCatchAllValue(*this)
- : getCleanupValue(*this));
-
- // At the MandatoryCleanup hack level, we don't need to actually
- // spuriously tell the unwinder that we have cleanups, but we do
- // need to always be prepared to handle cleanups.
- } else if (CleanupHackLevel == CHL_MandatoryCleanup) {
- // Just don't decrement LastToEmitInLoop.
-
- } else {
- assert(LastToEmitInLoop > 2);
- LastToEmitInLoop--;
+ } else if (CleanupHackLevel == CHL_MandatoryCatchall || hasCleanup) {
+ if (CleanupHackLevel == CHL_MandatoryCatchall)
+ LPadInst->addClause(getCatchAllValue(*this));
+ else
+ LPadInst->setCleanup(true);
}
- assert(EHSelector.size() >= 3 && "selector call has only two arguments!");
+ assert((LPadInst->getNumClauses() > 0 || LPadInst->isCleanup()) &&
+ "landingpad instruction has no clauses!");
// Tell the backend how to generate the landing pad.
- llvm::CallInst *Selection =
- Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::eh_selector),
- EHSelector, "eh.selector");
- Selection->setDoesNotThrow();
-
- // Save the selector value in mandatory-cleanup mode.
- if (CleanupHackLevel == CHL_MandatoryCleanup)
- Builder.CreateStore(Selection, getEHSelectorSlot());
-
- // Select the right handler.
- llvm::Value *llvm_eh_typeid_for =
- CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for);
-
- // The results of llvm_eh_typeid_for aren't reliable --- at least
- // not locally --- so we basically have to do this as an 'if' chain.
- // We walk through the first N-1 catch clauses, testing and chaining,
- // and then fall into the final clause (which is either a cleanup, a
- // filter (possibly with a cleanup), a catch-all, or another catch).
- for (unsigned I = 2; I != LastToEmitInLoop; ++I) {
- llvm::Value *Type = EHSelector[I];
- UnwindDest Dest = EHHandlers[Type];
- assert(Dest.isValid() && "no handler entry for value in selector?");
-
- // Figure out where to branch on a match. As a debug code-size
- // optimization, if the scope depth matches the innermost cleanup,
- // we branch directly to the catch handler.
- llvm::BasicBlock *Match = Dest.getBlock();
- bool MatchNeedsCleanup =
- Dest.getScopeDepth() != EHStack.getInnermostEHCleanup();
- if (MatchNeedsCleanup)
- Match = createBasicBlock("eh.match");
-
- llvm::BasicBlock *Next = createBasicBlock("eh.next");
-
- // Check whether the exception matches.
- llvm::CallInst *Id
- = Builder.CreateCall(llvm_eh_typeid_for,
- Builder.CreateBitCast(Type, Int8PtrTy));
- Id->setDoesNotThrow();
- Builder.CreateCondBr(Builder.CreateICmpEQ(Selection, Id),
- Match, Next);
-
- // Emit match code if necessary.
- if (MatchNeedsCleanup) {
- EmitBlock(Match);
- EmitBranchThroughEHCleanup(Dest);
- }
-
- // Continue to the next match.
- EmitBlock(Next);
- }
-
- // Emit the final case in the selector.
- // This might be a catch-all....
- if (CatchAll.isValid()) {
- assert(isa<llvm::ConstantPointerNull>(EHSelector.back()));
- EmitBranchThroughEHCleanup(CatchAll);
-
- // ...or an EH filter...
- } else if (HasEHFilter) {
- llvm::Value *SavedSelection = Selection;
-
- // First, unwind out to the outermost scope if necessary.
- if (EHStack.hasEHCleanups()) {
- // The end here might not dominate the beginning, so we might need to
- // save the selector if we need it.
- llvm::AllocaInst *SelectorVar = 0;
- if (HasEHCleanup) {
- SelectorVar = CreateTempAlloca(Builder.getInt32Ty(), "selector.var");
- Builder.CreateStore(Selection, SelectorVar);
- }
-
- llvm::BasicBlock *CleanupContBB = createBasicBlock("ehspec.cleanup.cont");
- EmitBranchThroughEHCleanup(UnwindDest(CleanupContBB, EHStack.stable_end(),
- EHStack.getNextEHDestIndex()));
- EmitBlock(CleanupContBB);
-
- if (HasEHCleanup)
- SavedSelection = Builder.CreateLoad(SelectorVar, "ehspec.saved-selector");
- }
-
- // If there was a cleanup, we'll need to actually check whether we
- // landed here because the filter triggered.
- if (CleanupHackLevel != CHL_Ideal || HasEHCleanup) {
- llvm::BasicBlock *UnexpectedBB = createBasicBlock("ehspec.unexpected");
-
- llvm::Constant *Zero = llvm::ConstantInt::get(Int32Ty, 0);
- llvm::Value *FailsFilter =
- Builder.CreateICmpSLT(SavedSelection, Zero, "ehspec.fails");
- Builder.CreateCondBr(FailsFilter, UnexpectedBB, getRethrowDest().getBlock());
-
- EmitBlock(UnexpectedBB);
- }
-
- // Call __cxa_call_unexpected. This doesn't need to be an invoke
- // because __cxa_call_unexpected magically filters exceptions
- // according to the last landing pad the exception was thrown
- // into. Seriously.
- Builder.CreateCall(getUnexpectedFn(*this),
- Builder.CreateLoad(getExceptionSlot()))
- ->setDoesNotReturn();
- Builder.CreateUnreachable();
-
- // ...or a normal catch handler...
- } else if (CleanupHackLevel == CHL_Ideal && !HasEHCleanup) {
- llvm::Value *Type = EHSelector.back();
- EmitBranchThroughEHCleanup(EHHandlers[Type]);
-
- // ...or a cleanup.
- } else {
- EmitBranchThroughEHCleanup(getRethrowDest());
- }
+ Builder.CreateBr(getEHDispatchBlock(EHStack.getInnermostEHScope()));
// Restore the old IR generation state.
- Builder.restoreIP(SavedIP);
+ Builder.restoreIP(savedIP);
- return LP;
+ return lpad;
}
namespace {
@@ -954,11 +907,11 @@ static void InitCatchParam(CodeGenFunction &CGF,
const VarDecl &CatchParam,
llvm::Value *ParamAddr) {
// Load the exception from where the landing pad saved it.
- llvm::Value *Exn = CGF.Builder.CreateLoad(CGF.getExceptionSlot(), "exn");
+ llvm::Value *Exn = CGF.getExceptionFromSlot();
CanQualType CatchType =
CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
- const llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);
+ llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);
// If we're catching by reference, we can just cast the object
// pointer to the appropriate pointer.
@@ -1001,7 +954,7 @@ static void InitCatchParam(CodeGenFunction &CGF,
// pad. The best solution is to fix the personality function.
} else {
// Pull the pointer for the reference type off.
- const llvm::Type *PtrTy =
+ llvm::Type *PtrTy =
cast<llvm::PointerType>(LLVMCatchTy)->getElementType();
// Create the temporary and write the adjusted pointer into it.
@@ -1037,7 +990,7 @@ static void InitCatchParam(CodeGenFunction &CGF,
// Otherwise, it returns a pointer into the exception object.
- const llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
+ llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
if (IsComplex) {
@@ -1055,7 +1008,7 @@ static void InitCatchParam(CodeGenFunction &CGF,
assert(isa<RecordType>(CatchType) && "unexpected catch type!");
- const llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
+ llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
// Check for a copy expression. If we don't have a copy expression,
// that means a trivial copy is okay.
@@ -1086,8 +1039,10 @@ static void InitCatchParam(CodeGenFunction &CGF,
CGF.EHStack.pushTerminate();
// Perform the copy construction.
- CGF.EmitAggExpr(copyExpr, AggValueSlot::forAddr(ParamAddr, Qualifiers(),
- false));
+ CGF.EmitAggExpr(copyExpr, AggValueSlot::forAddr(ParamAddr, Qualifiers(),
+ AggValueSlot::IsNotDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased));
// Leave the terminate scope.
CGF.EHStack.popTerminate();
@@ -1127,7 +1082,7 @@ static void BeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *S) {
VarDecl *CatchParam = S->getExceptionDecl();
if (!CatchParam) {
- llvm::Value *Exn = CGF.Builder.CreateLoad(CGF.getExceptionSlot(), "exn");
+ llvm::Value *Exn = CGF.getExceptionFromSlot();
CallBeginCatch(CGF, Exn, true);
return;
}
@@ -1146,16 +1101,112 @@ namespace {
};
}
+/// Emit the structure of the dispatch block for the given catch scope.
+/// It is an invariant that the dispatch block already exists.
+static void emitCatchDispatchBlock(CodeGenFunction &CGF,
+ EHCatchScope &catchScope) {
+ llvm::BasicBlock *dispatchBlock = catchScope.getCachedEHDispatchBlock();
+ assert(dispatchBlock);
+
+ // If there's only a single catch-all, getEHDispatchBlock returned
+ // that catch-all as the dispatch block.
+ if (catchScope.getNumHandlers() == 1 &&
+ catchScope.getHandler(0).isCatchAll()) {
+ assert(dispatchBlock == catchScope.getHandler(0).Block);
+ return;
+ }
+
+ CGBuilderTy::InsertPoint savedIP = CGF.Builder.saveIP();
+ CGF.EmitBlockAfterUses(dispatchBlock);
+
+ // Select the right handler.
+ llvm::Value *llvm_eh_typeid_for =
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for);
+
+ // Load the selector value.
+ llvm::Value *selector = CGF.getSelectorFromSlot();
+
+ // Test against each of the exception types we claim to catch.
+ for (unsigned i = 0, e = catchScope.getNumHandlers(); ; ++i) {
+ assert(i < e && "ran off end of handlers!");
+ const EHCatchScope::Handler &handler = catchScope.getHandler(i);
+
+ llvm::Value *typeValue = handler.Type;
+ assert(typeValue && "fell into catch-all case!");
+ typeValue = CGF.Builder.CreateBitCast(typeValue, CGF.Int8PtrTy);
+
+ // Figure out the next block.
+ bool nextIsEnd;
+ llvm::BasicBlock *nextBlock;
+
+ // If this is the last handler, we're at the end, and the next
+ // block is the block for the enclosing EH scope.
+ if (i + 1 == e) {
+ nextBlock = CGF.getEHDispatchBlock(catchScope.getEnclosingEHScope());
+ nextIsEnd = true;
+
+ // If the next handler is a catch-all, we're at the end, and the
+ // next block is that handler.
+ } else if (catchScope.getHandler(i+1).isCatchAll()) {
+ nextBlock = catchScope.getHandler(i+1).Block;
+ nextIsEnd = true;
+
+ // Otherwise, we're not at the end and we need a new block.
+ } else {
+ nextBlock = CGF.createBasicBlock("catch.fallthrough");
+ nextIsEnd = false;
+ }
+
+ // Figure out the catch type's index in the LSDA's type table.
+ llvm::CallInst *typeIndex =
+ CGF.Builder.CreateCall(llvm_eh_typeid_for, typeValue);
+ typeIndex->setDoesNotThrow();
+
+ llvm::Value *matchesTypeIndex =
+ CGF.Builder.CreateICmpEQ(selector, typeIndex, "matches");
+ CGF.Builder.CreateCondBr(matchesTypeIndex, handler.Block, nextBlock);
+
+    // If that was the end of the dispatch chain, we're completely done.
+ if (nextIsEnd) {
+ CGF.Builder.restoreIP(savedIP);
+ return;
+
+ // Otherwise we need to emit and continue at that block.
+ } else {
+ CGF.EmitBlock(nextBlock);
+ }
+ }
+
+ llvm_unreachable("fell out of loop!");
+}
+
+void CodeGenFunction::popCatchScope() {
+ EHCatchScope &catchScope = cast<EHCatchScope>(*EHStack.begin());
+ if (catchScope.hasEHBranches())
+ emitCatchDispatchBlock(*this, catchScope);
+ EHStack.popCatch();
+}
+
void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
unsigned NumHandlers = S.getNumHandlers();
EHCatchScope &CatchScope = cast<EHCatchScope>(*EHStack.begin());
assert(CatchScope.getNumHandlers() == NumHandlers);
+ // If the catch was not required, bail out now.
+ if (!CatchScope.hasEHBranches()) {
+ EHStack.popCatch();
+ return;
+ }
+
+ // Emit the structure of the EH dispatch for this catch.
+ emitCatchDispatchBlock(*this, CatchScope);
+
// Copy the handler blocks off before we pop the EH stack. Emitting
// the handlers might scribble on this memory.
- llvm::SmallVector<EHCatchScope::Handler, 8> Handlers(NumHandlers);
+ SmallVector<EHCatchScope::Handler, 8> Handlers(NumHandlers);
memcpy(Handlers.data(), CatchScope.begin(),
NumHandlers * sizeof(EHCatchScope::Handler));
+
EHStack.popCatch();
// The fall-through block.
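
The dispatch chain built above compares the saved selector against llvm.eh.typeid.for of each handler's typeinfo, in source order, and falls through to the enclosing scope's dispatch block if nothing matches. For example:

    void h();

    void dispatchExample() {
      try {
        h();
      } catch (int) {  // tested first: selector == llvm.eh.typeid.for(&_ZTIi)
      } catch (...) {  // a catch-all ends the chain; no comparison is emitted
      }
    }
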
@@ -1171,12 +1222,19 @@ void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
ImplicitRethrow = isa<CXXDestructorDecl>(CurCodeDecl) ||
isa<CXXConstructorDecl>(CurCodeDecl);
- for (unsigned I = 0; I != NumHandlers; ++I) {
- llvm::BasicBlock *CatchBlock = Handlers[I].Block;
- EmitBlock(CatchBlock);
+ // Perversely, we emit the handlers backwards precisely because we
+ // want them to appear in source order. In all of these cases, the
+ // catch block will have exactly one predecessor, which will be a
+ // particular block in the catch dispatch. However, in the case of
+ // a catch-all, one of the dispatch blocks will branch to two
+ // different handlers, and EmitBlockAfterUses will cause the second
+ // handler to be moved before the first.
+ for (unsigned I = NumHandlers; I != 0; --I) {
+ llvm::BasicBlock *CatchBlock = Handlers[I-1].Block;
+ EmitBlockAfterUses(CatchBlock);
// Catch the exception if this isn't a catch-all.
- const CXXCatchStmt *C = S.getHandler(I);
+ const CXXCatchStmt *C = S.getHandler(I-1);
// Enter a cleanup scope, including the catch variable and the
// end-catch.
@@ -1315,7 +1373,7 @@ void CodeGenFunction::FinallyInfo::enter(CodeGenFunction &CGF,
// In the latter case we need to pass it the exception object.
// But we can't use the exception slot because the @finally might
// have a landing pad (which would overwrite the exception slot).
- const llvm::FunctionType *rethrowFnTy =
+ llvm::FunctionType *rethrowFnTy =
cast<llvm::FunctionType>(
cast<llvm::PointerType>(rethrowFn->getType())->getElementType());
SavedExnVar = 0;
@@ -1358,7 +1416,8 @@ void CodeGenFunction::FinallyInfo::exit(CodeGenFunction &CGF) {
// Leave the finally catch-all.
EHCatchScope &catchScope = cast<EHCatchScope>(*CGF.EHStack.begin());
llvm::BasicBlock *catchBB = catchScope.getHandler(0).Block;
- CGF.EHStack.popCatch();
+
+ CGF.popCatchScope();
// If there are any references to the catch-all block, emit it.
if (catchBB->use_empty()) {
@@ -1371,13 +1430,13 @@ void CodeGenFunction::FinallyInfo::exit(CodeGenFunction &CGF) {
// If there's a begin-catch function, call it.
if (BeginCatchFn) {
- exn = CGF.Builder.CreateLoad(CGF.getExceptionSlot());
+ exn = CGF.getExceptionFromSlot();
CGF.Builder.CreateCall(BeginCatchFn, exn)->setDoesNotThrow();
}
// If we need to remember the exception pointer to rethrow later, do so.
if (SavedExnVar) {
- if (!exn) exn = CGF.Builder.CreateLoad(CGF.getExceptionSlot());
+ if (!exn) exn = CGF.getExceptionFromSlot();
CGF.Builder.CreateStore(exn, SavedExnVar);
}
@@ -1405,19 +1464,11 @@ llvm::BasicBlock *CodeGenFunction::getTerminateLandingPad() {
Builder.SetInsertPoint(TerminateLandingPad);
// Tell the backend that this is a landing pad.
- llvm::CallInst *Exn =
- Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::eh_exception), "exn");
- Exn->setDoesNotThrow();
-
const EHPersonality &Personality = EHPersonality::get(CGM.getLangOptions());
-
- // Tell the backend what the exception table should be:
- // nothing but a catch-all.
- llvm::Value *Args[3] = { Exn, getOpaquePersonalityFn(CGM, Personality),
- getCatchAllValue(*this) };
- Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::eh_selector),
- Args, "eh.selector")
- ->setDoesNotThrow();
+ llvm::LandingPadInst *LPadInst =
+ Builder.CreateLandingPad(llvm::StructType::get(Int8PtrTy, Int32Ty, NULL),
+ getOpaquePersonalityFn(CGM, Personality), 0);
+ LPadInst->addClause(getCatchAllValue(*this));
llvm::CallInst *TerminateCall = Builder.CreateCall(getTerminateFn(*this));
TerminateCall->setDoesNotReturn();
@@ -1451,26 +1502,26 @@ llvm::BasicBlock *CodeGenFunction::getTerminateHandler() {
return TerminateHandler;
}
-CodeGenFunction::UnwindDest CodeGenFunction::getRethrowDest() {
- if (RethrowBlock.isValid()) return RethrowBlock;
+llvm::BasicBlock *CodeGenFunction::getEHResumeBlock() {
+ if (EHResumeBlock) return EHResumeBlock;
CGBuilderTy::InsertPoint SavedIP = Builder.saveIP();
// We emit a jump to a notional label at the outermost unwind state.
- llvm::BasicBlock *Unwind = createBasicBlock("eh.resume");
- Builder.SetInsertPoint(Unwind);
+ EHResumeBlock = createBasicBlock("eh.resume");
+ Builder.SetInsertPoint(EHResumeBlock);
const EHPersonality &Personality = EHPersonality::get(CGM.getLangOptions());
// This can always be a call because we necessarily didn't find
// anything on the EH stack which needs our help.
- llvm::StringRef RethrowName = Personality.getCatchallRethrowFnName();
+ StringRef RethrowName = Personality.getCatchallRethrowFnName();
if (!RethrowName.empty()) {
Builder.CreateCall(getCatchallRethrowFn(*this, RethrowName),
- Builder.CreateLoad(getExceptionSlot()))
+ getExceptionFromSlot())
->setDoesNotReturn();
} else {
- llvm::Value *Exn = Builder.CreateLoad(getExceptionSlot());
+ llvm::Value *Exn = getExceptionFromSlot();
switch (CleanupHackLevel) {
case CHL_MandatoryCatchall:
@@ -1481,12 +1532,21 @@ CodeGenFunction::UnwindDest CodeGenFunction::getRethrowDest() {
->setDoesNotReturn();
break;
case CHL_MandatoryCleanup: {
- // In mandatory-cleanup mode, we should use llvm.eh.resume.
- llvm::Value *Selector = Builder.CreateLoad(getEHSelectorSlot());
- Builder.CreateCall2(CGM.getIntrinsic(llvm::Intrinsic::eh_resume),
- Exn, Selector)
- ->setDoesNotReturn();
- break;
+ // In mandatory-cleanup mode, we should use 'resume'.
+
+ // Recreate the landingpad's return value for the 'resume' instruction.
+ llvm::Value *Exn = getExceptionFromSlot();
+ llvm::Value *Sel = getSelectorFromSlot();
+
+ llvm::Type *LPadType = llvm::StructType::get(Exn->getType(),
+ Sel->getType(), NULL);
+ llvm::Value *LPadVal = llvm::UndefValue::get(LPadType);
+ LPadVal = Builder.CreateInsertValue(LPadVal, Exn, 0, "lpad.val");
+ LPadVal = Builder.CreateInsertValue(LPadVal, Sel, 1, "lpad.val");
+
+ Builder.CreateResume(LPadVal);
+ Builder.restoreIP(SavedIP);
+ return EHResumeBlock;
}
case CHL_Ideal:
// In an idealized mode where we don't have to worry about the
@@ -1502,7 +1562,5 @@ CodeGenFunction::UnwindDest CodeGenFunction::getRethrowDest() {
Builder.restoreIP(SavedIP);
- RethrowBlock = UnwindDest(Unwind, EHStack.stable_end(), 0);
- return RethrowBlock;
+ return EHResumeBlock;
}
-
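The CHL_MandatoryCleanup branch above is the heart of getEHResumeBlock: the new 'resume' instruction consumes a value of the landing pad's type, so the exception pointer and selector stashed in their slots are packed back into a { i8*, i32 } aggregate first. The idiom in isolation, as a sketch with Exn, Sel, and Builder assumed in scope:

// Rebuild the landingpad's aggregate value, then resume unwinding.
llvm::Type *LPadTy = llvm::StructType::get(Exn->getType(), Sel->getType(), NULL);
llvm::Value *LPadVal = llvm::UndefValue::get(LPadTy);
LPadVal = Builder.CreateInsertValue(LPadVal, Exn, 0, "lpad.val");
LPadVal = Builder.CreateInsertValue(LPadVal, Sel, 1, "lpad.val");
Builder.CreateResume(LPadVal); // terminator: unwinding continues in the caller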
diff --git a/lib/CodeGen/CGException.h b/lib/CodeGen/CGException.h
index 5a743b51f66f..d0216160d50f 100644
--- a/lib/CodeGen/CGException.h
+++ b/lib/CodeGen/CGException.h
@@ -24,15 +24,15 @@ namespace CodeGen {
/// The exceptions personality for a function.
class EHPersonality {
- llvm::StringRef PersonalityFn;
+ StringRef PersonalityFn;
// If this is non-empty, this personality requires a non-standard
// function for rethrowing an exception after a catchall cleanup.
// This function must have prototype void(void*).
- llvm::StringRef CatchallRethrowFn;
+ StringRef CatchallRethrowFn;
- EHPersonality(llvm::StringRef PersonalityFn,
- llvm::StringRef CatchallRethrowFn = llvm::StringRef())
+ EHPersonality(StringRef PersonalityFn,
+ StringRef CatchallRethrowFn = StringRef())
: PersonalityFn(PersonalityFn),
CatchallRethrowFn(CatchallRethrowFn) {}
@@ -46,8 +46,8 @@ public:
static const EHPersonality GNU_CPlusPlus;
static const EHPersonality GNU_CPlusPlus_SJLJ;
- llvm::StringRef getPersonalityFnName() const { return PersonalityFn; }
- llvm::StringRef getCatchallRethrowFnName() const { return CatchallRethrowFn; }
+ StringRef getPersonalityFnName() const { return PersonalityFn; }
+ StringRef getCatchallRethrowFnName() const { return CatchallRethrowFn; }
};
}
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index a7e8003eaab5..bd4e553991b3 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -18,6 +18,7 @@
#include "CGDebugInfo.h"
#include "CGRecordLayout.h"
#include "CGObjCRuntime.h"
+#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Frontend/CodeGenOptions.h"
@@ -34,7 +35,7 @@ llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
unsigned addressSpace =
cast<llvm::PointerType>(value->getType())->getAddressSpace();
- const llvm::PointerType *destType = Int8PtrTy;
+ llvm::PointerType *destType = Int8PtrTy;
if (addressSpace)
destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);
@@ -44,8 +45,8 @@ llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
/// CreateTempAlloca - This creates a alloca and inserts it into the entry
/// block.
-llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(const llvm::Type *Ty,
- const llvm::Twine &Name) {
+llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
+ const Twine &Name) {
if (!Builder.isNamePreserving())
return new llvm::AllocaInst(Ty, 0, "", AllocaInsertPt);
return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt);
@@ -59,7 +60,7 @@ void CodeGenFunction::InitTempAlloca(llvm::AllocaInst *Var,
}
llvm::AllocaInst *CodeGenFunction::CreateIRTemp(QualType Ty,
- const llvm::Twine &Name) {
+ const Twine &Name) {
llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertType(Ty), Name);
// FIXME: Should we prefer the preferred type alignment here?
CharUnits Align = getContext().getTypeAlignInChars(Ty);
@@ -68,7 +69,7 @@ llvm::AllocaInst *CodeGenFunction::CreateIRTemp(QualType Ty,
}
llvm::AllocaInst *CodeGenFunction::CreateMemTemp(QualType Ty,
- const llvm::Twine &Name) {
+ const Twine &Name) {
llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty), Name);
// FIXME: Should we prefer the preferred type alignment here?
CharUnits Align = getContext().getTypeAlignInChars(Ty);
@@ -136,7 +137,10 @@ void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
if (E->getType()->isAnyComplexType())
EmitComplexExprIntoAddr(E, Location, Quals.hasVolatile());
else if (hasAggregateLLVMType(E->getType()))
- EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals, IsInit));
+ EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
+ AggValueSlot::IsDestructed_t(IsInit),
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsAliased_t(!IsInit)));
else {
RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
LValue LV = MakeAddrLValue(Location, E->getType());
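AggValueSlot::forAddr now takes self-describing flag types in place of bare bools, which is what the extra arguments above spell out. A typical call site after this patch reads as follows (sketch; flag spellings as used throughout this diff):

AggValueSlot Slot =
    AggValueSlot::forAddr(Addr, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased);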
@@ -174,7 +178,7 @@ namespace {
}
static llvm::Value *
-CreateReferenceTemporary(CodeGenFunction& CGF, QualType Type,
+CreateReferenceTemporary(CodeGenFunction &CGF, QualType Type,
const NamedDecl *InitializedDecl) {
if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
if (VD->hasGlobalStorage()) {
@@ -183,7 +187,7 @@ CreateReferenceTemporary(CodeGenFunction& CGF, QualType Type,
CGF.CGM.getCXXABI().getMangleContext().mangleReferenceTemporary(VD, Out);
Out.flush();
- const llvm::Type *RefTempTy = CGF.ConvertTypeForMem(Type);
+ llvm::Type *RefTempTy = CGF.ConvertTypeForMem(Type);
// Create the reference temporary.
llvm::GlobalValue *RefTemp =
@@ -310,7 +314,7 @@ EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
return ReferenceTemporary;
}
- llvm::SmallVector<SubobjectAdjustment, 2> Adjustments;
+ SmallVector<SubobjectAdjustment, 2> Adjustments;
while (true) {
E = E->IgnoreParens();
@@ -354,8 +358,12 @@ EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
!E->getType()->isAnyComplexType()) {
ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
InitializedDecl);
+ AggValueSlot::IsDestructed_t isDestructed
+ = AggValueSlot::IsDestructed_t(InitializedDecl != 0);
AggSlot = AggValueSlot::forAddr(ReferenceTemporary, Qualifiers(),
- InitializedDecl != 0);
+ isDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
}
if (InitializedDecl) {
@@ -466,7 +474,8 @@ CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
else {
switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
case Qualifiers::OCL_None:
- assert(0 && "Not a reference temporary that needs to be deallocated");
+ llvm_unreachable(
+ "Not a reference temporary that needs to be deallocated");
case Qualifiers::OCL_ExplicitNone:
case Qualifiers::OCL_Autoreleasing:
// Nothing to do.
@@ -578,7 +587,7 @@ RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
return RValue::get(0);
if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
- const llvm::Type *EltTy = ConvertType(CTy->getElementType());
+ llvm::Type *EltTy = ConvertType(CTy->getElementType());
llvm::Value *U = llvm::UndefValue::get(EltTy);
return RValue::getComplex(std::make_pair(U, U));
}
@@ -652,7 +661,8 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
return EmitVAArgExprLValue(cast<VAArgExpr>(E));
case Expr::DeclRefExprClass:
return EmitDeclRefLValue(cast<DeclRefExpr>(E));
- case Expr::ParenExprClass:return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
+ case Expr::ParenExprClass:
+ return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
case Expr::GenericSelectionExprClass:
return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr());
case Expr::PredefinedExprClass:
@@ -731,7 +741,7 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue) {
llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
unsigned Alignment, QualType Ty,
llvm::MDNode *TBAAInfo) {
- llvm::LoadInst *Load = Builder.CreateLoad(Addr, "tmp");
+ llvm::LoadInst *Load = Builder.CreateLoad(Addr);
if (Volatile)
Load->setVolatile(true);
if (Alignment)
@@ -812,7 +822,7 @@ RValue CodeGenFunction::EmitLoadOfLValue(LValue LV) {
if (LV.isVectorElt()) {
llvm::Value *Vec = Builder.CreateLoad(LV.getVectorAddr(),
- LV.isVolatileQualified(), "tmp");
+ LV.isVolatileQualified());
return RValue::get(Builder.CreateExtractElement(Vec, LV.getVectorIdx(),
"vecext"));
}
@@ -833,7 +843,7 @@ RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
const CGBitFieldInfo &Info = LV.getBitFieldInfo();
// Get the output type.
- const llvm::Type *ResLTy = ConvertType(LV.getType());
+ llvm::Type *ResLTy = ConvertType(LV.getType());
unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);
// Compute the result as an OR of all of the individual component accesses.
@@ -857,7 +867,7 @@ RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
}
// Cast to the access type.
- const llvm::Type *PTy = llvm::Type::getIntNPtrTy(getLLVMContext(),
+ llvm::Type *PTy = llvm::Type::getIntNPtrTy(getLLVMContext(),
AI.AccessWidth,
CGM.getContext().getTargetAddressSpace(LV.getType()));
Ptr = Builder.CreateBitCast(Ptr, PTy);
@@ -905,7 +915,7 @@ RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddr(),
- LV.isVolatileQualified(), "tmp");
+ LV.isVolatileQualified());
const llvm::Constant *Elts = LV.getExtVectorElts();
@@ -915,13 +925,13 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
if (!ExprVT) {
unsigned InIdx = getAccessedFieldNo(0, Elts);
llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
- return RValue::get(Builder.CreateExtractElement(Vec, Elt, "tmp"));
+ return RValue::get(Builder.CreateExtractElement(Vec, Elt));
}
// Always use shuffle vector to try to retain the original program structure
unsigned NumResultElts = ExprVT->getNumElements();
- llvm::SmallVector<llvm::Constant*, 4> Mask;
+ SmallVector<llvm::Constant*, 4> Mask;
for (unsigned i = 0; i != NumResultElts; ++i) {
unsigned InIdx = getAccessedFieldNo(i, Elts);
Mask.push_back(llvm::ConstantInt::get(Int32Ty, InIdx));
@@ -929,7 +939,7 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()),
- MaskV, "tmp");
+ MaskV);
return RValue::get(Vec);
}
@@ -943,7 +953,7 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst) {
if (Dst.isVectorElt()) {
// Read/modify/write the vector, inserting the new element.
llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddr(),
- Dst.isVolatileQualified(), "tmp");
+ Dst.isVolatileQualified());
Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
Dst.getVectorIdx(), "vecins");
Builder.CreateStore(Vec, Dst.getVectorAddr(),Dst.isVolatileQualified());
@@ -1002,7 +1012,7 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst) {
llvm::Value *src = Src.getScalarVal();
if (Dst.isObjCIvar()) {
assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
- const llvm::Type *ResultType = ConvertType(getContext().LongTy);
+ llvm::Type *ResultType = ConvertType(getContext().LongTy);
llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp());
llvm::Value *dst = RHS;
RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
@@ -1029,7 +1039,7 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
// Get the output type.
- const llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
+ llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);
// Get the source value, truncated to the width of the bit-field.
@@ -1045,7 +1055,7 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
// Return the new value of the bit-field, if requested.
if (Result) {
// Cast back to the proper type for result.
- const llvm::Type *SrcTy = Src.getScalarVal()->getType();
+ llvm::Type *SrcTy = Src.getScalarVal()->getType();
llvm::Value *ReloadVal = Builder.CreateIntCast(SrcVal, SrcTy, false,
"bf.reload.val");
@@ -1082,10 +1092,10 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
}
// Cast to the access type.
- const llvm::Type *AccessLTy =
+ llvm::Type *AccessLTy =
llvm::Type::getIntNTy(getLLVMContext(), AI.AccessWidth);
- const llvm::Type *PTy = AccessLTy->getPointerTo(addressSpace);
+ llvm::Type *PTy = AccessLTy->getPointerTo(addressSpace);
Ptr = Builder.CreateBitCast(Ptr, PTy);
// Extract the piece of the bit-field value to write in this access, limited
@@ -1134,7 +1144,7 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
// This access turns into a read/modify/write of the vector. Load the input
// value now.
llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddr(),
- Dst.isVolatileQualified(), "tmp");
+ Dst.isVolatileQualified());
const llvm::Constant *Elts = Dst.getExtVectorElts();
llvm::Value *SrcVal = Src.getScalarVal();
@@ -1147,7 +1157,7 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
    // Use a shuffle vector if the src and destination have the same number
    // of elements, and restore the vector mask since it is on the side it
    // will be stored.
- llvm::SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
+ SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
for (unsigned i = 0; i != NumSrcElts; ++i) {
unsigned InIdx = getAccessedFieldNo(i, Elts);
Mask[InIdx] = llvm::ConstantInt::get(Int32Ty, i);
@@ -1156,13 +1166,13 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
Vec = Builder.CreateShuffleVector(SrcVal,
llvm::UndefValue::get(Vec->getType()),
- MaskV, "tmp");
+ MaskV);
} else if (NumDstElts > NumSrcElts) {
    // Extend the source vector to the same length and then shuffle it
// into the destination.
// FIXME: since we're shuffling with undef, can we just use the indices
// into that? This could be simpler.
- llvm::SmallVector<llvm::Constant*, 4> ExtMask;
+ SmallVector<llvm::Constant*, 4> ExtMask;
unsigned i;
for (i = 0; i != NumSrcElts; ++i)
ExtMask.push_back(llvm::ConstantInt::get(Int32Ty, i));
@@ -1172,9 +1182,9 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
llvm::Value *ExtSrcVal =
Builder.CreateShuffleVector(SrcVal,
llvm::UndefValue::get(SrcVal->getType()),
- ExtMaskV, "tmp");
+ ExtMaskV);
// build identity
- llvm::SmallVector<llvm::Constant*, 4> Mask;
+ SmallVector<llvm::Constant*, 4> Mask;
for (unsigned i = 0; i != NumDstElts; ++i)
Mask.push_back(llvm::ConstantInt::get(Int32Ty, i));
@@ -1184,16 +1194,16 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
Mask[Idx] = llvm::ConstantInt::get(Int32Ty, i+NumDstElts);
}
llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
- Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV, "tmp");
+ Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV);
} else {
// We should never shorten the vector
- assert(0 && "unexpected shorten vector length");
+ llvm_unreachable("unexpected shorten vector length");
}
} else {
// If the Src is a scalar (not a vector) it must be updating one element.
unsigned InIdx = getAccessedFieldNo(0, Elts);
llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
- Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt, "tmp");
+ Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
}
Builder.CreateStore(Vec, Dst.getExtVectorAddr(), Dst.isVolatileQualified());
@@ -1203,11 +1213,23 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
// generating write-barries API. It is currently a global, ivar,
// or neither.
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
- LValue &LV) {
- if (Ctx.getLangOptions().getGCMode() == LangOptions::NonGC)
+ LValue &LV,
+ bool IsMemberAccess=false) {
+ if (Ctx.getLangOptions().getGC() == LangOptions::NonGC)
return;
if (isa<ObjCIvarRefExpr>(E)) {
+ QualType ExpTy = E->getType();
+ if (IsMemberAccess && ExpTy->isPointerType()) {
+ // If the ivar is a structure pointer, assigning to a field of
+ // this struct follows gcc's behavior and conservatively makes it
+ // a non-ivar write-barrier.
+ ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
+ if (ExpTy->isRecordType()) {
+ LV.setObjCIvar(false);
+ return;
+ }
+ }
LV.setObjCIvar(true);
ObjCIvarRefExpr *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr*>(E));
LV.setBaseIvarExp(Exp->getBase());
@@ -1227,12 +1249,12 @@ static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
}
if (const UnaryOperator *Exp = dyn_cast<UnaryOperator>(E)) {
- setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
+ setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
return;
}
if (const ParenExpr *Exp = dyn_cast<ParenExpr>(E)) {
- setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
+ setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
if (LV.isObjCIvar()) {
// If cast is to a structure pointer, follow gcc's behavior and make it
// a non-ivar write-barrier.
@@ -1251,17 +1273,17 @@ static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
}
if (const ImplicitCastExpr *Exp = dyn_cast<ImplicitCastExpr>(E)) {
- setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
+ setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
return;
}
if (const CStyleCastExpr *Exp = dyn_cast<CStyleCastExpr>(E)) {
- setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
+ setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
return;
}
if (const ObjCBridgedCastExpr *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
- setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
+ setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
return;
}
@@ -1277,9 +1299,9 @@ static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
LV.setGlobalObjCRef(false);
return;
}
-
+
if (const MemberExpr *Exp = dyn_cast<MemberExpr>(E)) {
- setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
+ setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
// We don't know if member is an 'ivar', but this flag is looked at
// only in the context of LV.isObjCIvar().
LV.setObjCArray(E->getType()->isArrayType());
@@ -1290,7 +1312,7 @@ static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
static llvm::Value *
EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
llvm::Value *V, llvm::Type *IRType,
- llvm::StringRef Name = llvm::StringRef()) {
+ StringRef Name = StringRef()) {
unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name);
}
@@ -1302,7 +1324,7 @@ static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
if (VD->getType()->isReferenceType())
- V = CGF.Builder.CreateLoad(V, "tmp");
+ V = CGF.Builder.CreateLoad(V);
V = EmitBitCastOfLValueToProperType(CGF, V,
CGF.getTypes().ConvertTypeForMem(E->getType()));
@@ -1325,7 +1347,7 @@ static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
QualType NoProtoType =
CGF.getContext().getFunctionNoProtoType(Proto->getResultType());
NoProtoType = CGF.getContext().getPointerType(NoProtoType);
- V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType), "tmp");
+ V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType));
}
}
unsigned Alignment = CGF.getContext().getDeclAlign(FD).getQuantity();
@@ -1361,7 +1383,7 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
V = BuildBlockByrefAddress(V, VD);
if (VD->getType()->isReferenceType())
- V = Builder.CreateLoad(V, "tmp");
+ V = Builder.CreateLoad(V);
V = EmitBitCastOfLValueToProperType(*this, V,
getTypes().ConvertTypeForMem(E->getType()));
@@ -1378,7 +1400,7 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
if (const FunctionDecl *fn = dyn_cast<FunctionDecl>(ND))
return EmitFunctionDeclLValue(*this, E, fn);
- assert(false && "Unhandled DeclRefExpr");
+ llvm_unreachable("Unhandled DeclRefExpr");
// an invalid LValue, but the llvm_unreachable above
// ensures that this point is never reached.
@@ -1398,7 +1420,7 @@ LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
switch (E->getOpcode()) {
- default: assert(0 && "Unknown unary operator lvalue!");
+ default: llvm_unreachable("Unknown unary operator lvalue!");
case UO_Deref: {
QualType T = E->getSubExpr()->getType()->getPointeeType();
assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
@@ -1411,7 +1433,7 @@ LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
// But, we continue to generate __strong write barrier on indirect write
// into a pointer to object.
if (getContext().getLangOptions().ObjC1 &&
- getContext().getLangOptions().getGCMode() != LangOptions::NonGC &&
+ getContext().getLangOptions().getGC() != LangOptions::NonGC &&
LV.isObjCWeak())
LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
return LV;
@@ -1474,7 +1496,7 @@ LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
std::string GlobalVarName;
switch (Type) {
- default: assert(0 && "Invalid type");
+ default: llvm_unreachable("Invalid type");
case PredefinedExpr::Func:
GlobalVarName = "__func__.";
break;
@@ -1486,7 +1508,7 @@ LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
break;
}
- llvm::StringRef FnName = CurFn->getName();
+ StringRef FnName = CurFn->getName();
if (FnName.startswith("\01"))
FnName = FnName.substr(1);
GlobalVarName += FnName;
@@ -1646,9 +1668,9 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
ArrayAlignment = ArrayLV.getAlignment();
if (getContext().getLangOptions().isSignedOverflowDefined())
- Address = Builder.CreateGEP(ArrayPtr, Args, Args+2, "arrayidx");
+ Address = Builder.CreateGEP(ArrayPtr, Args, "arrayidx");
else
- Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, Args+2, "arrayidx");
+ Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, "arrayidx");
} else {
// The base must be a pointer, which is not an aggregate. Emit it.
llvm::Value *Base = EmitScalarExpr(E->getBase());
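The GEP change above reflects IRBuilder moving from (begin, end) iterator pairs to ArrayRef parameters: any contiguous index array converts implicitly. A sketch, with Base, Zero, and Index assumed:

llvm::Value *Idxs[] = { Zero, Index };                    // implicit ArrayRef
llvm::Value *Addr = Builder.CreateInBoundsGEP(Base, Idxs, "arrayidx");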
@@ -1672,7 +1694,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
LV.getQuals().setAddressSpace(E->getBase()->getType().getAddressSpace());
if (getContext().getLangOptions().ObjC1 &&
- getContext().getLangOptions().getGCMode() != LangOptions::NonGC) {
+ getContext().getLangOptions().getGC() != LangOptions::NonGC) {
LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
setObjCGCLValueClass(getContext(), E, LV);
}
@@ -1681,10 +1703,10 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
static
llvm::Constant *GenerateConstantVector(llvm::LLVMContext &VMContext,
- llvm::SmallVector<unsigned, 4> &Elts) {
- llvm::SmallVector<llvm::Constant*, 4> CElts;
+ SmallVector<unsigned, 4> &Elts) {
+ SmallVector<llvm::Constant*, 4> CElts;
- const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
+ llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
for (unsigned i = 0, e = Elts.size(); i != e; ++i)
CElts.push_back(llvm::ConstantInt::get(Int32Ty, Elts[i]));
@@ -1725,7 +1747,7 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
// Encode the element access list into a vector of unsigned indices.
- llvm::SmallVector<unsigned, 4> Indices;
+ SmallVector<unsigned, 4> Indices;
E->getEncodedElementAccess(Indices);
if (Base.isSimple()) {
@@ -1735,7 +1757,7 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
llvm::Constant *BaseElts = Base.getExtVectorElts();
- llvm::SmallVector<llvm::Constant *, 4> CElts;
+ SmallVector<llvm::Constant *, 4> CElts;
for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
if (isa<llvm::ConstantAggregateZero>(BaseElts))
@@ -1784,8 +1806,7 @@ LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
return EmitFunctionDeclLValue(*this, E, FD);
- assert(false && "Unhandled member declaration!");
- return LValue();
+ llvm_unreachable("Unhandled member declaration!");
}
LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value *BaseValue,
@@ -1867,6 +1888,9 @@ LValue CodeGenFunction::EmitLValueForField(llvm::Value *baseAddr,
CGM.getTypes().ConvertTypeForMem(type),
field->getName());
+ if (field->hasAttr<AnnotateAttr>())
+ addr = EmitFieldAnnotations(field, addr);
+
unsigned alignment = getContext().getDeclAlign(field).getQuantity();
LValue LV = MakeAddrLValue(addr, type, alignment);
LV.getQuals().addCVRQualifiers(cvr);
@@ -1896,7 +1920,7 @@ CodeGenFunction::EmitLValueForFieldInitialization(llvm::Value *BaseValue,
const CGRecordLayout &RL =
CGM.getTypes().getCGRecordLayout(Field->getParent());
unsigned idx = RL.getLLVMFieldNo(Field);
- llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx, "tmp");
+ llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx);
assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs");
@@ -1904,7 +1928,7 @@ CodeGenFunction::EmitLValueForFieldInitialization(llvm::Value *BaseValue,
// for both unions and structs. A union needs a bitcast, a struct element
// will need a bitcast if the LLVM type laid out doesn't match the desired
// type.
- const llvm::Type *llvmType = ConvertTypeForMem(FieldType);
+ llvm::Type *llvmType = ConvertTypeForMem(FieldType);
unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
V = Builder.CreateBitCast(V, llvmType->getPointerTo(AS));
@@ -2048,9 +2072,10 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
case CK_BaseToDerivedMemberPointer:
case CK_MemberPointerToBoolean:
case CK_AnyPointerToBlockPointerCast:
- case CK_ObjCProduceObject:
- case CK_ObjCConsumeObject:
- case CK_ObjCReclaimReturnedObject: {
+ case CK_ARCProduceObject:
+ case CK_ARCConsumeObject:
+ case CK_ARCReclaimReturnedObject:
+ case CK_ARCExtendBlockObject: {
// These casts only produce lvalues when we're binding a reference to a
// temporary realized from a (converted) pure rvalue. Emit the expression
// as a value, copy it into a temporary, and return an lvalue referring to
@@ -2069,7 +2094,8 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
case CK_ConstructorConversion:
case CK_UserDefinedConversion:
- case CK_AnyPointerToObjCPointerCast:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
return EmitLValue(E->getSubExpr());
case CK_UncheckedDerivedToBase:
@@ -2143,8 +2169,7 @@ LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
LValue CodeGenFunction::EmitMaterializeTemporaryExpr(
const MaterializeTemporaryExpr *E) {
- RValue RV = EmitReferenceBindingToExpr(E->GetTemporaryExpr(),
- /*InitializedDecl=*/0);
+ RValue RV = EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
return MakeAddrLValue(RV.getScalarVal(), E->getType());
}
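EmitMaterializeTemporaryExpr now hands the MaterializeTemporaryExpr node itself to EmitReferenceBindingToExpr instead of unwrapping it first. A hypothetical C++0x line that exercises this path:

int &&r = 42; // the int temporary is materialized, then bound to r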
@@ -2155,11 +2180,8 @@ LValue CodeGenFunction::EmitMaterializeTemporaryExpr(
RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
ReturnValueSlot ReturnValue) {
- if (CGDebugInfo *DI = getDebugInfo()) {
- DI->setLocation(E->getLocStart());
- DI->UpdateLineDirectiveRegion(Builder);
- DI->EmitStopPoint(Builder);
- }
+ if (CGDebugInfo *DI = getDebugInfo())
+ DI->EmitLocation(Builder, E->getLocStart());
// Builtins never have block type.
if (E->getCallee()->getType()->isBlockPointerType())
@@ -2168,14 +2190,13 @@ RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E))
return EmitCXXMemberCallExpr(CE, ReturnValue);
- const Decl *TargetDecl = 0;
- if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E->getCallee())) {
- if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CE->getSubExpr())) {
- TargetDecl = DRE->getDecl();
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(TargetDecl))
- if (unsigned builtinID = FD->getBuiltinID())
- return EmitBuiltinExpr(FD, builtinID, E);
- }
+ if (const CUDAKernelCallExpr *CE = dyn_cast<CUDAKernelCallExpr>(E))
+ return EmitCUDAKernelCallExpr(CE, ReturnValue);
+
+ const Decl *TargetDecl = E->getCalleeDecl();
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
+ if (unsigned builtinID = FD->getBuiltinID())
+ return EmitBuiltinExpr(FD, builtinID, E);
}
if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E))
@@ -2306,7 +2327,7 @@ LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor()
&& "binding l-value to type which needs a temporary");
- AggValueSlot Slot = CreateAggTemp(E->getType(), "tmp");
+ AggValueSlot Slot = CreateAggTemp(E->getType());
EmitCXXConstructExpr(E, Slot);
return MakeAddrLValue(Slot.getAddr(), E->getType());
}
@@ -2319,7 +2340,7 @@ CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
LValue
CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
- Slot.setLifetimeExternallyManaged();
+ Slot.setExternallyDestructed();
EmitAggExpr(E->getSubExpr(), Slot);
EmitCXXTemporary(E->getTemporary(), Slot.getAddr());
return MakeAddrLValue(Slot.getAddr(), E->getType());
@@ -2406,8 +2427,35 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
CallArgList Args;
EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd);
- return EmitCall(CGM.getTypes().getFunctionInfo(Args, FnType),
- Callee, ReturnValue, Args, TargetDecl);
+ const CGFunctionInfo &FnInfo = CGM.getTypes().getFunctionInfo(Args, FnType);
+
+ // C99 6.5.2.2p6:
+ // If the expression that denotes the called function has a type
+ // that does not include a prototype, [the default argument
+ // promotions are performed]. If the number of arguments does not
+ // equal the number of parameters, the behavior is undefined. If
+ // the function is defined with a type that includes a prototype,
+ // and either the prototype ends with an ellipsis (, ...) or the
+ // types of the arguments after promotion are not compatible with
+ // the types of the parameters, the behavior is undefined. If the
+ // function is defined with a type that does not include a
+ // prototype, and the types of the arguments after promotion are
+ // not compatible with those of the parameters after promotion,
+ // the behavior is undefined [except in some trivial cases].
+ // That is, in the general case, we should assume that a call
+ // through an unprototyped function type works like a *non-variadic*
+ // call. The way we make this work is to cast to the exact type
+ // of the promoted arguments.
+ if (isa<FunctionNoProtoType>(FnType) &&
+ !getTargetHooks().isNoProtoCallVariadic(FnType->getCallConv())) {
+ assert(cast<llvm::FunctionType>(Callee->getType()->getContainedType(0))
+ ->isVarArg());
+ llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo, false);
+ CalleeTy = CalleeTy->getPointerTo();
+ Callee = Builder.CreateBitCast(Callee, CalleeTy, "callee.knr.cast");
+ }
+
+ return EmitCall(FnInfo, Callee, ReturnValue, Args, TargetDecl);
}
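The long C99 comment above deserves a concrete illustration. Hypothetically, for C source such as 'void f(); ... f(1, 2.0);' the IR-level callee has the varargs-looking type 'void (...)'; the code casts it to the exact promoted, non-variadic type of this particular call (sketch, mirroring the hunk):

// Call the K&R function with the promoted types: here, void (i32, double).
llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo, /*IsVariadic=*/false);
Callee = Builder.CreateBitCast(Callee, CalleeTy->getPointerTo(), "callee.knr.cast");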
LValue CodeGenFunction::
@@ -2428,3 +2476,279 @@ EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
return MakeAddrLValue(AddV, MPT->getPointeeType());
}
+
+static void
+EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
+ llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
+ uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) {
+ if (E->isCmpXChg()) {
+ // Note that cmpxchg only supports specifying one ordering and
+ // doesn't support weak cmpxchg, at least at the moment.
+ llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
+ LoadVal1->setAlignment(Align);
+ llvm::LoadInst *LoadVal2 = CGF.Builder.CreateLoad(Val2);
+ LoadVal2->setAlignment(Align);
+ llvm::AtomicCmpXchgInst *CXI =
+ CGF.Builder.CreateAtomicCmpXchg(Ptr, LoadVal1, LoadVal2, Order);
+ CXI->setVolatile(E->isVolatile());
+ llvm::StoreInst *StoreVal1 = CGF.Builder.CreateStore(CXI, Val1);
+ StoreVal1->setAlignment(Align);
+ llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(CXI, LoadVal1);
+ CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
+ return;
+ }
+
+ if (E->getOp() == AtomicExpr::Load) {
+ llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
+ Load->setAtomic(Order);
+ Load->setAlignment(Size);
+ Load->setVolatile(E->isVolatile());
+ llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
+ StoreDest->setAlignment(Align);
+ return;
+ }
+
+ if (E->getOp() == AtomicExpr::Store) {
+ assert(!Dest && "Store does not return a value");
+ llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
+ LoadVal1->setAlignment(Align);
+ llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
+ Store->setAtomic(Order);
+ Store->setAlignment(Size);
+ Store->setVolatile(E->isVolatile());
+ return;
+ }
+
+ llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
+ switch (E->getOp()) {
+ case AtomicExpr::CmpXchgWeak:
+ case AtomicExpr::CmpXchgStrong:
+ case AtomicExpr::Store:
+ case AtomicExpr::Load: assert(0 && "Already handled!");
+ case AtomicExpr::Add: Op = llvm::AtomicRMWInst::Add; break;
+ case AtomicExpr::Sub: Op = llvm::AtomicRMWInst::Sub; break;
+ case AtomicExpr::And: Op = llvm::AtomicRMWInst::And; break;
+ case AtomicExpr::Or: Op = llvm::AtomicRMWInst::Or; break;
+ case AtomicExpr::Xor: Op = llvm::AtomicRMWInst::Xor; break;
+ case AtomicExpr::Xchg: Op = llvm::AtomicRMWInst::Xchg; break;
+ }
+ llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
+ LoadVal1->setAlignment(Align);
+ llvm::AtomicRMWInst *RMWI =
+ CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
+ RMWI->setVolatile(E->isVolatile());
+ llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(RMWI, Dest);
+ StoreDest->setAlignment(Align);
+}
+
+// This function emits any expression (scalar, complex, or aggregate)
+// into a temporary alloca.
+static llvm::Value *
+EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
+ llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
+ CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
+ /*Init*/ true);
+ return DeclPtr;
+}
+
+static RValue ConvertTempToRValue(CodeGenFunction &CGF, QualType Ty,
+ llvm::Value *Dest) {
+ if (Ty->isAnyComplexType())
+ return RValue::getComplex(CGF.LoadComplexFromAddr(Dest, false));
+ if (CGF.hasAggregateLLVMType(Ty))
+ return RValue::getAggregate(Dest);
+ return RValue::get(CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(Dest, Ty)));
+}
+
+RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
+ QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
+ QualType MemTy = AtomicTy->getAs<AtomicType>()->getValueType();
+ CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
+ uint64_t Size = sizeChars.getQuantity();
+ CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
+ unsigned Align = alignChars.getQuantity();
+ unsigned MaxInlineWidth =
+ getContext().getTargetInfo().getMaxAtomicInlineWidth();
+ bool UseLibcall = (Size != Align || Size > MaxInlineWidth);
+
+ llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
+ Ptr = EmitScalarExpr(E->getPtr());
+ Order = EmitScalarExpr(E->getOrder());
+ if (E->isCmpXChg()) {
+ Val1 = EmitScalarExpr(E->getVal1());
+ Val2 = EmitValToTemp(*this, E->getVal2());
+ OrderFail = EmitScalarExpr(E->getOrderFail());
+ (void)OrderFail; // OrderFail is unused at the moment
+ } else if ((E->getOp() == AtomicExpr::Add || E->getOp() == AtomicExpr::Sub) &&
+ MemTy->isPointerType()) {
+ // For pointers, we're required to do a bit of math: adding 1 to an int*
+ // is not the same as adding 1 to a uintptr_t.
+ QualType Val1Ty = E->getVal1()->getType();
+ llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
+ CharUnits PointeeIncAmt =
+ getContext().getTypeSizeInChars(MemTy->getPointeeType());
+ Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
+ Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
+ EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
+ } else if (E->getOp() != AtomicExpr::Load) {
+ Val1 = EmitValToTemp(*this, E->getVal1());
+ }
+
+ if (E->getOp() != AtomicExpr::Store && !Dest)
+ Dest = CreateMemTemp(E->getType(), ".atomicdst");
+
+ if (UseLibcall) {
+ // FIXME: Finalize what the libcalls are actually supposed to look like.
+ // See also http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
+ return EmitUnsupportedRValue(E, "atomic library call");
+ }
+#if 0
+ if (UseLibcall) {
+ const char* LibCallName;
+ switch (E->getOp()) {
+ case AtomicExpr::CmpXchgWeak:
+ LibCallName = "__atomic_compare_exchange_generic"; break;
+ case AtomicExpr::CmpXchgStrong:
+ LibCallName = "__atomic_compare_exchange_generic"; break;
+ case AtomicExpr::Add: LibCallName = "__atomic_fetch_add_generic"; break;
+ case AtomicExpr::Sub: LibCallName = "__atomic_fetch_sub_generic"; break;
+ case AtomicExpr::And: LibCallName = "__atomic_fetch_and_generic"; break;
+ case AtomicExpr::Or: LibCallName = "__atomic_fetch_or_generic"; break;
+ case AtomicExpr::Xor: LibCallName = "__atomic_fetch_xor_generic"; break;
+ case AtomicExpr::Xchg: LibCallName = "__atomic_exchange_generic"; break;
+ case AtomicExpr::Store: LibCallName = "__atomic_store_generic"; break;
+ case AtomicExpr::Load: LibCallName = "__atomic_load_generic"; break;
+ }
+ llvm::SmallVector<QualType, 4> Params;
+ CallArgList Args;
+ QualType RetTy = getContext().VoidTy;
+ if (E->getOp() != AtomicExpr::Store && !E->isCmpXChg())
+ Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
+ getContext().VoidPtrTy);
+ Args.add(RValue::get(EmitCastToVoidPtr(Ptr)),
+ getContext().VoidPtrTy);
+ if (E->getOp() != AtomicExpr::Load)
+ Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
+ getContext().VoidPtrTy);
+ if (E->isCmpXChg()) {
+ Args.add(RValue::get(EmitCastToVoidPtr(Val2)),
+ getContext().VoidPtrTy);
+ RetTy = getContext().IntTy;
+ }
+ Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
+ getContext().getSizeType());
+ const CGFunctionInfo &FuncInfo =
+ CGM.getTypes().getFunctionInfo(RetTy, Args, FunctionType::ExtInfo());
+ llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo, false);
+ llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
+ RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
+ if (E->isCmpXChg())
+ return Res;
+ if (E->getOp() == AtomicExpr::Store)
+ return RValue::get(0);
+ return ConvertTempToRValue(*this, E->getType(), Dest);
+ }
+#endif
+ llvm::Type *IPtrTy =
+ llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
+ llvm::Value *OrigDest = Dest;
+ Ptr = Builder.CreateBitCast(Ptr, IPtrTy);
+ if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy);
+ if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy);
+ if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy);
+
+ if (isa<llvm::ConstantInt>(Order)) {
+ int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
+ switch (ord) {
+ case 0: // memory_order_relaxed
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+ llvm::Monotonic);
+ break;
+ case 1: // memory_order_consume
+ case 2: // memory_order_acquire
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+ llvm::Acquire);
+ break;
+ case 3: // memory_order_release
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+ llvm::Release);
+ break;
+ case 4: // memory_order_acq_rel
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+ llvm::AcquireRelease);
+ break;
+ case 5: // memory_order_seq_cst
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+ llvm::SequentiallyConsistent);
+ break;
+ default: // invalid order
+ // We should not ever get here normally, but it's hard to
+ // enforce that in general.
+ break;
+ }
+ if (E->getOp() == AtomicExpr::Store)
+ return RValue::get(0);
+ return ConvertTempToRValue(*this, E->getType(), OrigDest);
+ }
+
+ // Long case, when Order isn't obviously constant.
+
+ // Create all the relevant BB's
+ llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *ReleaseBB = 0,
+ *AcqRelBB = 0, *SeqCstBB = 0;
+ MonotonicBB = createBasicBlock("monotonic", CurFn);
+ if (E->getOp() != AtomicExpr::Store)
+ AcquireBB = createBasicBlock("acquire", CurFn);
+ if (E->getOp() != AtomicExpr::Load)
+ ReleaseBB = createBasicBlock("release", CurFn);
+ if (E->getOp() != AtomicExpr::Load && E->getOp() != AtomicExpr::Store)
+ AcqRelBB = createBasicBlock("acqrel", CurFn);
+ SeqCstBB = createBasicBlock("seqcst", CurFn);
+ llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
+
+ // Create the switch for the split
+ // MonotonicBB is arbitrarily chosen as the default case; in practice, this
+ // doesn't matter unless someone is crazy enough to use something that
+ // doesn't fold to a constant for the ordering.
+ Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
+ llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
+
+ // Emit all the different atomics
+ Builder.SetInsertPoint(MonotonicBB);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+ llvm::Monotonic);
+ Builder.CreateBr(ContBB);
+ if (E->getOp() != AtomicExpr::Store) {
+ Builder.SetInsertPoint(AcquireBB);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+ llvm::Acquire);
+ Builder.CreateBr(ContBB);
+ SI->addCase(Builder.getInt32(1), AcquireBB);
+ SI->addCase(Builder.getInt32(2), AcquireBB);
+ }
+ if (E->getOp() != AtomicExpr::Load) {
+ Builder.SetInsertPoint(ReleaseBB);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+ llvm::Release);
+ Builder.CreateBr(ContBB);
+ SI->addCase(Builder.getInt32(3), ReleaseBB);
+ }
+ if (E->getOp() != AtomicExpr::Load && E->getOp() != AtomicExpr::Store) {
+ Builder.SetInsertPoint(AcqRelBB);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+ llvm::AcquireRelease);
+ Builder.CreateBr(ContBB);
+ SI->addCase(Builder.getInt32(4), AcqRelBB);
+ }
+ Builder.SetInsertPoint(SeqCstBB);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+ llvm::SequentiallyConsistent);
+ Builder.CreateBr(ContBB);
+ SI->addCase(Builder.getInt32(5), SeqCstBB);
+
+ // Cleanup and return
+ Builder.SetInsertPoint(ContBB);
+ if (E->getOp() == AtomicExpr::Store)
+ return RValue::get(0);
+ return ConvertTempToRValue(*this, E->getType(), OrigDest);
+}
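The two ordering switches above (the constant-order case and the runtime SwitchInst) implement one mapping from C++0x memory_order values to LLVM atomic orderings. Collected into a single hedged helper for reference, with the integral values following <atomic> (relaxed=0, consume=1, acquire=2, release=3, acq_rel=4, seq_cst=5):

static llvm::AtomicOrdering mapMemoryOrder(int ord) {
  switch (ord) {
  case 0:         return llvm::Monotonic;              // memory_order_relaxed
  case 1: case 2: return llvm::Acquire;                // consume strengthened
  case 3:         return llvm::Release;
  case 4:         return llvm::AcquireRelease;
  default:        return llvm::SequentiallyConsistent; // memory_order_seq_cst
  }
}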
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index 915ffd6034e2..97754d5c0ba6 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -35,11 +35,18 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
AggValueSlot Dest;
bool IgnoreResult;
+ /// We want to use 'dest' as the return slot except under two
+ /// conditions:
+ /// - The destination slot requires garbage collection, so we
+ /// need to use the GC API.
+ /// - The destination slot is potentially aliased.
+ bool shouldUseDestForReturnSlot() const {
+ return !(Dest.requiresGCollection() || Dest.isPotentiallyAliased());
+ }
+
ReturnValueSlot getReturnValueSlot() const {
- // If the destination slot requires garbage collection, we can't
- // use the real return value slot, because we have to use the GC
- // API.
- if (Dest.requiresGCollection()) return ReturnValueSlot();
+ if (!shouldUseDestForReturnSlot())
+ return ReturnValueSlot();
return ReturnValueSlot(Dest.getAddr(), Dest.isVolatile());
}
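Aliasing matters here because emitting a call directly into an aliased destination could clobber storage the callee still reads. A hypothetical C++ fragment whose destination slot must not double as the return slot:

struct Big { int v[16]; };
Big make(Big b);
void g(Big &x) { x = make(x); } // x may alias the argument; copy afterwards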
@@ -69,7 +76,13 @@ public:
void EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore = false);
void EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore = false);
- void EmitGCMove(const Expr *E, RValue Src);
+ void EmitMoveFromReturnSlot(const Expr *E, RValue Src);
+
+ AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
+ if (CGF.getLangOptions().getGC() && TypeRequiresGCollection(T))
+ return AggValueSlot::NeedsGCBarriers;
+ return AggValueSlot::DoesNotNeedGCBarriers;
+ }
bool TypeRequiresGCollection(QualType T);
@@ -141,6 +154,9 @@ public:
void EmitNullInitializationToLValue(LValue Address);
// case Expr::ChooseExprClass:
void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
+ void VisitAtomicExpr(AtomicExpr *E) {
+ CGF.EmitAtomicExpr(E, EnsureSlot(E->getType()).getAddr());
+ }
};
} // end anonymous namespace.
@@ -173,23 +189,27 @@ bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
return Record->hasObjectMember();
}
-/// \brief Perform the final move to DestPtr if RequiresGCollection is set.
+/// \brief Perform the final move to DestPtr if for some reason
+/// getReturnValueSlot() didn't use it directly.
///
/// The idea is that you do something like this:
/// RValue Result = EmitSomething(..., getReturnValueSlot());
-/// EmitGCMove(E, Result);
-/// If GC doesn't interfere, this will cause the result to be emitted
-/// directly into the return value slot. If GC does interfere, a final
-/// move will be performed.
-void AggExprEmitter::EmitGCMove(const Expr *E, RValue Src) {
- if (Dest.requiresGCollection()) {
- CharUnits size = CGF.getContext().getTypeSizeInChars(E->getType());
- const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
- llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
- CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF, Dest.getAddr(),
- Src.getAggregateAddr(),
- SizeVal);
+/// EmitMoveFromReturnSlot(E, Result);
+///
+/// If nothing interferes, this will cause the result to be emitted
+/// directly into the return value slot. Otherwise, a final move
+/// will be performed.
+void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue Src) {
+ if (shouldUseDestForReturnSlot()) {
+ // Logically, Dest.getAddr() should equal Src.getAggregateAddr().
+ // The possibility of undef rvalues complicates that a lot,
+ // though, so we can't really assert.
+ return;
}
+
+ // Otherwise, do a final copy.
+ assert(Dest.getAddr() != Src.getAggregateAddr());
+ EmitFinalDestCopy(E, Src, /*Ignore*/ true);
}
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
@@ -215,7 +235,7 @@ void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore) {
if (Dest.requiresGCollection()) {
CharUnits size = CGF.getContext().getTypeSizeInChars(E->getType());
- const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
+ llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
Dest.getAddr(),
@@ -301,16 +321,15 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
case CK_DerivedToBase:
case CK_BaseToDerived:
case CK_UncheckedDerivedToBase: {
- assert(0 && "cannot perform hierarchy conversion in EmitAggExpr: "
+ llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
"should have been unpacked before we got here");
- break;
}
case CK_GetObjCProperty: {
LValue LV = CGF.EmitLValue(E->getSubExpr());
assert(LV.isPropertyRef());
RValue RV = CGF.EmitLoadOfPropertyRefLValue(LV, getReturnValueSlot());
- EmitGCMove(E, RV);
+ EmitMoveFromReturnSlot(E, RV);
break;
}
@@ -348,7 +367,8 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
case CK_FloatingToIntegral:
case CK_FloatingToBoolean:
case CK_FloatingCast:
- case CK_AnyPointerToObjCPointerCast:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
case CK_AnyPointerToBlockPointerCast:
case CK_ObjCObjectLValueCast:
case CK_FloatingRealToComplex:
@@ -361,9 +381,10 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
case CK_IntegralComplexToBoolean:
case CK_IntegralComplexCast:
case CK_IntegralComplexToFloatingComplex:
- case CK_ObjCProduceObject:
- case CK_ObjCConsumeObject:
- case CK_ObjCReclaimReturnedObject:
+ case CK_ARCProduceObject:
+ case CK_ARCConsumeObject:
+ case CK_ARCReclaimReturnedObject:
+ case CK_ARCExtendBlockObject:
llvm_unreachable("cast kind invalid for aggregate types");
}
}
@@ -375,12 +396,12 @@ void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
}
RValue RV = CGF.EmitCallExpr(E, getReturnValueSlot());
- EmitGCMove(E, RV);
+ EmitMoveFromReturnSlot(E, RV);
}
void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
RValue RV = CGF.EmitObjCMessageExpr(E, getReturnValueSlot());
- EmitGCMove(E, RV);
+ EmitMoveFromReturnSlot(E, RV);
}
void AggExprEmitter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
@@ -426,10 +447,9 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
// as it may change the 'forwarding' field via call to Block_copy.
LValue RHS = CGF.EmitLValue(E->getRHS());
LValue LHS = CGF.EmitLValue(E->getLHS());
- bool GCollection = false;
- if (CGF.getContext().getLangOptions().getGCMode())
- GCollection = TypeRequiresGCollection(E->getLHS()->getType());
- Dest = AggValueSlot::forLValue(LHS, true, GCollection);
+ Dest = AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
+ needsGC(E->getLHS()->getType()),
+ AggValueSlot::IsAliased);
EmitFinalDestCopy(E, RHS, true);
return;
}
@@ -451,13 +471,11 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
}
CGF.EmitStoreThroughPropertyRefLValue(Src, LHS);
} else {
- bool GCollection = false;
- if (CGF.getContext().getLangOptions().getGCMode())
- GCollection = TypeRequiresGCollection(E->getLHS()->getType());
-
// Codegen the RHS so that it stores directly into the LHS.
- AggValueSlot LHSSlot = AggValueSlot::forLValue(LHS, true,
- GCollection);
+ AggValueSlot LHSSlot =
+ AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
+ needsGC(E->getLHS()->getType()),
+ AggValueSlot::IsAliased);
CGF.EmitAggExpr(E->getRHS(), LHSSlot, false);
EmitFinalDestCopy(E, LHS, true);
}
@@ -476,7 +494,7 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);
// Save whether the destination's lifetime is externally managed.
- bool DestLifetimeManaged = Dest.isLifetimeExternallyManaged();
+ bool isExternallyDestructed = Dest.isExternallyDestructed();
eval.begin(CGF);
CGF.EmitBlock(LHSBlock);
@@ -489,8 +507,8 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
// If the result of an agg expression is unused, then the emission
// of the LHS might need to create a destination slot. That's fine
// with us, and we can safely emit the RHS into the same slot, but
- // we shouldn't claim that its lifetime is externally managed.
- Dest.setLifetimeExternallyManaged(DestLifetimeManaged);
+ // we shouldn't claim that it's already being destructed.
+ Dest.setExternallyDestructed(isExternallyDestructed);
eval.begin(CGF);
CGF.EmitBlock(RHSBlock);
@@ -518,16 +536,17 @@ void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
// Ensure that we have a slot, but if we already do, remember
- // whether its lifetime was externally managed.
- bool WasManaged = Dest.isLifetimeExternallyManaged();
+ // whether it was externally destructed.
+ bool wasExternallyDestructed = Dest.isExternallyDestructed();
Dest = EnsureSlot(E->getType());
- Dest.setLifetimeExternallyManaged();
+
+ // We're going to push a destructor if there isn't already one.
+ Dest.setExternallyDestructed();
Visit(E->getSubExpr());
- // Set up the temporary's destructor if its lifetime wasn't already
- // being managed.
- if (!WasManaged)
+ // Push that destructor we promised.
+ if (!wasExternallyDestructed)
CGF.EmitCXXTemporary(E->getTemporary(), Dest.getAddr());
}
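In source terms, the externally-destructed flag keeps a bound temporary from having its destructor cleanup pushed twice. A hypothetical example that reaches VisitCXXBindTemporaryExpr:

struct S { ~S(); };
void use() { const S &r = S(); } // one CXXTemporary, one destructor cleanup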
@@ -596,7 +615,10 @@ AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV) {
} else if (type->isAnyComplexType()) {
CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
} else if (CGF.hasAggregateLLVMType(type)) {
- CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV, true, false,
+ CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV,
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased,
Dest.isZeroed()));
} else if (LV.isSimple()) {
CGF.EmitScalarInit(E, /*D=*/0, LV, /*Captured=*/false);
@@ -647,9 +669,9 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
// Handle initialization of an array.
if (E->getType()->isArrayType()) {
- const llvm::PointerType *APType =
+ llvm::PointerType *APType =
cast<llvm::PointerType>(DestPtr->getType());
- const llvm::ArrayType *AType =
+ llvm::ArrayType *AType =
cast<llvm::ArrayType>(APType->getElementType());
uint64_t NumInitElements = E->getNumInits();
@@ -676,7 +698,7 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
llvm::Value *indices[] = { zero, zero };
llvm::Value *begin =
- Builder.CreateInBoundsGEP(DestPtr, indices, indices+2, "arrayinit.begin");
+ Builder.CreateInBoundsGEP(DestPtr, indices, "arrayinit.begin");
// Exception safety requires us to destroy all the
// already-constructed members if an initializer throws.
@@ -839,7 +861,7 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
// We'll need to enter cleanup scopes in case any of the member
// initializers throw an exception.
- llvm::SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
+ SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
// Here we iterate over the fields; this makes it simpler to both
// default-initialize fields and skip over unnamed fields.
@@ -948,7 +970,7 @@ static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
// Reference values are always non-null and have the width of a pointer.
if (Field->getType()->isReferenceType())
NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
- CGF.getContext().Target.getPointerWidth(0));
+ CGF.getContext().getTargetInfo().getPointerWidth(0));
else
NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
}
@@ -999,7 +1021,7 @@ static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
CharUnits Align = TypeInfo.second;
llvm::Value *Loc = Slot.getAddr();
- const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
Loc = CGF.Builder.CreateBitCast(Loc, BP);
CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal,
@@ -1036,7 +1058,9 @@ LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
assert(hasAggregateLLVMType(E->getType()) && "Invalid argument!");
llvm::Value *Temp = CreateMemTemp(E->getType());
LValue LV = MakeAddrLValue(Temp, E->getType());
- EmitAggExpr(E, AggValueSlot::forLValue(LV, false));
+ EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased));
return LV;
}
@@ -1049,7 +1073,9 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
if (const RecordType *RT = Ty->getAs<RecordType>()) {
CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
assert((Record->hasTrivialCopyConstructor() ||
- Record->hasTrivialCopyAssignment()) &&
+ Record->hasTrivialCopyAssignment() ||
+ Record->hasTrivialMoveConstructor() ||
+ Record->hasTrivialMoveAssignment()) &&
"Trying to aggregate-copy a type without a trivial copy "
"constructor or assignment operator");
// Ignore empty classes in C++.
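The widened assertion admits C++0x trivial moves. A hypothetical type whose trivial move assignment now flows through EmitAggregateCopy:

struct POD { int a; double b; };
void assign(POD &dst, POD &src) { dst = static_cast<POD&&>(src); } // bit copy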
@@ -1088,24 +1114,24 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
// we need to use a different call here. We use isVolatile to indicate when
// either the source or the destination is volatile.
- const llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
- const llvm::Type *DBP =
+ llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
+ llvm::Type *DBP =
llvm::Type::getInt8PtrTy(getLLVMContext(), DPT->getAddressSpace());
- DestPtr = Builder.CreateBitCast(DestPtr, DBP, "tmp");
+ DestPtr = Builder.CreateBitCast(DestPtr, DBP);
- const llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
- const llvm::Type *SBP =
+ llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
+ llvm::Type *SBP =
llvm::Type::getInt8PtrTy(getLLVMContext(), SPT->getAddressSpace());
- SrcPtr = Builder.CreateBitCast(SrcPtr, SBP, "tmp");
+ SrcPtr = Builder.CreateBitCast(SrcPtr, SBP);
// Don't do any of the memmove_collectable tests if GC isn't set.
- if (CGM.getLangOptions().getGCMode() == LangOptions::NonGC) {
+ if (CGM.getLangOptions().getGC() == LangOptions::NonGC) {
// fall through
} else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
RecordDecl *Record = RecordTy->getDecl();
if (Record->hasObjectMember()) {
CharUnits size = TypeInfo.first;
- const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+ llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
SizeVal);
@@ -1116,7 +1142,7 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
if (RecordTy->getDecl()->hasObjectMember()) {
CharUnits size = TypeInfo.first;
- const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+ llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
llvm::Value *SizeVal =
llvm::ConstantInt::get(SizeTy, size.getQuantity());
CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
diff --git a/lib/CodeGen/CGExprCXX.cpp b/lib/CodeGen/CGExprCXX.cpp
index 4396f567f2f9..78db5903de54 100644
--- a/lib/CodeGen/CGExprCXX.cpp
+++ b/lib/CodeGen/CGExprCXX.cpp
@@ -13,6 +13,7 @@
#include "clang/Frontend/CodeGenOptions.h"
#include "CodeGenFunction.h"
+#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGDebugInfo.h"
@@ -206,16 +207,17 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
return RValue::get(0);
- if (MD->isCopyAssignmentOperator()) {
- // We don't like to generate the trivial copy assignment operator when
- // it isn't necessary; just produce the proper effect here.
+ if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) {
+ // We don't like to generate the trivial copy/move assignment operator
+ // when it isn't necessary; just produce the proper effect here.
llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
EmitAggregateCopy(This, RHS, CE->getType());
return RValue::get(This);
}
if (isa<CXXConstructorDecl>(MD) &&
- cast<CXXConstructorDecl>(MD)->isCopyConstructor()) {
+ cast<CXXConstructorDecl>(MD)->isCopyOrMoveConstructor()) {
+ // Trivial move and copy ctors are the same.
llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
EmitSynthesizedCXXCopyCtorCall(cast<CXXConstructorDecl>(MD), This, RHS,
CE->arg_begin(), CE->arg_end());
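
At the source level, the shortcut now also covers trivial move operations. A minimal example of the calls that take this path (illustrative only, not taken from the patch):

    #include <utility>

    struct P { int x, y; };        // all special members are trivial

    void assign(P &a, P &b) {
      a = b;                       // trivial copy assignment -> aggregate copy
      a = std::move(b);            // trivial move assignment -> same lowering
    }
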
@@ -236,7 +238,7 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
FInfo = &CGM.getTypes().getFunctionInfo(MD);
const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
- const llvm::Type *Ty
+ llvm::Type *Ty
= CGM.getTypes().GetFunctionType(*FInfo, FPT->isVariadic());
// C++ [class.virtual]p12:
@@ -333,16 +335,12 @@ CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
LValue LV = EmitLValue(E->getArg(0));
llvm::Value *This = LV.getAddress();
- if (MD->isCopyAssignmentOperator()) {
- const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(MD->getDeclContext());
- if (ClassDecl->hasTrivialCopyAssignment()) {
- assert(!ClassDecl->hasUserDeclaredCopyAssignment() &&
- "EmitCXXOperatorMemberCallExpr - user declared copy assignment");
- llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
- QualType Ty = E->getType();
- EmitAggregateCopy(This, Src, Ty);
- return RValue::get(This);
- }
+ if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
+ MD->isTrivial()) {
+ llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
+ QualType Ty = E->getType();
+ EmitAggregateCopy(This, Src, Ty);
+ return RValue::get(This);
}
llvm::Value *Callee = EmitCXXOperatorMemberCallee(E, MD, This);
@@ -350,6 +348,54 @@ CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
E->arg_begin() + 1, E->arg_end());
}
+RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
+ ReturnValueSlot ReturnValue) {
+ return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue);
+}
+
+static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
+ llvm::Value *DestPtr,
+ const CXXRecordDecl *Base) {
+ if (Base->isEmpty())
+ return;
+
+ DestPtr = CGF.EmitCastToVoidPtr(DestPtr);
+
+ const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
+ CharUnits Size = Layout.getNonVirtualSize();
+ CharUnits Align = Layout.getNonVirtualAlign();
+
+ llvm::Value *SizeVal = CGF.CGM.getSize(Size);
+
+ // If the type contains a pointer to data member we can't memset it to zero.
+ // Instead, create a null constant and copy it to the destination.
+ // TODO: there are other patterns besides zero that we can usefully memset,
+ // like -1, which happens to be the pattern used by member-pointers.
+ // TODO: isZeroInitializable can be over-conservative in the case where a
+ // virtual base contains a member pointer.
+ if (!CGF.CGM.getTypes().isZeroInitializable(Base)) {
+ llvm::Constant *NullConstant = CGF.CGM.EmitNullConstantForBase(Base);
+
+ llvm::GlobalVariable *NullVariable =
+ new llvm::GlobalVariable(CGF.CGM.getModule(), NullConstant->getType(),
+ /*isConstant=*/true,
+ llvm::GlobalVariable::PrivateLinkage,
+ NullConstant, Twine());
+ NullVariable->setAlignment(Align.getQuantity());
+ llvm::Value *SrcPtr = CGF.EmitCastToVoidPtr(NullVariable);
+
+ // Get and call the appropriate llvm.memcpy overload.
+ CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity());
+ return;
+ }
+
+ // Otherwise, just memset the whole thing to zero. This is legal
+ // because in LLVM, all default initializers (other than the ones we just
+ // handled above) are guaranteed to have a bit pattern of all zeros.
+ CGF.Builder.CreateMemSet(DestPtr, CGF.Builder.getInt8(0), SizeVal,
+ Align.getQuantity());
+}
+
void
CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
AggValueSlot Dest) {
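
The member-pointer caveat in the comments above is load-bearing: a base subobject containing pointers to data members cannot be zero-filled, because the Itanium C++ ABI represents the null data member pointer as -1 rather than 0. A stand-alone C++11 demonstration (assumes an Itanium-ABI target):

    #include <cstdio>
    #include <cstring>

    struct S { int field; };

    int main() {
      int S::*mp = nullptr;                 // a null pointer-to-data-member
      unsigned char bytes[sizeof mp];
      std::memcpy(bytes, &mp, sizeof mp);
      for (unsigned i = 0; i != sizeof mp; ++i)
        std::printf("%02x ", bytes[i]);     // prints ff bytes, not 00
      std::printf("\n");
      return 0;
    }
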
@@ -360,8 +406,19 @@ CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
// constructor, as can be the case with a non-user-provided default
// constructor, emit the zero initialization now, unless destination is
// already zeroed.
- if (E->requiresZeroInitialization() && !Dest.isZeroed())
- EmitNullInitialization(Dest.getAddr(), E->getType());
+ if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
+ switch (E->getConstructionKind()) {
+ case CXXConstructExpr::CK_Delegating:
+ assert(0 && "Delegating constructor should not need zeroing");
+ case CXXConstructExpr::CK_Complete:
+ EmitNullInitialization(Dest.getAddr(), E->getType());
+ break;
+ case CXXConstructExpr::CK_VirtualBase:
+ case CXXConstructExpr::CK_NonVirtualBase:
+ EmitNullBaseClassInitialization(*this, Dest.getAddr(), CD->getParent());
+ break;
+ }
+ }
// If this is a call to a trivial default constructor, do nothing.
if (CD->isTrivial() && CD->isDefaultConstructor())
@@ -483,7 +540,7 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
// the cookie size would bring the total size >= 0.
bool isSigned
= e->getArraySize()->getType()->isSignedIntegerOrEnumerationType();
- const llvm::IntegerType *numElementsType
+ llvm::IntegerType *numElementsType
= cast<llvm::IntegerType>(numElements->getType());
unsigned numElementsWidth = numElementsType->getBitWidth();
@@ -703,63 +760,85 @@ static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const CXXNewExpr *E,
AllocType.isVolatileQualified());
else {
AggValueSlot Slot
- = AggValueSlot::forAddr(NewPtr, AllocType.getQualifiers(), true);
+ = AggValueSlot::forAddr(NewPtr, AllocType.getQualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
CGF.EmitAggExpr(Init, Slot);
}
}
void
CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
- llvm::Value *NewPtr,
- llvm::Value *NumElements) {
+ QualType elementType,
+ llvm::Value *beginPtr,
+ llvm::Value *numElements) {
// We have a POD type.
if (E->getNumConstructorArgs() == 0)
return;
-
- const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
-
- // Create a temporary for the loop index and initialize it with 0.
- llvm::Value *IndexPtr = CreateTempAlloca(SizeTy, "loop.index");
- llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
- Builder.CreateStore(Zero, IndexPtr);
-
- // Start the loop with a block that tests the condition.
- llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
- llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
-
- EmitBlock(CondBlock);
-
- llvm::BasicBlock *ForBody = createBasicBlock("for.body");
-
- // Generate: if (loop-index < number-of-elements fall to the loop body,
- // otherwise, go to the block after the for-loop.
- llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
- llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElements, "isless");
- // If the condition is true, execute the body.
- Builder.CreateCondBr(IsLess, ForBody, AfterFor);
-
- EmitBlock(ForBody);
-
- llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
- // Inside the loop body, emit the constructor call on the array element.
- Counter = Builder.CreateLoad(IndexPtr);
- llvm::Value *Address = Builder.CreateInBoundsGEP(NewPtr, Counter,
- "arrayidx");
- StoreAnyExprIntoOneUnit(*this, E, Address);
-
- EmitBlock(ContinueBlock);
-
- // Emit the increment of the loop counter.
- llvm::Value *NextVal = llvm::ConstantInt::get(SizeTy, 1);
- Counter = Builder.CreateLoad(IndexPtr);
- NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
- Builder.CreateStore(NextVal, IndexPtr);
-
- // Finally, branch back up to the condition for the next iteration.
- EmitBranch(CondBlock);
-
- // Emit the fall-through block.
- EmitBlock(AfterFor, true);
+
+ // Check if the number of elements is constant.
+ bool checkZero = true;
+ if (llvm::ConstantInt *constNum = dyn_cast<llvm::ConstantInt>(numElements)) {
+ // If it's constant zero, skip the whole loop.
+ if (constNum->isZero()) return;
+
+ checkZero = false;
+ }
+
+ // Find the end of the array, hoisted out of the loop.
+ llvm::Value *endPtr =
+ Builder.CreateInBoundsGEP(beginPtr, numElements, "array.end");
+
+ // Create the continuation block.
+ llvm::BasicBlock *contBB = createBasicBlock("new.loop.end");
+
+ // If we need to check for zero, do so now.
+ if (checkZero) {
+ llvm::BasicBlock *nonEmptyBB = createBasicBlock("new.loop.nonempty");
+ llvm::Value *isEmpty = Builder.CreateICmpEQ(beginPtr, endPtr,
+ "array.isempty");
+ Builder.CreateCondBr(isEmpty, contBB, nonEmptyBB);
+ EmitBlock(nonEmptyBB);
+ }
+
+ // Enter the loop.
+ llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *loopBB = createBasicBlock("new.loop");
+
+ EmitBlock(loopBB);
+
+ // Set up the current-element phi.
+ llvm::PHINode *curPtr =
+ Builder.CreatePHI(beginPtr->getType(), 2, "array.cur");
+ curPtr->addIncoming(beginPtr, entryBB);
+
+ // Enter a partial-destruction cleanup if necessary.
+ QualType::DestructionKind dtorKind = elementType.isDestructedType();
+ EHScopeStack::stable_iterator cleanup;
+ if (needsEHCleanup(dtorKind)) {
+ pushRegularPartialArrayCleanup(beginPtr, curPtr, elementType,
+ getDestroyer(dtorKind));
+ cleanup = EHStack.stable_begin();
+ }
+
+ // Emit the initializer into this element.
+ StoreAnyExprIntoOneUnit(*this, E, curPtr);
+
+ // Leave the cleanup if we entered one.
+ if (cleanup != EHStack.stable_end())
+ DeactivateCleanupBlock(cleanup);
+
+ // Advance to the next element.
+ llvm::Value *nextPtr = Builder.CreateConstGEP1_32(curPtr, 1, "array.next");
+
+ // Check whether we've gotten to the end of the array and, if so,
+ // exit the loop.
+ llvm::Value *isEnd = Builder.CreateICmpEQ(nextPtr, endPtr, "array.atend");
+ Builder.CreateCondBr(isEnd, contBB, loopBB);
+ curPtr->addIncoming(nextPtr, Builder.GetInsertBlock());
+
+ EmitBlock(contBB);
}
static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
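
For reference, the control flow of the rewritten initializer loop, as a C++ sketch. The partial-destruction EH cleanup is omitted, and a placement-new on a template parameter stands in for StoreAnyExprIntoOneUnit; names mirror the basic-block labels above:

    #include <cstddef>
    #include <new>

    template <class T>
    void initNewArray(T *begin, std::size_t n) {
      T *end = begin + n;          // "array.end", hoisted out of the loop
      if (begin == end) return;    // emptiness check, skipped when n is a
                                   // non-zero compile-time constant
      T *cur = begin;              // the "array.cur" PHI
      do {
        ::new (static_cast<void *>(cur)) T();  // initialize current element
        ++cur;                                 // "array.next"
      } while (cur != end);                    // "array.atend" back-edge
    }
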
@@ -771,6 +850,7 @@ static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
}
static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
+ QualType ElementType,
llvm::Value *NewPtr,
llvm::Value *NumElements,
llvm::Value *AllocSizeWithoutCookie) {
@@ -783,11 +863,10 @@ static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
if (!E->hasInitializer() || Ctor->getParent()->isEmpty())
return;
- if (CGF.CGM.getTypes().isZeroInitializable(E->getAllocatedType())) {
+ if (CGF.CGM.getTypes().isZeroInitializable(ElementType)) {
// Optimization: since zero initialization will just set the memory
// to all zeroes, generate a single memset to do it in one shot.
- EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
- AllocSizeWithoutCookie);
+ EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie);
return;
}
@@ -803,11 +882,10 @@ static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
isa<ImplicitValueInitExpr>(E->getConstructorArg(0))) {
// Optimization: since zero initialization will just set the memory
// to all zeroes, generate a single memset to do it in one shot.
- EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
- AllocSizeWithoutCookie);
- return;
+ EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie);
+ return;
} else {
- CGF.EmitNewArrayInitializer(E, NewPtr, NumElements);
+ CGF.EmitNewArrayInitializer(E, ElementType, NewPtr, NumElements);
return;
}
}
@@ -819,7 +897,7 @@ static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
if (E->hasInitializer() &&
!Ctor->getParent()->hasUserDeclaredConstructor() &&
!Ctor->getParent()->isEmpty())
- CGF.EmitNullInitialization(NewPtr, E->getAllocatedType());
+ CGF.EmitNullInitialization(NewPtr, ElementType);
CGF.EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
NewPtr, E->constructor_arg_begin(),
@@ -1086,15 +1164,6 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
Builder.CreateCondBr(isNull, contBB, notNullBB);
EmitBlock(notNullBB);
}
-
- assert((allocSize == allocSizeWithoutCookie) ==
- CalculateCookiePadding(*this, E).isZero());
- if (allocSize != allocSizeWithoutCookie) {
- assert(E->isArray());
- allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
- numElements,
- E, allocType);
- }
// If there's an operator delete, enter a cleanup to call it if an
// exception is thrown.
@@ -1105,21 +1174,28 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
operatorDeleteCleanup = EHStack.stable_begin();
}
- const llvm::Type *elementPtrTy
+ assert((allocSize == allocSizeWithoutCookie) ==
+ CalculateCookiePadding(*this, E).isZero());
+ if (allocSize != allocSizeWithoutCookie) {
+ assert(E->isArray());
+ allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
+ numElements,
+ E, allocType);
+ }
+
+ llvm::Type *elementPtrTy
= ConvertTypeForMem(allocType)->getPointerTo(AS);
llvm::Value *result = Builder.CreateBitCast(allocation, elementPtrTy);
+ EmitNewInitializer(*this, E, allocType, result, numElements,
+ allocSizeWithoutCookie);
if (E->isArray()) {
- EmitNewInitializer(*this, E, result, numElements, allocSizeWithoutCookie);
-
// NewPtr is a pointer to the base element type. If we're
// allocating an array of arrays, we'll need to cast back to the
// array pointer type.
- const llvm::Type *resultType = ConvertTypeForMem(E->getType());
+ llvm::Type *resultType = ConvertTypeForMem(E->getType());
if (result->getType() != resultType)
result = Builder.CreateBitCast(result, resultType);
- } else {
- EmitNewInitializer(*this, E, result, numElements, allocSizeWithoutCookie);
}
// Deactivate the 'operator delete' cleanup if we finished
@@ -1206,7 +1282,7 @@ static void EmitObjectDelete(CodeGenFunction &CGF,
const CXXDestructorDecl *Dtor = 0;
if (const RecordType *RT = ElementType->getAs<RecordType>()) {
CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
- if (!RD->hasTrivialDestructor()) {
+ if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
Dtor = RD->getDestructor();
if (Dtor->isVirtual()) {
@@ -1218,7 +1294,7 @@ static void EmitObjectDelete(CodeGenFunction &CGF,
ElementType);
}
- const llvm::Type *Ty =
+ llvm::Type *Ty =
CGF.getTypes().GetFunctionType(CGF.getTypes().getFunctionInfo(Dtor,
Dtor_Complete),
/*isVariadic=*/false);
@@ -1307,7 +1383,7 @@ namespace {
// Pass the original requested size as the second argument.
if (DeleteFTy->getNumArgs() == 2) {
QualType size_t = DeleteFTy->getArgType(1);
- const llvm::IntegerType *SizeTy
+ llvm::IntegerType *SizeTy
= cast<llvm::IntegerType>(CGF.ConvertType(size_t));
CharUnits ElementTypeSize =
@@ -1406,7 +1482,7 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
if (DeleteTy->isConstantArrayType()) {
llvm::Value *Zero = Builder.getInt32(0);
- llvm::SmallVector<llvm::Value*,8> GEP;
+ SmallVector<llvm::Value*,8> GEP;
GEP.push_back(Zero); // point at the outermost array
@@ -1420,7 +1496,7 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
GEP.push_back(Zero);
}
- Ptr = Builder.CreateInBoundsGEP(Ptr, GEP.begin(), GEP.end(), "del.first");
+ Ptr = Builder.CreateInBoundsGEP(Ptr, GEP, "del.first");
}
assert(ConvertTypeForMem(DeleteTy) ==
@@ -1439,8 +1515,8 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
// void __cxa_bad_typeid();
- const llvm::Type *VoidTy = llvm::Type::getVoidTy(CGF.getLLVMContext());
- const llvm::FunctionType *FTy =
+ llvm::Type *VoidTy = llvm::Type::getVoidTy(CGF.getLLVMContext());
+ llvm::FunctionType *FTy =
llvm::FunctionType::get(VoidTy, false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
@@ -1454,7 +1530,7 @@ static void EmitBadTypeidCall(CodeGenFunction &CGF) {
static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF,
const Expr *E,
- const llvm::Type *StdTypeInfoPtrTy) {
+ llvm::Type *StdTypeInfoPtrTy) {
// Get the vtable pointer.
llvm::Value *ThisPtr = CGF.EmitLValue(E).getAddress();
@@ -1487,7 +1563,7 @@ static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF,
}
llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
- const llvm::Type *StdTypeInfoPtrTy =
+ llvm::Type *StdTypeInfoPtrTy =
ConvertType(E->getType())->getPointerTo();
if (E->isTypeOperand()) {
@@ -1528,7 +1604,7 @@ static llvm::Constant *getDynamicCastFn(CodeGenFunction &CGF) {
llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
llvm::FunctionType::get(Int8PtrTy, Args, false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast");
@@ -1537,8 +1613,8 @@ static llvm::Constant *getDynamicCastFn(CodeGenFunction &CGF) {
static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
// void __cxa_bad_cast();
- const llvm::Type *VoidTy = llvm::Type::getVoidTy(CGF.getLLVMContext());
- const llvm::FunctionType *FTy =
+ llvm::Type *VoidTy = llvm::Type::getVoidTy(CGF.getLLVMContext());
+ llvm::FunctionType *FTy =
llvm::FunctionType::get(VoidTy, false);
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
@@ -1554,9 +1630,9 @@ static llvm::Value *
EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
QualType SrcTy, QualType DestTy,
llvm::BasicBlock *CastEnd) {
- const llvm::Type *PtrDiffLTy =
+ llvm::Type *PtrDiffLTy =
CGF.ConvertType(CGF.getContext().getPointerDiffType());
- const llvm::Type *DestLTy = CGF.ConvertType(DestTy);
+ llvm::Type *DestLTy = CGF.ConvertType(DestTy);
if (const PointerType *PTy = DestTy->getAs<PointerType>()) {
if (PTy->getPointeeType()->isVoidType()) {
@@ -1626,7 +1702,7 @@ EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
QualType DestTy) {
- const llvm::Type *DestLTy = CGF.ConvertType(DestTy);
+ llvm::Type *DestLTy = CGF.ConvertType(DestTy);
if (DestTy->isPointerType())
return llvm::Constant::getNullValue(DestLTy);
diff --git a/lib/CodeGen/CGExprComplex.cpp b/lib/CodeGen/CGExprComplex.cpp
index 35cff1d72714..4a31bcfbe9e0 100644
--- a/lib/CodeGen/CGExprComplex.cpp
+++ b/lib/CodeGen/CGExprComplex.cpp
@@ -103,8 +103,7 @@ public:
ComplexPairTy VisitStmt(Stmt *S) {
S->dump(CGF.getContext().getSourceManager());
- assert(0 && "Stmt can't have complex result type!");
- return ComplexPairTy();
+ llvm_unreachable("Stmt can't have complex result type!");
}
ComplexPairTy VisitExpr(Expr *S);
ComplexPairTy VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr());}
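
A recurring cleanup throughout this commit: assert(0 && ...) followed by a dummy return is replaced with llvm_unreachable, which asserts in debug builds and marks the point unreachable for the optimizer in release builds, so no placeholder return value is needed. The idiom in isolation:

    #include "llvm/Support/ErrorHandling.h"

    enum Kind { KindA, KindB };

    int handle(Kind K) {
      switch (K) {
      case KindA: return 1;
      case KindB: return 2;
      }
      // No assert(0) + return pair needed: this documents intent and tells
      // the compiler that control cannot fall through here.
      llvm_unreachable("covered switch over Kind");
    }
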
@@ -119,6 +118,7 @@ public:
// l-values.
ComplexPairTy VisitDeclRefExpr(const Expr *E) { return EmitLoadOfLValue(E); }
+ ComplexPairTy VisitBlockDeclRefExpr(const Expr *E) { return EmitLoadOfLValue(E); }
ComplexPairTy VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
return EmitLoadOfLValue(E);
}
@@ -266,6 +266,10 @@ public:
ComplexPairTy VisitInitListExpr(InitListExpr *E);
ComplexPairTy VisitVAArgExpr(VAArgExpr *E);
+
+ ComplexPairTy VisitAtomicExpr(AtomicExpr *E) {
+ return CGF.EmitAtomicExpr(E).getComplexVal();
+ }
};
} // end anonymous namespace.
@@ -312,8 +316,8 @@ void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val, llvm::Value *Ptr,
ComplexPairTy ComplexExprEmitter::VisitExpr(Expr *E) {
CGF.ErrorUnsupported(E, "complex expression");
- const llvm::Type *EltTy =
- CGF.ConvertType(E->getType()->getAs<ComplexType>()->getElementType());
+ llvm::Type *EltTy =
+ CGF.ConvertType(E->getType()->getAs<ComplexType>()->getElementType());
llvm::Value *U = llvm::UndefValue::get(EltTy);
return ComplexPairTy(U, U);
}
@@ -402,16 +406,18 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastExpr::CastKind CK, Expr *Op,
case CK_FloatingToIntegral:
case CK_FloatingToBoolean:
case CK_FloatingCast:
- case CK_AnyPointerToObjCPointerCast:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
case CK_AnyPointerToBlockPointerCast:
case CK_ObjCObjectLValueCast:
case CK_FloatingComplexToReal:
case CK_FloatingComplexToBoolean:
case CK_IntegralComplexToReal:
case CK_IntegralComplexToBoolean:
- case CK_ObjCProduceObject:
- case CK_ObjCConsumeObject:
- case CK_ObjCReclaimReturnedObject:
+ case CK_ARCProduceObject:
+ case CK_ARCConsumeObject:
+ case CK_ARCReclaimReturnedObject:
+ case CK_ARCExtendBlockObject:
llvm_unreachable("invalid cast kind for complex value");
case CK_FloatingRealToComplex:
@@ -524,40 +530,40 @@ ComplexPairTy ComplexExprEmitter::EmitBinDiv(const BinOpInfo &Op) {
llvm::Value *DSTr, *DSTi;
if (Op.LHS.first->getType()->isFloatingPointTy()) {
// (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd))
- llvm::Value *Tmp1 = Builder.CreateFMul(LHSr, RHSr, "tmp"); // a*c
- llvm::Value *Tmp2 = Builder.CreateFMul(LHSi, RHSi, "tmp"); // b*d
- llvm::Value *Tmp3 = Builder.CreateFAdd(Tmp1, Tmp2, "tmp"); // ac+bd
+ llvm::Value *Tmp1 = Builder.CreateFMul(LHSr, RHSr); // a*c
+ llvm::Value *Tmp2 = Builder.CreateFMul(LHSi, RHSi); // b*d
+ llvm::Value *Tmp3 = Builder.CreateFAdd(Tmp1, Tmp2); // ac+bd
- llvm::Value *Tmp4 = Builder.CreateFMul(RHSr, RHSr, "tmp"); // c*c
- llvm::Value *Tmp5 = Builder.CreateFMul(RHSi, RHSi, "tmp"); // d*d
- llvm::Value *Tmp6 = Builder.CreateFAdd(Tmp4, Tmp5, "tmp"); // cc+dd
+ llvm::Value *Tmp4 = Builder.CreateFMul(RHSr, RHSr); // c*c
+ llvm::Value *Tmp5 = Builder.CreateFMul(RHSi, RHSi); // d*d
+ llvm::Value *Tmp6 = Builder.CreateFAdd(Tmp4, Tmp5); // cc+dd
- llvm::Value *Tmp7 = Builder.CreateFMul(LHSi, RHSr, "tmp"); // b*c
- llvm::Value *Tmp8 = Builder.CreateFMul(LHSr, RHSi, "tmp"); // a*d
- llvm::Value *Tmp9 = Builder.CreateFSub(Tmp7, Tmp8, "tmp"); // bc-ad
+ llvm::Value *Tmp7 = Builder.CreateFMul(LHSi, RHSr); // b*c
+ llvm::Value *Tmp8 = Builder.CreateFMul(LHSr, RHSi); // a*d
+ llvm::Value *Tmp9 = Builder.CreateFSub(Tmp7, Tmp8); // bc-ad
- DSTr = Builder.CreateFDiv(Tmp3, Tmp6, "tmp");
- DSTi = Builder.CreateFDiv(Tmp9, Tmp6, "tmp");
+ DSTr = Builder.CreateFDiv(Tmp3, Tmp6);
+ DSTi = Builder.CreateFDiv(Tmp9, Tmp6);
} else {
// (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd))
- llvm::Value *Tmp1 = Builder.CreateMul(LHSr, RHSr, "tmp"); // a*c
- llvm::Value *Tmp2 = Builder.CreateMul(LHSi, RHSi, "tmp"); // b*d
- llvm::Value *Tmp3 = Builder.CreateAdd(Tmp1, Tmp2, "tmp"); // ac+bd
+ llvm::Value *Tmp1 = Builder.CreateMul(LHSr, RHSr); // a*c
+ llvm::Value *Tmp2 = Builder.CreateMul(LHSi, RHSi); // b*d
+ llvm::Value *Tmp3 = Builder.CreateAdd(Tmp1, Tmp2); // ac+bd
- llvm::Value *Tmp4 = Builder.CreateMul(RHSr, RHSr, "tmp"); // c*c
- llvm::Value *Tmp5 = Builder.CreateMul(RHSi, RHSi, "tmp"); // d*d
- llvm::Value *Tmp6 = Builder.CreateAdd(Tmp4, Tmp5, "tmp"); // cc+dd
+ llvm::Value *Tmp4 = Builder.CreateMul(RHSr, RHSr); // c*c
+ llvm::Value *Tmp5 = Builder.CreateMul(RHSi, RHSi); // d*d
+ llvm::Value *Tmp6 = Builder.CreateAdd(Tmp4, Tmp5); // cc+dd
- llvm::Value *Tmp7 = Builder.CreateMul(LHSi, RHSr, "tmp"); // b*c
- llvm::Value *Tmp8 = Builder.CreateMul(LHSr, RHSi, "tmp"); // a*d
- llvm::Value *Tmp9 = Builder.CreateSub(Tmp7, Tmp8, "tmp"); // bc-ad
+ llvm::Value *Tmp7 = Builder.CreateMul(LHSi, RHSr); // b*c
+ llvm::Value *Tmp8 = Builder.CreateMul(LHSr, RHSi); // a*d
+ llvm::Value *Tmp9 = Builder.CreateSub(Tmp7, Tmp8); // bc-ad
if (Op.Ty->getAs<ComplexType>()->getElementType()->isUnsignedIntegerType()) {
- DSTr = Builder.CreateUDiv(Tmp3, Tmp6, "tmp");
- DSTi = Builder.CreateUDiv(Tmp9, Tmp6, "tmp");
+ DSTr = Builder.CreateUDiv(Tmp3, Tmp6);
+ DSTi = Builder.CreateUDiv(Tmp9, Tmp6);
} else {
- DSTr = Builder.CreateSDiv(Tmp3, Tmp6, "tmp");
- DSTi = Builder.CreateSDiv(Tmp9, Tmp6, "tmp");
+ DSTr = Builder.CreateSDiv(Tmp3, Tmp6);
+ DSTi = Builder.CreateSDiv(Tmp9, Tmp6);
}
}
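
The Tmp1..Tmp9 sequence in both branches follows the textbook identity, obtained by multiplying numerator and denominator by the conjugate of the divisor (Tmp3 = ac+bd, Tmp6 = cc+dd, Tmp9 = bc-ad):

    \frac{a+ib}{c+id}
      = \frac{(a+ib)(c-id)}{(c+id)(c-id)}
      = \frac{(ac+bd) + i\,(bc-ad)}{c^2 + d^2}
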
@@ -735,12 +741,19 @@ ComplexPairTy ComplexExprEmitter::VisitInitListExpr(InitListExpr *E) {
Ignore = TestAndClearIgnoreImag();
(void)Ignore;
assert (Ignore == false && "init list ignored");
- if (E->getNumInits())
+
+ if (E->getNumInits() == 2) {
+ llvm::Value *Real = CGF.EmitScalarExpr(E->getInit(0));
+ llvm::Value *Imag = CGF.EmitScalarExpr(E->getInit(1));
+ return ComplexPairTy(Real, Imag);
+ } else if (E->getNumInits() == 1) {
return Visit(E->getInit(0));
+ }
// Empty init list initializes to null
+ assert(E->getNumInits() == 0 && "Unexpected number of inits");
QualType Ty = E->getType()->getAs<ComplexType>()->getElementType();
- const llvm::Type* LTy = CGF.ConvertType(Ty);
+ llvm::Type* LTy = CGF.ConvertType(Ty);
llvm::Value* zeroConstant = llvm::Constant::getNullValue(LTy);
return ComplexPairTy(zeroConstant, zeroConstant);
}
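
The new two-element branch handles complex initializer lists that spell out the real and imaginary parts directly. A minimal example (Clang accepts C99 _Complex in C++ as an extension):

    _Complex double makeZ(void) {
      _Complex double z = { 1.0, 2.0 };  // getInit(0) = real, getInit(1) = imag
      return z;
    }
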
@@ -751,7 +764,7 @@ ComplexPairTy ComplexExprEmitter::VisitVAArgExpr(VAArgExpr *E) {
if (!ArgPtr) {
CGF.ErrorUnsupported(E, "complex va_arg expression");
- const llvm::Type *EltTy =
+ llvm::Type *EltTy =
CGF.ConvertType(E->getType()->getAs<ComplexType>()->getElementType());
llvm::Value *U = llvm::UndefValue::get(EltTy);
return ComplexPairTy(U, U);
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp
index 45e44dda0f58..3997866ea68e 100644
--- a/lib/CodeGen/CGExprConstant.cpp
+++ b/lib/CodeGen/CGExprConstant.cpp
@@ -138,13 +138,12 @@ void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
// We need to add padding.
CharUnits PadSize = Context.toCharUnitsFromBits(
llvm::RoundUpToAlignment(FieldOffset - NextFieldOffsetInBits,
- Context.Target.getCharAlign()));
+ Context.getTargetInfo().getCharAlign()));
AppendPadding(PadSize);
}
- uint64_t FieldSize =
- Field->getBitWidth()->EvaluateAsInt(Context).getZExtValue();
+ uint64_t FieldSize = Field->getBitWidthValue(Context);
llvm::APInt FieldValue = CI->getValue();
@@ -213,7 +212,7 @@ void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
// padding and then a hole for our i8 to get plopped into.
assert(isa<llvm::ArrayType>(LastElt->getType()) &&
"Expected array padding of undefs");
- const llvm::ArrayType *AT = cast<llvm::ArrayType>(LastElt->getType());
+ llvm::ArrayType *AT = cast<llvm::ArrayType>(LastElt->getType());
assert(AT->getElementType()->isIntegerTy(CharWidth) &&
AT->getNumElements() != 0 &&
"Expected non-empty array padding of undefs");
@@ -281,7 +280,7 @@ void ConstStructBuilder::AppendPadding(CharUnits PadSize) {
if (PadSize.isZero())
return;
- const llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
+ llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
if (PadSize > CharUnits::One())
Ty = llvm::ArrayType::get(Ty, PadSize.getQuantity());
@@ -317,7 +316,7 @@ void ConstStructBuilder::ConvertStructToPacked() {
CharUnits NumChars =
AlignedElementOffsetInChars - ElementOffsetInChars;
- const llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
+ llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
if (NumChars > CharUnits::One())
Ty = llvm::ArrayType::get(Ty, NumChars.getQuantity());
@@ -364,7 +363,7 @@ bool ConstStructBuilder::Build(InitListExpr *ILE) {
continue;
// Don't emit anonymous bitfields, they just affect layout.
- if (Field->isBitField() && !Field->getIdentifier()) {
+ if (Field->isUnnamedBitfield()) {
LastFD = (*Field);
continue;
}
@@ -435,11 +434,11 @@ llvm::Constant *ConstStructBuilder::
// Pick the type to use. If the type is layout identical to the ConvertType
// type then use it, otherwise use whatever the builder produced for us.
- const llvm::StructType *STy =
+ llvm::StructType *STy =
llvm::ConstantStruct::getTypeForElements(CGM.getLLVMContext(),
Builder.Elements,Builder.Packed);
- const llvm::Type *ILETy = CGM.getTypes().ConvertType(ILE->getType());
- if (const llvm::StructType *ILESTy = dyn_cast<llvm::StructType>(ILETy)) {
+ llvm::Type *ILETy = CGM.getTypes().ConvertType(ILE->getType());
+ if (llvm::StructType *ILESTy = dyn_cast<llvm::StructType>(ILETy)) {
if (ILESTy->isLayoutIdentical(STy))
STy = ILESTy;
}
@@ -513,7 +512,7 @@ public:
llvm::Constant *RHS = CGM.EmitConstantExpr(E->getRHS(),
E->getRHS()->getType(), CGF);
- const llvm::Type *ResultType = ConvertType(E->getType());
+ llvm::Type *ResultType = ConvertType(E->getType());
LHS = llvm::ConstantExpr::getPtrToInt(LHS, ResultType);
RHS = llvm::ConstantExpr::getPtrToInt(RHS, ResultType);
@@ -527,7 +526,7 @@ public:
llvm::Constant *C = CGM.EmitConstantExpr(subExpr, subExpr->getType(), CGF);
if (!C) return 0;
- const llvm::Type *destType = ConvertType(E->getType());
+ llvm::Type *destType = ConvertType(E->getType());
switch (E->getCastKind()) {
case CK_ToUnion: {
@@ -571,7 +570,8 @@ public:
case CK_NoOp:
return C;
- case CK_AnyPointerToObjCPointerCast:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
case CK_AnyPointerToBlockPointerCast:
case CK_LValueBitCast:
case CK_BitCast:
@@ -585,9 +585,10 @@ public:
case CK_GetObjCProperty:
case CK_ToVoid:
case CK_Dynamic:
- case CK_ObjCProduceObject:
- case CK_ObjCConsumeObject:
- case CK_ObjCReclaimReturnedObject:
+ case CK_ARCProduceObject:
+ case CK_ARCConsumeObject:
+ case CK_ARCReclaimReturnedObject:
+ case CK_ARCExtendBlockObject:
return 0;
// These might need to be supported for constexpr.
@@ -680,9 +681,9 @@ public:
return Visit(ILE->getInit(0));
std::vector<llvm::Constant*> Elts;
- const llvm::ArrayType *AType =
+ llvm::ArrayType *AType =
cast<llvm::ArrayType>(ConvertType(ILE->getType()));
- const llvm::Type *ElemTy = AType->getElementType();
+ llvm::Type *ElemTy = AType->getElementType();
unsigned NumElements = AType->getNumElements();
// Initialising an array requires us to automatically
@@ -719,7 +720,7 @@ public:
std::vector<llvm::Type*> Types;
for (unsigned i = 0; i < Elts.size(); ++i)
Types.push_back(Elts[i]->getType());
- const llvm::StructType *SType = llvm::StructType::get(AType->getContext(),
+ llvm::StructType *SType = llvm::StructType::get(AType->getContext(),
Types, true);
return llvm::ConstantStruct::get(SType, Elts);
}
@@ -740,6 +741,22 @@ public:
}
llvm::Constant *VisitInitListExpr(InitListExpr *ILE) {
+ if (ILE->getType()->isAnyComplexType() && ILE->getNumInits() == 2) {
+ // Complex type with element initializers
+ Expr *Real = ILE->getInit(0);
+ Expr *Imag = ILE->getInit(1);
+ llvm::Constant *Complex[2];
+ Complex[0] = CGM.EmitConstantExpr(Real, Real->getType(), CGF);
+ if (!Complex[0])
+ return 0;
+ Complex[1] = CGM.EmitConstantExpr(Imag, Imag->getType(), CGF);
+ if (!Complex[1])
+ return 0;
+ llvm::StructType *STy =
+ cast<llvm::StructType>(ConvertType(ILE->getType()));
+ return llvm::ConstantStruct::get(STy, Complex);
+ }
+
if (ILE->getType()->isScalarType()) {
// We have a scalar in braces. Just use the first element.
if (ILE->getNumInits() > 0) {
@@ -762,10 +779,7 @@ public:
if (ILE->getType()->isVectorType())
return 0;
- assert(0 && "Unable to handle InitListExpr");
- // Get rid of control reaches end of void function warning.
- // Not reached.
- return 0;
+ llvm_unreachable("Unable to handle InitListExpr");
}
llvm::Constant *VisitCXXConstructExpr(CXXConstructExpr *E) {
@@ -789,8 +803,8 @@ public:
if (E->getNumArgs()) {
assert(E->getNumArgs() == 1 && "trivial ctor with > 1 argument");
- assert(E->getConstructor()->isCopyConstructor() &&
- "trivial ctor has argument but isn't a copy ctor");
+ assert(E->getConstructor()->isCopyOrMoveConstructor() &&
+ "trivial ctor has argument but isn't a copy/move ctor");
Expr *Arg = E->getArg(0);
assert(CGM.getContext().hasSameUnqualifiedType(Ty, Arg->getType()) &&
@@ -831,7 +845,7 @@ public:
}
// Utility methods
- const llvm::Type *ConvertType(QualType T) {
+ llvm::Type *ConvertType(QualType T) {
return CGM.getTypes().ConvertType(T);
}
@@ -948,10 +962,9 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
if (Success && !Result.HasSideEffects) {
switch (Result.Val.getKind()) {
case APValue::Uninitialized:
- assert(0 && "Constant expressions should be initialized.");
- return 0;
+ llvm_unreachable("Constant expressions should be initialized.");
case APValue::LValue: {
- const llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType);
+ llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType);
llvm::Constant *Offset =
llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
Result.Val.getLValueOffset().getQuantity());
@@ -962,9 +975,9 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
// Apply offset if necessary.
if (!Offset->isNullValue()) {
- const llvm::Type *Type = llvm::Type::getInt8PtrTy(VMContext);
+ llvm::Type *Type = llvm::Type::getInt8PtrTy(VMContext);
llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, Type);
- Casted = llvm::ConstantExpr::getGetElementPtr(Casted, &Offset, 1);
+ Casted = llvm::ConstantExpr::getGetElementPtr(Casted, Offset);
C = llvm::ConstantExpr::getBitCast(Casted, C->getType());
}
@@ -994,7 +1007,7 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
Result.Val.getInt());
if (C->getType()->isIntegerTy(1)) {
- const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
+ llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
C = llvm::ConstantExpr::getZExt(C, BoolTy);
}
return C;
@@ -1013,8 +1026,13 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
NULL);
return llvm::ConstantStruct::get(STy, Complex);
}
- case APValue::Float:
- return llvm::ConstantFP::get(VMContext, Result.Val.getFloat());
+ case APValue::Float: {
+ const llvm::APFloat &Init = Result.Val.getFloat();
+ if (&Init.getSemantics() == &llvm::APFloat::IEEEhalf)
+ return llvm::ConstantInt::get(VMContext, Init.bitcastToAPInt());
+ else
+ return llvm::ConstantFP::get(VMContext, Init);
+ }
case APValue::ComplexFloat: {
llvm::Constant *Complex[2];
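
The half-precision special case exists because half values are carried as i16 here, so a half constant must be emitted as the integer holding its bit pattern. A sketch of the underlying APFloat call (illustrative; context and error handling omitted):

    #include "llvm/ADT/APFloat.h"
    #include "llvm/ADT/APInt.h"

    llvm::APInt halfBits() {
      llvm::APFloat H(llvm::APFloat::IEEEhalf, "1.0"); // half-precision 1.0
      return H.bitcastToAPInt();                       // 16-bit pattern 0x3C00
    }
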
@@ -1030,7 +1048,7 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
return llvm::ConstantStruct::get(STy, Complex);
}
case APValue::Vector: {
- llvm::SmallVector<llvm::Constant *, 4> Inits;
+ SmallVector<llvm::Constant *, 4> Inits;
unsigned NumElts = Result.Val.getVectorLength();
if (Context.getLangOptions().AltiVec &&
@@ -1064,7 +1082,7 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
llvm::Constant* C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
if (C && C->getType()->isIntegerTy(1)) {
- const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
+ llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
C = llvm::ConstantExpr::getZExt(C, BoolTy);
}
return C;
@@ -1181,14 +1199,14 @@ FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
}
static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
- const llvm::Type *baseType,
+ llvm::Type *baseType,
const CXXRecordDecl *base);
static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
const CXXRecordDecl *record,
bool asCompleteObject) {
const CGRecordLayout &layout = CGM.getTypes().getCGRecordLayout(record);
- const llvm::StructType *structure =
+ llvm::StructType *structure =
(asCompleteObject ? layout.getLLVMType()
: layout.getBaseSubobjectLLVMType());
@@ -1212,7 +1230,7 @@ static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
continue;
unsigned fieldIndex = layout.getNonVirtualBaseLLVMFieldNo(base);
- const llvm::Type *baseType = structure->getElementType(fieldIndex);
+ llvm::Type *baseType = structure->getElementType(fieldIndex);
elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
}
@@ -1245,7 +1263,7 @@ static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
// We might have already laid this field out.
if (elements[fieldIndex]) continue;
- const llvm::Type *baseType = structure->getElementType(fieldIndex);
+ llvm::Type *baseType = structure->getElementType(fieldIndex);
elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
}
}
@@ -1261,7 +1279,7 @@ static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
/// Emit the null constant for a base subobject.
static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
- const llvm::Type *baseType,
+ llvm::Type *baseType,
const CXXRecordDecl *base) {
const CGRecordLayout &baseLayout = CGM.getTypes().getCGRecordLayout(base);
@@ -1277,7 +1295,7 @@ static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
// Otherwise, some bases are represented as arrays of i8 if the size
// of the base is smaller than its corresponding LLVM type. Figure
// out how many elements this base array has.
- const llvm::ArrayType *baseArrayType = cast<llvm::ArrayType>(baseType);
+ llvm::ArrayType *baseArrayType = cast<llvm::ArrayType>(baseType);
unsigned numBaseElements = baseArrayType->getNumElements();
// Fill in null data member pointers.
@@ -1287,7 +1305,7 @@ static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
// Now go through all other elements and zero them out.
if (numBaseElements) {
- const llvm::Type *i8 = llvm::Type::getInt8Ty(CGM.getLLVMContext());
+ llvm::Type *i8 = llvm::Type::getInt8Ty(CGM.getLLVMContext());
llvm::Constant *i8_zero = llvm::Constant::getNullValue(i8);
for (unsigned i = 0; i != numBaseElements; ++i) {
if (!baseElements[i])
@@ -1312,7 +1330,7 @@ llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
for (unsigned i = 0; i != NumElements; ++i)
Array[i] = Element;
- const llvm::ArrayType *ATy =
+ llvm::ArrayType *ATy =
cast<llvm::ArrayType>(getTypes().ConvertTypeForMem(T));
return llvm::ConstantArray::get(ATy, Array);
}
@@ -1330,3 +1348,8 @@ llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
// A NULL pointer is represented as -1.
return getCXXABI().EmitNullMemberPointer(T->castAs<MemberPointerType>());
}
+
+llvm::Constant *
+CodeGenModule::EmitNullConstantForBase(const CXXRecordDecl *Record) {
+ return ::EmitNullConstant(*this, Record, false);
+}
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index a73e667e780e..3a9fbeed9d69 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -78,7 +78,7 @@ public:
return I;
}
- const llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
+ llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
LValue EmitCheckedLValue(const Expr *E) { return CGF.EmitCheckedLValue(E); }
@@ -153,8 +153,7 @@ public:
Value *VisitStmt(Stmt *S) {
S->dump(CGF.getContext().getSourceManager());
- assert(0 && "Stmt can't have complex result type!");
- return 0;
+ llvm_unreachable("Stmt can't have complex result type!");
}
Value *VisitExpr(Expr *S);
@@ -343,6 +342,10 @@ public:
}
// C++
+ Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+
Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
return Visit(DAE->getExpr());
}
@@ -510,6 +513,7 @@ public:
return CGF.EmitObjCStringLiteral(E);
}
Value *VisitAsTypeExpr(AsTypeExpr *CE);
+ Value *VisitAtomicExpr(AtomicExpr *AE);
};
} // end anonymous namespace.
@@ -548,14 +552,24 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
if (DstType->isVoidType()) return 0;
+ llvm::Type *SrcTy = Src->getType();
+
+ // Floating casts might be a bit special: if we're doing casts to / from half
+ // FP, we should go via special intrinsics.
+ if (SrcType->isHalfType()) {
+ Src = Builder.CreateCall(CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16), Src);
+ SrcType = CGF.getContext().FloatTy;
+ SrcTy = llvm::Type::getFloatTy(VMContext);
+ }
+
// Handle conversions to bool first, they are special: comparisons against 0.
if (DstType->isBooleanType())
return EmitConversionToBool(Src, SrcType);
- const llvm::Type *DstTy = ConvertType(DstType);
+ llvm::Type *DstTy = ConvertType(DstType);
// Ignore conversions like int -> uint.
- if (Src->getType() == DstTy)
+ if (SrcTy == DstTy)
return Src;
// Handle pointer conversions next: pointers can only be converted to/from
@@ -563,13 +577,13 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
// some native types (like Obj-C id) may map to a pointer type.
if (isa<llvm::PointerType>(DstTy)) {
// The source value may be an integer, or a pointer.
- if (isa<llvm::PointerType>(Src->getType()))
+ if (isa<llvm::PointerType>(SrcTy))
return Builder.CreateBitCast(Src, DstTy, "conv");
assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
// First, convert to the correct width so that we control the kind of
// extension.
- const llvm::Type *MiddleTy = CGF.IntPtrTy;
+ llvm::Type *MiddleTy = CGF.IntPtrTy;
bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
llvm::Value* IntResult =
Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
@@ -577,7 +591,7 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
}
- if (isa<llvm::PointerType>(Src->getType())) {
+ if (isa<llvm::PointerType>(SrcTy)) {
// Must be a ptr to int cast.
assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
return Builder.CreatePtrToInt(Src, DstTy, "conv");
@@ -592,10 +606,10 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
// Insert the element in element zero of an undef vector
llvm::Value *UnV = llvm::UndefValue::get(DstTy);
llvm::Value *Idx = Builder.getInt32(0);
- UnV = Builder.CreateInsertElement(UnV, Elt, Idx, "tmp");
+ UnV = Builder.CreateInsertElement(UnV, Elt, Idx);
// Splat the element across to all elements
- llvm::SmallVector<llvm::Constant*, 16> Args;
+ SmallVector<llvm::Constant*, 16> Args;
unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
for (unsigned i = 0; i != NumElements; ++i)
Args.push_back(Builder.getInt32(0));
@@ -606,34 +620,47 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
}
// Allow bitcast from vector to integer/fp of the same size.
- if (isa<llvm::VectorType>(Src->getType()) ||
+ if (isa<llvm::VectorType>(SrcTy) ||
isa<llvm::VectorType>(DstTy))
return Builder.CreateBitCast(Src, DstTy, "conv");
// Finally, we have the arithmetic types: real int/float.
- if (isa<llvm::IntegerType>(Src->getType())) {
+ Value *Res = NULL;
+ llvm::Type *ResTy = DstTy;
+
+ // Cast to half via float
+ if (DstType->isHalfType())
+ DstTy = llvm::Type::getFloatTy(VMContext);
+
+ if (isa<llvm::IntegerType>(SrcTy)) {
bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
if (isa<llvm::IntegerType>(DstTy))
- return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
+ Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
else if (InputSigned)
- return Builder.CreateSIToFP(Src, DstTy, "conv");
+ Res = Builder.CreateSIToFP(Src, DstTy, "conv");
else
- return Builder.CreateUIToFP(Src, DstTy, "conv");
- }
-
- assert(Src->getType()->isFloatingPointTy() && "Unknown real conversion");
- if (isa<llvm::IntegerType>(DstTy)) {
+ Res = Builder.CreateUIToFP(Src, DstTy, "conv");
+ } else if (isa<llvm::IntegerType>(DstTy)) {
+ assert(SrcTy->isFloatingPointTy() && "Unknown real conversion");
if (DstType->isSignedIntegerOrEnumerationType())
- return Builder.CreateFPToSI(Src, DstTy, "conv");
+ Res = Builder.CreateFPToSI(Src, DstTy, "conv");
else
- return Builder.CreateFPToUI(Src, DstTy, "conv");
+ Res = Builder.CreateFPToUI(Src, DstTy, "conv");
+ } else {
+ assert(SrcTy->isFloatingPointTy() && DstTy->isFloatingPointTy() &&
+ "Unknown real conversion");
+ if (DstTy->getTypeID() < SrcTy->getTypeID())
+ Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
+ else
+ Res = Builder.CreateFPExt(Src, DstTy, "conv");
}
- assert(DstTy->isFloatingPointTy() && "Unknown real conversion");
- if (DstTy->getTypeID() < Src->getType()->getTypeID())
- return Builder.CreateFPTrunc(Src, DstTy, "conv");
- else
- return Builder.CreateFPExt(Src, DstTy, "conv");
+ if (DstTy != ResTy) {
+ assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
+ Res = Builder.CreateCall(CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16), Res);
+ }
+
+ return Res;
}
/// EmitComplexToScalarConversion - Emit a conversion from the specified complex
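
A sketch (assumed helper names, not the actual emitter) of the fp16 detour used above: since half has no native arithmetic form here, conversions widen through float with @llvm.convert.from.fp16 and narrow back with @llvm.convert.to.fp16. It assumes the surrounding Clang CodeGen headers:

    static llvm::Value *halfToFloat(CodeGenFunction &CGF,
                                    llvm::Value *HalfBits) {
      // i16 half bits -> float via @llvm.convert.from.fp16
      return CGF.Builder.CreateCall(
          CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16), HalfBits);
    }

    static llvm::Value *floatToHalf(CodeGenFunction &CGF, llvm::Value *F) {
      // float -> i16 half bits via @llvm.convert.to.fp16
      return CGF.Builder.CreateCall(
          CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16), F);
    }
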
@@ -686,14 +713,14 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
Value *Mask;
- const llvm::VectorType *LTy = cast<llvm::VectorType>(LHS->getType());
+ llvm::VectorType *LTy = cast<llvm::VectorType>(LHS->getType());
unsigned LHSElts = LTy->getNumElements();
if (E->getNumSubExprs() == 3) {
Mask = CGF.EmitScalarExpr(E->getExpr(2));
// Shuffle LHS & RHS into one input vector.
- llvm::SmallVector<llvm::Constant*, 32> concat;
+ SmallVector<llvm::Constant*, 32> concat;
for (unsigned i = 0; i != LHSElts; ++i) {
concat.push_back(Builder.getInt32(2*i));
concat.push_back(Builder.getInt32(2*i+1));
@@ -706,7 +733,7 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
Mask = RHS;
}
- const llvm::VectorType *MTy = cast<llvm::VectorType>(Mask->getType());
+ llvm::VectorType *MTy = cast<llvm::VectorType>(Mask->getType());
llvm::Constant* EltMask;
// Treat vec3 like vec4.
@@ -721,7 +748,7 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
(1 << llvm::Log2_32(LHSElts))-1);
// Mask off the high bits of each shuffle index.
- llvm::SmallVector<llvm::Constant *, 32> MaskV;
+ SmallVector<llvm::Constant *, 32> MaskV;
for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i)
MaskV.push_back(EltMask);
@@ -734,7 +761,7 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
// n = extract mask i
// x = extract val n
// newv = insert newv, x, i
- const llvm::VectorType *RTy = llvm::VectorType::get(LTy->getElementType(),
+ llvm::VectorType *RTy = llvm::VectorType::get(LTy->getElementType(),
MTy->getNumElements());
Value* NewV = llvm::UndefValue::get(RTy);
for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
@@ -760,8 +787,8 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
// Handle vec3 special since the index will be off by one for the RHS.
- const llvm::VectorType *VTy = cast<llvm::VectorType>(V1->getType());
- llvm::SmallVector<llvm::Constant*, 32> indices;
+ llvm::VectorType *VTy = cast<llvm::VectorType>(V1->getType());
+ SmallVector<llvm::Constant*, 32> indices;
for (unsigned i = 2; i < E->getNumSubExprs(); i++) {
unsigned Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2);
if (VTy->getNumElements() == 3 && Idx > 3)
@@ -815,7 +842,7 @@ Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
}
static llvm::Constant *getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
- unsigned Off, const llvm::Type *I32Ty) {
+ unsigned Off, llvm::Type *I32Ty) {
int MV = SVI->getMaskValue(Idx);
if (MV == -1)
return llvm::UndefValue::get(I32Ty);
@@ -831,12 +858,17 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
if (E->hadArrayRangeDesignator())
CGF.ErrorUnsupported(E, "GNU array range designator extension");
- const llvm::VectorType *VType =
+ llvm::VectorType *VType =
dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
- // We have a scalar in braces. Just use the first element.
- if (!VType)
+ if (!VType) {
+ if (NumInitElements == 0) {
+ // C++11 value-initialization for the scalar.
+ return EmitNullValue(E->getType());
+ }
+ // We have a scalar in braces. Just use the first element.
return Visit(E->getInit(0));
+ }
unsigned ResElts = VType->getNumElements();
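
The new zero-init branch corresponds to C++11 empty-brace value-initialization of a scalar:

    int zeroInit() {
      int x{};   // empty InitListExpr on a scalar: value-initialized to 0
      return x;
    }
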
@@ -851,9 +883,9 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
for (unsigned i = 0; i != NumInitElements; ++i) {
Expr *IE = E->getInit(i);
Value *Init = Visit(IE);
- llvm::SmallVector<llvm::Constant*, 16> Args;
+ SmallVector<llvm::Constant*, 16> Args;
- const llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
+ llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
// Handle scalar elements. If the scalar initializer is actually one
// element of a different vector of the same width, use shuffle instead of
@@ -911,7 +943,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
if (isa<ExtVectorElementExpr>(IE)) {
llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
Value *SVOp = SVI->getOperand(0);
- const llvm::VectorType *OpTy = cast<llvm::VectorType>(SVOp->getType());
+ llvm::VectorType *OpTy = cast<llvm::VectorType>(SVOp->getType());
if (OpTy->getNumElements() == ResElts) {
for (unsigned j = 0; j != CurIdx; ++j) {
@@ -968,7 +1000,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
// FIXME: evaluate codegen vs. shuffling against constant null vector.
// Emit remaining default initializers.
- const llvm::Type *EltTy = VType->getElementType();
+ llvm::Type *EltTy = VType->getElementType();
// Emit remaining default initializers
for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
@@ -1023,8 +1055,9 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
ConvertType(CGF.getContext().getPointerType(DestTy)));
return EmitLoadOfLValue(CGF.MakeAddrLValue(V, DestTy));
}
-
- case CK_AnyPointerToObjCPointerCast:
+
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
case CK_AnyPointerToBlockPointerCast:
case CK_BitCast: {
Value *Src = Visit(const_cast<Expr*>(E));
@@ -1075,7 +1108,9 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
V = Builder.CreateStructGEP(V, 0, "arraydecay");
}
- return V;
+ // Make sure the array decay ends up being the right type. This matters if
+ // the array type was of an incomplete type.
+ return CGF.Builder.CreateBitCast(V, ConvertType(CE->getType()));
}
case CK_FunctionToPointerDecay:
return EmitLValue(E).getAddress();
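
A plausible trigger for the added bitcast (hypothetical example, not taken from the patch) is decay of an array whose element type is still incomplete, where the LLVM-level array and element types need not agree yet:

    struct S;                      // S is incomplete here
    extern S things[10];           // declaring an array of incomplete
                                   // element type is legal
    S *first() { return things; }  // decay: bitcast reconciles the types
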
@@ -1108,15 +1143,17 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
}
- case CK_ObjCProduceObject:
+ case CK_ARCProduceObject:
return CGF.EmitARCRetainScalarExpr(E);
- case CK_ObjCConsumeObject:
+ case CK_ARCConsumeObject:
return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
- case CK_ObjCReclaimReturnedObject: {
+ case CK_ARCReclaimReturnedObject: {
llvm::Value *value = Visit(E);
value = CGF.EmitARCRetainAutoreleasedReturnValue(value);
return CGF.EmitObjCConsumeObject(E->getType(), value);
}
+ case CK_ARCExtendBlockObject:
+ return CGF.EmitARCExtendBlockObject(E);
case CK_FloatingRealToComplex:
case CK_FloatingComplexCast:
@@ -1147,7 +1184,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
// First, convert to the correct width so that we control the kind of
// extension.
- const llvm::Type *MiddleTy = CGF.IntPtrTy;
+ llvm::Type *MiddleTy = CGF.IntPtrTy;
bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
llvm::Value* IntResult =
Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
@@ -1163,16 +1200,16 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
return 0;
}
case CK_VectorSplat: {
- const llvm::Type *DstTy = ConvertType(DestTy);
+ llvm::Type *DstTy = ConvertType(DestTy);
Value *Elt = Visit(const_cast<Expr*>(E));
// Insert the element in element zero of an undef vector
llvm::Value *UnV = llvm::UndefValue::get(DstTy);
llvm::Value *Idx = Builder.getInt32(0);
- UnV = Builder.CreateInsertElement(UnV, Elt, Idx, "tmp");
+ UnV = Builder.CreateInsertElement(UnV, Elt, Idx);
// Splat the element across to all elements
- llvm::SmallVector<llvm::Constant*, 16> Args;
+ SmallVector<llvm::Constant*, 16> Args;
unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
llvm::Constant *Zero = Builder.getInt32(0);
for (unsigned i = 0; i < NumElements; i++)
@@ -1188,7 +1225,6 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
case CK_FloatingToIntegral:
case CK_FloatingCast:
return EmitScalarConversion(Visit(E), E->getType(), DestTy);
-
case CK_IntegralToBoolean:
return EmitIntToBoolConversion(Visit(E));
case CK_PointerToBoolean:
@@ -1253,10 +1289,8 @@ EmitAddConsiderOverflowBehavior(const UnaryOperator *E,
BinOp.Opcode = BO_Add;
BinOp.E = E;
return EmitOverflowCheckedBinOp(BinOp);
- break;
}
- assert(false && "Unknown SignedOverflowBehaviorTy");
- return 0;
+ llvm_unreachable("Unknown SignedOverflowBehaviorTy");
}
llvm::Value *
@@ -1344,6 +1378,14 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
} else if (type->isRealFloatingType()) {
// Add the inc/dec to the real part.
llvm::Value *amt;
+
+ if (type->isHalfType()) {
+ // Another special case: half FP increment should be done via float
+ value =
+ Builder.CreateCall(CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16),
+ input);
+ }
+
if (value->getType()->isFloatTy())
amt = llvm::ConstantFP::get(VMContext,
llvm::APFloat(static_cast<float>(amount)));
@@ -1359,6 +1401,11 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
}
value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");
+ if (type->isHalfType())
+ value =
+ Builder.CreateCall(CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16),
+ value);
+
// Objective-C pointer types.
} else {
const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
@@ -1375,13 +1422,13 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
value = Builder.CreateInBoundsGEP(value, sizeValue, "incdec.objptr");
value = Builder.CreateBitCast(value, input->getType());
}
-
+
// Store the updated result through the lvalue.
if (LV.isBitField())
CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
else
CGF.EmitStoreThroughLValue(RValue::get(value), LV);
-
+
// If this is a postinc, return the value read from memory, otherwise use the
// updated value.
return isPre ? value : input;
@@ -1432,7 +1479,7 @@ Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
// Loop over the components of the offsetof to compute the value.
unsigned n = E->getNumComponents();
- const llvm::Type* ResultType = ConvertType(E->getType());
+ llvm::Type* ResultType = ConvertType(E->getType());
llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
QualType CurrentType = E->getTypeSourceInfo()->getType();
for (unsigned i = 0; i != n; ++i) {
@@ -1686,7 +1733,7 @@ void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
llvm::next(insertPt));
llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
- const llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
+ llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
if (Ops.Ty->hasSignedIntegerRepresentation()) {
llvm::Value *IntMin =
@@ -1769,7 +1816,7 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
IID = llvm::Intrinsic::smul_with_overflow;
break;
default:
- assert(false && "Unsupported operation for overflow detection");
+ llvm_unreachable("Unsupported operation for overflow detection");
IID = 0;
}
OpID <<= 1;
@@ -2065,7 +2112,7 @@ enum IntrinsicType { VCMPEQ, VCMPGT };
static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
BuiltinType::Kind ElemKind) {
switch (ElemKind) {
- default: assert(0 && "unexpected element type");
+ default: llvm_unreachable("unexpected element type");
case BuiltinType::Char_U:
case BuiltinType::UChar:
return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
@@ -2135,7 +2182,7 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
BuiltinType::Kind ElementKind = BTy->getKind();
switch(E->getOpcode()) {
- default: assert(0 && "is not a comparison operation");
+ default: llvm_unreachable("is not a comparison operation");
case BO_EQ:
CR6 = CR6_LT;
ID = GetIntrinsic(VCMPEQ, ElementKind);
@@ -2294,7 +2341,7 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
}
Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
- const llvm::Type *ResTy = ConvertType(E->getType());
+ llvm::Type *ResTy = ConvertType(E->getType());
// If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
// If we have 1 && X, just emit X without inserting the control flow.
@@ -2349,7 +2396,7 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
}
Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
- const llvm::Type *ResTy = ConvertType(E->getType());
+ llvm::Type *ResTy = ConvertType(E->getType());
// If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
// If we have 0 || X, just emit X without inserting the control flow.
@@ -2471,11 +2518,11 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
llvm::Value *LHS = Visit(lhsExpr);
llvm::Value *RHS = Visit(rhsExpr);
- const llvm::Type *condType = ConvertType(condExpr->getType());
- const llvm::VectorType *vecTy = cast<llvm::VectorType>(condType);
+ llvm::Type *condType = ConvertType(condExpr->getType());
+ llvm::VectorType *vecTy = cast<llvm::VectorType>(condType);
unsigned numElem = vecTy->getNumElements();
- const llvm::Type *elemType = vecTy->getElementType();
+ llvm::Type *elemType = vecTy->getElementType();
std::vector<llvm::Constant*> Zvals;
for (unsigned i = 0; i < numElem; ++i)
@@ -2493,7 +2540,7 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
llvm::Value *RHSTmp = RHS;
llvm::Value *LHSTmp = LHS;
bool wasCast = false;
- const llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
+ llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
if (rhsVTy->getElementType()->isFloatTy()) {
RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
@@ -2578,11 +2625,11 @@ Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
- const llvm::Type *DstTy = ConvertType(E->getType());
+ llvm::Type *DstTy = ConvertType(E->getType());
// Going from vec4->vec3 or vec3->vec4 is a special case and requires
// a shuffle vector instead of a bitcast.
- const llvm::Type *SrcTy = Src->getType();
+ llvm::Type *SrcTy = Src->getType();
if (isa<llvm::VectorType>(DstTy) && isa<llvm::VectorType>(SrcTy)) {
unsigned numElementsDst = cast<llvm::VectorType>(DstTy)->getNumElements();
unsigned numElementsSrc = cast<llvm::VectorType>(SrcTy)->getNumElements();
@@ -2592,15 +2639,15 @@ Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
// In the case of going from int4->float3, a bitcast is needed before
// doing a shuffle.
- const llvm::Type *srcElemTy =
+ llvm::Type *srcElemTy =
cast<llvm::VectorType>(SrcTy)->getElementType();
- const llvm::Type *dstElemTy =
+ llvm::Type *dstElemTy =
cast<llvm::VectorType>(DstTy)->getElementType();
if ((srcElemTy->isIntegerTy() && dstElemTy->isFloatTy())
|| (srcElemTy->isFloatTy() && dstElemTy->isIntegerTy())) {
// Create a float type of the same size as the source or destination.
- const llvm::VectorType *newSrcTy = llvm::VectorType::get(dstElemTy,
+ llvm::VectorType *newSrcTy = llvm::VectorType::get(dstElemTy,
numElementsSrc);
Src = Builder.CreateBitCast(Src, newSrcTy, "astypeCast");
@@ -2608,7 +2655,7 @@ Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
llvm::Value *UnV = llvm::UndefValue::get(Src->getType());
- llvm::SmallVector<llvm::Constant*, 3> Args;
+ SmallVector<llvm::Constant*, 3> Args;
Args.push_back(Builder.getInt32(0));
Args.push_back(Builder.getInt32(1));
Args.push_back(Builder.getInt32(2));
@@ -2626,6 +2673,10 @@ Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
return Builder.CreateBitCast(Src, DstTy, "astype");
}
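
For reference, the vec4->vec3 special case described above comes down to a three-index shufflevector rather than a plain bitcast. A sketch, assuming an IRBuilder<> B and a <4 x float> value Src (hypothetical names):

    llvm::Value *Undef = llvm::UndefValue::get(Src->getType());
    llvm::Constant *Idx[] = { B.getInt32(0), B.getInt32(1), B.getInt32(2) };
    llvm::Value *Vec3 = B.CreateShuffleVector(
        Src, Undef, llvm::ConstantVector::get(Idx), "astypeTrunc");
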
+Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
+ return CGF.EmitAtomicExpr(E).getScalarVal();
+}
+
//===----------------------------------------------------------------------===//
// Entry Point into this File
//===----------------------------------------------------------------------===//
@@ -2678,7 +2729,7 @@ LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
// object->isa or (*object).isa
// Generate code as for: *(Class*)object
// build Class* type
- const llvm::Type *ClassPtrTy = ConvertType(E->getType());
+ llvm::Type *ClassPtrTy = ConvertType(E->getType());
Expr *BaseExpr = E->getBase();
if (BaseExpr->isRValue()) {
@@ -2744,8 +2795,7 @@ LValue CodeGenFunction::EmitCompoundAssignmentLValue(
case BO_LOr:
case BO_Assign:
case BO_Comma:
- assert(false && "Not valid compound assignment operators");
- break;
+ llvm_unreachable("Not valid compound assignment operators");
}
llvm_unreachable("Unhandled compound assignment operator");
diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp
index 426cca00140c..51f20534d116 100644
--- a/lib/CodeGen/CGObjC.cpp
+++ b/lib/CodeGen/CGObjC.cpp
@@ -33,7 +33,7 @@ tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e);
/// Given the address of a variable of pointer type, find the correct
/// null to store into it.
static llvm::Constant *getNullForVariable(llvm::Value *addr) {
- const llvm::Type *type =
+ llvm::Type *type =
cast<llvm::PointerType>(addr->getType())->getElementType();
return llvm::ConstantPointerNull::get(cast<llvm::PointerType>(type));
}
@@ -80,6 +80,53 @@ static RValue AdjustRelatedResultType(CodeGenFunction &CGF,
CGF.ConvertType(E->getType())));
}
+/// Decide whether to extend the lifetime of the receiver of a
+/// returns-inner-pointer message.
+static bool
+shouldExtendReceiverForInnerPointerMessage(const ObjCMessageExpr *message) {
+ switch (message->getReceiverKind()) {
+
+ // For a normal instance message, we should extend unless the
+ // receiver is loaded from a variable with precise lifetime.
+ case ObjCMessageExpr::Instance: {
+ const Expr *receiver = message->getInstanceReceiver();
+ const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(receiver);
+ if (!ice || ice->getCastKind() != CK_LValueToRValue) return true;
+ receiver = ice->getSubExpr()->IgnoreParens();
+
+ // Only __strong variables.
+ if (receiver->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
+ return true;
+
+ // All ivars and fields have precise lifetime.
+ if (isa<MemberExpr>(receiver) || isa<ObjCIvarRefExpr>(receiver))
+ return false;
+
+ // Otherwise, check for variables.
+ const DeclRefExpr *declRef = dyn_cast<DeclRefExpr>(ice->getSubExpr());
+ if (!declRef) return true;
+ const VarDecl *var = dyn_cast<VarDecl>(declRef->getDecl());
+ if (!var) return true;
+
+ // All variables have precise lifetime except local variables with
+ // automatic storage duration that aren't specially marked.
+ return (var->hasLocalStorage() &&
+ !var->hasAttr<ObjCPreciseLifetimeAttr>());
+ }
+
+ case ObjCMessageExpr::Class:
+ case ObjCMessageExpr::SuperClass:
+ // It's never necessary for class objects.
+ return false;
+
+ case ObjCMessageExpr::SuperInstance:
+ // We generally assume that 'self' lives throughout a method call.
+ return false;
+ }
+
+ llvm_unreachable("invalid receiver kind");
+}
+
RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
ReturnValueSlot Return) {
// Only the lookup mechanism and first two arguments of the method
@@ -88,6 +135,8 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
bool isDelegateInit = E->isDelegateInitCall();
+ const ObjCMethodDecl *method = E->getMethodDecl();
+
// We don't retain the receiver in delegate init calls, and this is
// safe because the receiver value is always loaded from 'self',
// which we zero out. We don't want to Block_copy block receivers,
@@ -95,8 +144,8 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
bool retainSelf =
(!isDelegateInit &&
CGM.getLangOptions().ObjCAutoRefCount &&
- E->getMethodDecl() &&
- E->getMethodDecl()->hasAttr<NSConsumesSelfAttr>());
+ method &&
+ method->hasAttr<NSConsumesSelfAttr>());
CGObjCRuntime &Runtime = CGM.getObjCRuntime();
bool isSuperMessage = false;
@@ -112,8 +161,7 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
TryEmitResult ter = tryEmitARCRetainScalarExpr(*this,
E->getInstanceReceiver());
Receiver = ter.getPointer();
- if (!ter.getInt())
- Receiver = EmitARCRetainNonBlock(Receiver);
+ if (ter.getInt()) retainSelf = false;
} else
Receiver = EmitScalarExpr(E->getInstanceReceiver());
break;
@@ -126,9 +174,6 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
assert(OID && "Invalid Objective-C class message send");
Receiver = Runtime.GetClass(Builder, OID);
isClassMessage = true;
-
- if (retainSelf)
- Receiver = EmitARCRetainNonBlock(Receiver);
break;
}
@@ -136,9 +181,6 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
ReceiverType = E->getSuperType();
Receiver = LoadObjCSelf();
isSuperMessage = true;
-
- if (retainSelf)
- Receiver = EmitARCRetainNonBlock(Receiver);
break;
case ObjCMessageExpr::SuperClass:
@@ -146,17 +188,25 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
Receiver = LoadObjCSelf();
isSuperMessage = true;
isClassMessage = true;
-
- if (retainSelf)
- Receiver = EmitARCRetainNonBlock(Receiver);
break;
}
+ if (retainSelf)
+ Receiver = EmitARCRetainNonBlock(Receiver);
+
+ // In ARC, we sometimes want to "extend the lifetime"
+ // (i.e. retain+autorelease) of receivers of returns-inner-pointer
+ // messages.
+ if (getLangOptions().ObjCAutoRefCount && method &&
+ method->hasAttr<ObjCReturnsInnerPointerAttr>() &&
+ shouldExtendReceiverForInnerPointerMessage(E))
+ Receiver = EmitARCRetainAutorelease(ReceiverType, Receiver);
+
QualType ResultType =
- E->getMethodDecl() ? E->getMethodDecl()->getResultType() : E->getType();
+ method ? method->getResultType() : E->getType();
CallArgList Args;
- EmitCallArgs(Args, E->getMethodDecl(), E->arg_begin(), E->arg_end());
+ EmitCallArgs(Args, method, E->arg_begin(), E->arg_end());
// For delegate init calls in ARC, do an unsafe store of null into
// self. This represents the call taking direct ownership of that
@@ -189,12 +239,12 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
Receiver,
isClassMessage,
Args,
- E->getMethodDecl());
+ method);
} else {
result = Runtime.GenerateMessageSend(*this, Return, ResultType,
E->getSelector(),
Receiver, Args, OID,
- E->getMethodDecl());
+ method);
}
// For delegate init calls in ARC, implicitly store the result of
@@ -206,14 +256,14 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
// The delegate return type isn't necessarily a matching type; in
// fact, it's quite likely to be 'id'.
- const llvm::Type *selfTy =
+ llvm::Type *selfTy =
cast<llvm::PointerType>(selfAddr->getType())->getElementType();
newSelf = Builder.CreateBitCast(newSelf, selfTy);
Builder.CreateStore(newSelf, selfAddr);
}
- return AdjustRelatedResultType(*this, E, E->getMethodDecl(), result);
+ return AdjustRelatedResultType(*this, E, method, result);
}
namespace {
@@ -263,7 +313,7 @@ void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
args.push_back(OMD->getSelfDecl());
args.push_back(OMD->getCmdDecl());
- for (ObjCMethodDecl::param_iterator PI = OMD->param_begin(),
+ for (ObjCMethodDecl::param_const_iterator PI = OMD->param_begin(),
E = OMD->param_end(); PI != E; ++PI)
args.push_back(*PI);
@@ -285,39 +335,6 @@ void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
LValue lvalue, QualType type);
-void CodeGenFunction::GenerateObjCGetterBody(ObjCIvarDecl *Ivar,
- bool IsAtomic, bool IsStrong) {
- LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(),
- Ivar, 0);
- llvm::Value *GetCopyStructFn =
- CGM.getObjCRuntime().GetGetStructFunction();
- CodeGenTypes &Types = CGM.getTypes();
- // objc_copyStruct (ReturnValue, &structIvar,
- // sizeof (Type of Ivar), isAtomic, false);
- CallArgList Args;
- RValue RV = RValue::get(Builder.CreateBitCast(ReturnValue, VoidPtrTy));
- Args.add(RV, getContext().VoidPtrTy);
- RV = RValue::get(Builder.CreateBitCast(LV.getAddress(), VoidPtrTy));
- Args.add(RV, getContext().VoidPtrTy);
- // sizeof (Type of Ivar)
- CharUnits Size = getContext().getTypeSizeInChars(Ivar->getType());
- llvm::Value *SizeVal =
- llvm::ConstantInt::get(Types.ConvertType(getContext().LongTy),
- Size.getQuantity());
- Args.add(RValue::get(SizeVal), getContext().LongTy);
- llvm::Value *isAtomic =
- llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy),
- IsAtomic ? 1 : 0);
- Args.add(RValue::get(isAtomic), getContext().BoolTy);
- llvm::Value *hasStrong =
- llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy),
- IsStrong ? 1 : 0);
- Args.add(RValue::get(hasStrong), getContext().BoolTy);
- EmitCall(Types.getFunctionInfo(getContext().VoidTy, Args,
- FunctionType::ExtInfo()),
- GetCopyStructFn, ReturnValueSlot(), Args);
-}
-
/// Generate an Objective-C method. An Objective-C method is a C function with
 /// its pointer, name, and types registered in the class structure.
void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) {
@@ -326,218 +343,599 @@ void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) {
FinishFunction(OMD->getBodyRBrace());
}
-// FIXME: I wasn't sure about the synthesis approach. If we end up generating an
-// AST for the whole body we can just fall back to having a GenerateFunction
-// which takes the body Stmt.
+/// emitStructGetterCall - Call the runtime function to load a property
+/// into the return value slot.
+static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
+ bool isAtomic, bool hasStrong) {
+ ASTContext &Context = CGF.getContext();
+
+ llvm::Value *src =
+ CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(),
+ ivar, 0).getAddress();
+
+ // objc_copyStruct (ReturnValue, &structIvar,
+ // sizeof (Type of Ivar), isAtomic, false);
+ CallArgList args;
+
+ llvm::Value *dest = CGF.Builder.CreateBitCast(CGF.ReturnValue, CGF.VoidPtrTy);
+ args.add(RValue::get(dest), Context.VoidPtrTy);
+
+ src = CGF.Builder.CreateBitCast(src, CGF.VoidPtrTy);
+ args.add(RValue::get(src), Context.VoidPtrTy);
+
+ CharUnits size = CGF.getContext().getTypeSizeInChars(ivar->getType());
+ args.add(RValue::get(CGF.CGM.getSize(size)), Context.getSizeType());
+ args.add(RValue::get(CGF.Builder.getInt1(isAtomic)), Context.BoolTy);
+ args.add(RValue::get(CGF.Builder.getInt1(hasStrong)), Context.BoolTy);
+
+ llvm::Value *fn = CGF.CGM.getObjCRuntime().GetGetStructFunction();
+ CGF.EmitCall(CGF.getTypes().getFunctionInfo(Context.VoidTy, args,
+ FunctionType::ExtInfo()),
+ fn, ReturnValueSlot(), args);
+}
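
The argument list assembled above matches the runtime's struct-copy helper. Flattened to plain C++ types purely for illustration (BOOL shown as signed char; not copied from a runtime header):

    #include <cstddef>

    extern "C" void objc_copyStruct(void *dest, const void *src,
                                    std::ptrdiff_t size, signed char atomic,
                                    signed char hasStrong);
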
+
+/// Determine whether the given architecture supports unaligned atomic
+/// accesses. They don't have to be fast, just faster than a function
+/// call and a mutex.
+static bool hasUnalignedAtomics(llvm::Triple::ArchType arch) {
+ // FIXME: Allow unaligned atomic load/store on x86. (It is not
+ // currently supported by the backend.)
+  return false;
+}
+
+/// Return the maximum size that permits atomic accesses for the given
+/// architecture.
+static CharUnits getMaxAtomicAccessSize(CodeGenModule &CGM,
+ llvm::Triple::ArchType arch) {
+ // ARM has 8-byte atomic accesses, but it's not clear whether we
+ // want to rely on them here.
+
+ // In the default case, just assume that any size up to a pointer is
+ // fine given adequate alignment.
+ return CharUnits::fromQuantity(CGM.PointerSizeInBytes);
+}
+
+namespace {
+ class PropertyImplStrategy {
+ public:
+ enum StrategyKind {
+ /// The 'native' strategy is to use the architecture's provided
+ /// reads and writes.
+ Native,
+
+ /// Use objc_setProperty and objc_getProperty.
+ GetSetProperty,
+
+ /// Use objc_setProperty for the setter, but use expression
+ /// evaluation for the getter.
+ SetPropertyAndExpressionGet,
+
+ /// Use objc_copyStruct.
+ CopyStruct,
+
+ /// The 'expression' strategy is to emit normal assignment or
+ /// lvalue-to-rvalue expressions.
+ Expression
+ };
+
+ StrategyKind getKind() const { return StrategyKind(Kind); }
+
+ bool hasStrongMember() const { return HasStrong; }
+ bool isAtomic() const { return IsAtomic; }
+ bool isCopy() const { return IsCopy; }
+
+ CharUnits getIvarSize() const { return IvarSize; }
+ CharUnits getIvarAlignment() const { return IvarAlignment; }
+
+ PropertyImplStrategy(CodeGenModule &CGM,
+ const ObjCPropertyImplDecl *propImpl);
+
+ private:
+ unsigned Kind : 8;
+ unsigned IsAtomic : 1;
+ unsigned IsCopy : 1;
+ unsigned HasStrong : 1;
+
+ CharUnits IvarSize;
+ CharUnits IvarAlignment;
+ };
+}
+
+/// Pick an implementation strategy for the given property synthesis.
+PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
+ const ObjCPropertyImplDecl *propImpl) {
+ const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
+ ObjCPropertyDecl::SetterKind setterKind = prop->getSetterKind();
+
+ IsCopy = (setterKind == ObjCPropertyDecl::Copy);
+ IsAtomic = prop->isAtomic();
+ HasStrong = false; // doesn't matter here.
+
+ // Evaluate the ivar's size and alignment.
+ ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
+ QualType ivarType = ivar->getType();
+ llvm::tie(IvarSize, IvarAlignment)
+ = CGM.getContext().getTypeInfoInChars(ivarType);
+
+ // If we have a copy property, we always have to use getProperty/setProperty.
+ // TODO: we could actually use setProperty and an expression for non-atomics.
+ if (IsCopy) {
+ Kind = GetSetProperty;
+ return;
+ }
+
+ // Handle retain.
+ if (setterKind == ObjCPropertyDecl::Retain) {
+ // In GC-only, there's nothing special that needs to be done.
+ if (CGM.getLangOptions().getGC() == LangOptions::GCOnly) {
+ // fallthrough
+
+ // In ARC, if the property is non-atomic, use expression emission,
+ // which translates to objc_storeStrong. This isn't required, but
+ // it's slightly nicer.
+ } else if (CGM.getLangOptions().ObjCAutoRefCount && !IsAtomic) {
+ Kind = Expression;
+ return;
+
+ // Otherwise, we need to at least use setProperty. However, if
+ // the property isn't atomic, we can use normal expression
+ // emission for the getter.
+ } else if (!IsAtomic) {
+ Kind = SetPropertyAndExpressionGet;
+ return;
+
+ // Otherwise, we have to use both setProperty and getProperty.
+ } else {
+ Kind = GetSetProperty;
+ return;
+ }
+ }
+
+ // If we're not atomic, just use expression accesses.
+ if (!IsAtomic) {
+ Kind = Expression;
+ return;
+ }
+
+ // Properties on bitfield ivars need to be emitted using expression
+ // accesses even if they're nominally atomic.
+ if (ivar->isBitField()) {
+ Kind = Expression;
+ return;
+ }
+
+ // GC-qualified or ARC-qualified ivars need to be emitted as
+ // expressions. This actually works out to being atomic anyway,
+ // except for ARC __strong, but that should trigger the above code.
+ if (ivarType.hasNonTrivialObjCLifetime() ||
+ (CGM.getLangOptions().getGC() &&
+ CGM.getContext().getObjCGCAttrKind(ivarType))) {
+ Kind = Expression;
+ return;
+ }
+
+ // Compute whether the ivar has strong members.
+ if (CGM.getLangOptions().getGC())
+ if (const RecordType *recordType = ivarType->getAs<RecordType>())
+ HasStrong = recordType->getDecl()->hasObjectMember();
+
+ // We can never access structs with object members with a native
+ // access, because we need to use write barriers. This is what
+ // objc_copyStruct is for.
+ if (HasStrong) {
+ Kind = CopyStruct;
+ return;
+ }
+
+ // Otherwise, this is target-dependent and based on the size and
+ // alignment of the ivar.
+
+ // If the size of the ivar is not a power of two, give up. We don't
+ // want to get into the business of doing compare-and-swaps.
+ if (!IvarSize.isPowerOfTwo()) {
+ Kind = CopyStruct;
+ return;
+ }
+
+ llvm::Triple::ArchType arch =
+ CGM.getContext().getTargetInfo().getTriple().getArch();
+
+  // Most architectures require an atomic access to fit within a single
+  // cache line, so the alignment has to be at least the size of the access.
+ // Otherwise we have to grab a lock.
+ if (IvarAlignment < IvarSize && !hasUnalignedAtomics(arch)) {
+ Kind = CopyStruct;
+ return;
+ }
+
+ // If the ivar's size exceeds the architecture's maximum atomic
+ // access size, we have to use CopyStruct.
+ if (IvarSize > getMaxAtomicAccessSize(CGM, arch)) {
+ Kind = CopyStruct;
+ return;
+ }
+
+ // Otherwise, we can use native loads and stores.
+ Kind = Native;
+}
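
Condensed, the tail of the strategy selection above is a pure size/alignment test. A standalone sketch of that last decision, in byte units (hypothetical helper, not part of the patch):

    #include <cstdint>

    // True if an ivar of this size/alignment can use plain unordered
    // atomic loads and stores (the Native strategy).
    static bool canUseNativeAtomics(uint64_t size, uint64_t align,
                                    uint64_t maxAtomicSize) {
      if (size == 0 || (size & (size - 1)) != 0)
        return false;                // not a power of two -> CopyStruct
      if (align < size)
        return false;                // may straddle a cache line -> CopyStruct
      return size <= maxAtomicSize;  // e.g. the pointer size, as above
    }
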
/// GenerateObjCGetter - Generate an Objective-C property getter
/// function. The given Decl must be an ObjCImplementationDecl. @synthesize
/// is illegal within a category.
void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
const ObjCPropertyImplDecl *PID) {
- ObjCIvarDecl *Ivar = PID->getPropertyIvarDecl();
const ObjCPropertyDecl *PD = PID->getPropertyDecl();
- bool IsAtomic =
- !(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic);
ObjCMethodDecl *OMD = PD->getGetterMethodDecl();
assert(OMD && "Invalid call to generate getter (empty method)");
StartObjCMethod(OMD, IMP->getClassInterface(), PID->getLocStart());
-
- // Determine if we should use an objc_getProperty call for
- // this. Non-atomic properties are directly evaluated.
- // atomic 'copy' and 'retain' properties are also directly
- // evaluated in gc-only mode.
- if (CGM.getLangOptions().getGCMode() != LangOptions::GCOnly &&
- IsAtomic &&
- (PD->getSetterKind() == ObjCPropertyDecl::Copy ||
- PD->getSetterKind() == ObjCPropertyDecl::Retain)) {
- llvm::Value *GetPropertyFn =
- CGM.getObjCRuntime().GetPropertyGetFunction();
- if (!GetPropertyFn) {
- CGM.ErrorUnsupported(PID, "Obj-C getter requiring atomic copy");
- FinishFunction();
+ generateObjCGetterBody(IMP, PID);
+
+ FinishFunction();
+}
+
+static bool hasTrivialGetExpr(const ObjCPropertyImplDecl *propImpl) {
+ const Expr *getter = propImpl->getGetterCXXConstructor();
+ if (!getter) return true;
+
+  // Sema only makes one of these when the ivar has a C++ class type,
+ // so the form is pretty constrained.
+
+ // If the property has a reference type, we might just be binding a
+ // reference, in which case the result will be a gl-value. We should
+ // treat this as a non-trivial operation.
+ if (getter->isGLValue())
+ return false;
+
+ // If we selected a trivial copy-constructor, we're okay.
+ if (const CXXConstructExpr *construct = dyn_cast<CXXConstructExpr>(getter))
+ return (construct->getConstructor()->isTrivial());
+
+ // The constructor might require cleanups (in which case it's never
+ // trivial).
+ assert(isa<ExprWithCleanups>(getter));
+ return false;
+}
+
+void
+CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
+ const ObjCPropertyImplDecl *propImpl) {
+ // If there's a non-trivial 'get' expression, we just have to emit that.
+ if (!hasTrivialGetExpr(propImpl)) {
+ ReturnStmt ret(SourceLocation(), propImpl->getGetterCXXConstructor(),
+ /*nrvo*/ 0);
+ EmitReturnStmt(ret);
+ return;
+ }
+
+ const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
+ QualType propType = prop->getType();
+ ObjCMethodDecl *getterMethod = prop->getGetterMethodDecl();
+
+ ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
+
+ // Pick an implementation strategy.
+ PropertyImplStrategy strategy(CGM, propImpl);
+ switch (strategy.getKind()) {
+ case PropertyImplStrategy::Native: {
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);
+
+ // Currently, all atomic accesses have to be through integer
+ // types, so there's no point in trying to pick a prettier type.
+ llvm::Type *bitcastType =
+ llvm::Type::getIntNTy(getLLVMContext(),
+ getContext().toBits(strategy.getIvarSize()));
+ bitcastType = bitcastType->getPointerTo(); // addrspace 0 okay
+
+ // Perform an atomic load. This does not impose ordering constraints.
+ llvm::Value *ivarAddr = LV.getAddress();
+ ivarAddr = Builder.CreateBitCast(ivarAddr, bitcastType);
+ llvm::LoadInst *load = Builder.CreateLoad(ivarAddr, "load");
+ load->setAlignment(strategy.getIvarAlignment().getQuantity());
+ load->setAtomic(llvm::Unordered);
+
+ // Store that value into the return address. Doing this with a
+ // bitcast is likely to produce some pretty ugly IR, but it's not
+ // the *most* terrible thing in the world.
+ Builder.CreateStore(load, Builder.CreateBitCast(ReturnValue, bitcastType));
+
+ // Make sure we don't do an autorelease.
+ AutoreleaseResult = false;
+ return;
+ }
+
+ case PropertyImplStrategy::GetSetProperty: {
+ llvm::Value *getPropertyFn =
+ CGM.getObjCRuntime().GetPropertyGetFunction();
+ if (!getPropertyFn) {
+ CGM.ErrorUnsupported(propImpl, "Obj-C getter requiring atomic copy");
return;
}
// Return (ivar-type) objc_getProperty((id) self, _cmd, offset, true).
// FIXME: Can't this be simpler? This might even be worse than the
// corresponding gcc code.
- CodeGenTypes &Types = CGM.getTypes();
- ValueDecl *Cmd = OMD->getCmdDecl();
- llvm::Value *CmdVal = Builder.CreateLoad(LocalDeclMap[Cmd], "cmd");
- QualType IdTy = getContext().getObjCIdType();
- llvm::Value *SelfAsId =
- Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy));
- llvm::Value *Offset = EmitIvarOffset(IMP->getClassInterface(), Ivar);
- llvm::Value *True =
- llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 1);
- CallArgList Args;
- Args.add(RValue::get(SelfAsId), IdTy);
- Args.add(RValue::get(CmdVal), Cmd->getType());
- Args.add(RValue::get(Offset), getContext().getPointerDiffType());
- Args.add(RValue::get(True), getContext().BoolTy);
+ llvm::Value *cmd =
+ Builder.CreateLoad(LocalDeclMap[getterMethod->getCmdDecl()], "cmd");
+ llvm::Value *self = Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
+ llvm::Value *ivarOffset =
+ EmitIvarOffset(classImpl->getClassInterface(), ivar);
+
+ CallArgList args;
+ args.add(RValue::get(self), getContext().getObjCIdType());
+ args.add(RValue::get(cmd), getContext().getObjCSelType());
+ args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
+ args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
+ getContext().BoolTy);
+
// FIXME: We shouldn't need to get the function info here, the
// runtime already should have computed it to build the function.
- RValue RV = EmitCall(Types.getFunctionInfo(PD->getType(), Args,
- FunctionType::ExtInfo()),
- GetPropertyFn, ReturnValueSlot(), Args);
+ RValue RV = EmitCall(getTypes().getFunctionInfo(propType, args,
+ FunctionType::ExtInfo()),
+ getPropertyFn, ReturnValueSlot(), args);
+
// We need to fix the type here. Ivars with copy & retain are
// always objects so we don't need to worry about complex or
// aggregates.
RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(),
- Types.ConvertType(PD->getType())));
- EmitReturnOfRValue(RV, PD->getType());
+ getTypes().ConvertType(propType)));
+
+ EmitReturnOfRValue(RV, propType);
// objc_getProperty does an autorelease, so we should suppress ours.
AutoreleaseResult = false;
- } else {
- const llvm::Triple &Triple = getContext().Target.getTriple();
- QualType IVART = Ivar->getType();
- if (IsAtomic &&
- IVART->isScalarType() &&
- (Triple.getArch() == llvm::Triple::arm ||
- Triple.getArch() == llvm::Triple::thumb) &&
- (getContext().getTypeSizeInChars(IVART)
- > CharUnits::fromQuantity(4)) &&
- CGM.getObjCRuntime().GetGetStructFunction()) {
- GenerateObjCGetterBody(Ivar, true, false);
- }
- else if (IsAtomic &&
- (IVART->isScalarType() && !IVART->isRealFloatingType()) &&
- Triple.getArch() == llvm::Triple::x86 &&
- (getContext().getTypeSizeInChars(IVART)
- > CharUnits::fromQuantity(4)) &&
- CGM.getObjCRuntime().GetGetStructFunction()) {
- GenerateObjCGetterBody(Ivar, true, false);
- }
- else if (IsAtomic &&
- (IVART->isScalarType() && !IVART->isRealFloatingType()) &&
- Triple.getArch() == llvm::Triple::x86_64 &&
- (getContext().getTypeSizeInChars(IVART)
- > CharUnits::fromQuantity(8)) &&
- CGM.getObjCRuntime().GetGetStructFunction()) {
- GenerateObjCGetterBody(Ivar, true, false);
- }
- else if (IVART->isAnyComplexType()) {
- LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(),
- Ivar, 0);
- ComplexPairTy Pair = LoadComplexFromAddr(LV.getAddress(),
+
+ return;
+ }
+
+ case PropertyImplStrategy::CopyStruct:
+ emitStructGetterCall(*this, ivar, strategy.isAtomic(),
+ strategy.hasStrongMember());
+ return;
+
+ case PropertyImplStrategy::Expression:
+ case PropertyImplStrategy::SetPropertyAndExpressionGet: {
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);
+
+ QualType ivarType = ivar->getType();
+ if (ivarType->isAnyComplexType()) {
+ ComplexPairTy pair = LoadComplexFromAddr(LV.getAddress(),
LV.isVolatileQualified());
- StoreComplexToAddr(Pair, ReturnValue, LV.isVolatileQualified());
- }
- else if (hasAggregateLLVMType(IVART)) {
- bool IsStrong = false;
- if ((IsStrong = IvarTypeWithAggrGCObjects(IVART))
- && CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect
- && CGM.getObjCRuntime().GetGetStructFunction()) {
- GenerateObjCGetterBody(Ivar, IsAtomic, IsStrong);
- }
- else {
- const CXXRecordDecl *classDecl = IVART->getAsCXXRecordDecl();
-
- if (PID->getGetterCXXConstructor() &&
- classDecl && !classDecl->hasTrivialDefaultConstructor()) {
- ReturnStmt *Stmt =
- new (getContext()) ReturnStmt(SourceLocation(),
- PID->getGetterCXXConstructor(),
- 0);
- EmitReturnStmt(*Stmt);
- } else if (IsAtomic &&
- !IVART->isAnyComplexType() &&
- Triple.getArch() == llvm::Triple::x86 &&
- (getContext().getTypeSizeInChars(IVART)
- > CharUnits::fromQuantity(4)) &&
- CGM.getObjCRuntime().GetGetStructFunction()) {
- GenerateObjCGetterBody(Ivar, true, false);
- }
- else if (IsAtomic &&
- !IVART->isAnyComplexType() &&
- Triple.getArch() == llvm::Triple::x86_64 &&
- (getContext().getTypeSizeInChars(IVART)
- > CharUnits::fromQuantity(8)) &&
- CGM.getObjCRuntime().GetGetStructFunction()) {
- GenerateObjCGetterBody(Ivar, true, false);
- }
- else {
- LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(),
- Ivar, 0);
- EmitAggregateCopy(ReturnValue, LV.getAddress(), IVART);
- }
- }
- }
- else {
- LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(),
- Ivar, 0);
- QualType propType = PD->getType();
-
- llvm::Value *value;
- if (propType->isReferenceType()) {
- value = LV.getAddress();
+ StoreComplexToAddr(pair, ReturnValue, LV.isVolatileQualified());
+ } else if (hasAggregateLLVMType(ivarType)) {
+ // The return value slot is guaranteed to not be aliased, but
+ // that's not necessarily the same as "on the stack", so
+ // we still potentially need objc_memmove_collectable.
+ EmitAggregateCopy(ReturnValue, LV.getAddress(), ivarType);
+ } else {
+ llvm::Value *value;
+ if (propType->isReferenceType()) {
+ value = LV.getAddress();
+ } else {
+ // We want to load and autoreleaseReturnValue ARC __weak ivars.
+ if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
+ value = emitARCRetainLoadOfScalar(*this, LV, ivarType);
+
+ // Otherwise we want to do a simple load, suppressing the
+ // final autorelease.
} else {
- // In ARC, we want to emit this retained.
- if (getLangOptions().ObjCAutoRefCount &&
- PD->getType()->isObjCRetainableType())
- value = emitARCRetainLoadOfScalar(*this, LV, IVART);
- else
- value = EmitLoadOfLValue(LV).getScalarVal();
-
- value = Builder.CreateBitCast(value, ConvertType(propType));
+ value = EmitLoadOfLValue(LV).getScalarVal();
+ AutoreleaseResult = false;
}
- EmitReturnOfRValue(RValue::get(value), propType);
+ value = Builder.CreateBitCast(value, ConvertType(propType));
+ }
+
+ EmitReturnOfRValue(RValue::get(value), propType);
}
+ return;
}
- FinishFunction();
+ }
+ llvm_unreachable("bad @property implementation strategy!");
}
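
At the IR level, the Native getter path above reduces to an unordered atomic load through an iN pointer. A sketch, assuming an IRBuilder<> B, an LLVMContext Ctx, the ivar address IvarAddr, and precomputed SizeInBits/AlignInBytes (hypothetical names, LLVM 3.0-era API):

    llvm::Type *IntTy = llvm::Type::getIntNTy(Ctx, SizeInBits);
    llvm::Value *Addr = B.CreateBitCast(IvarAddr, IntTy->getPointerTo());
    llvm::LoadInst *Load = B.CreateLoad(Addr, "load");
    Load->setAlignment(AlignInBytes);  // bytes
    Load->setAtomic(llvm::Unordered);  // atomic, but no ordering constraint
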
-void CodeGenFunction::GenerateObjCAtomicSetterBody(ObjCMethodDecl *OMD,
- ObjCIvarDecl *Ivar) {
+/// emitStructSetterCall - Call the runtime function to store the value
+/// from the first formal parameter into the given ivar.
+static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD,
+ ObjCIvarDecl *ivar) {
// objc_copyStruct (&structIvar, &Arg,
// sizeof (struct something), true, false);
- llvm::Value *GetCopyStructFn =
- CGM.getObjCRuntime().GetSetStructFunction();
- CodeGenTypes &Types = CGM.getTypes();
- CallArgList Args;
- LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), Ivar, 0);
- RValue RV =
- RValue::get(Builder.CreateBitCast(LV.getAddress(),
- Types.ConvertType(getContext().VoidPtrTy)));
- Args.add(RV, getContext().VoidPtrTy);
- llvm::Value *Arg = LocalDeclMap[*OMD->param_begin()];
- llvm::Value *ArgAsPtrTy =
- Builder.CreateBitCast(Arg,
- Types.ConvertType(getContext().VoidPtrTy));
- RV = RValue::get(ArgAsPtrTy);
- Args.add(RV, getContext().VoidPtrTy);
- // sizeof (Type of Ivar)
- CharUnits Size = getContext().getTypeSizeInChars(Ivar->getType());
- llvm::Value *SizeVal =
- llvm::ConstantInt::get(Types.ConvertType(getContext().LongTy),
- Size.getQuantity());
- Args.add(RValue::get(SizeVal), getContext().LongTy);
- llvm::Value *True =
- llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 1);
- Args.add(RValue::get(True), getContext().BoolTy);
- llvm::Value *False =
- llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 0);
- Args.add(RValue::get(False), getContext().BoolTy);
- EmitCall(Types.getFunctionInfo(getContext().VoidTy, Args,
- FunctionType::ExtInfo()),
- GetCopyStructFn, ReturnValueSlot(), Args);
+ CallArgList args;
+
+ // The first argument is the address of the ivar.
+ llvm::Value *ivarAddr = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
+ CGF.LoadObjCSelf(), ivar, 0)
+ .getAddress();
+ ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
+ args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);
+
+ // The second argument is the address of the parameter variable.
+ ParmVarDecl *argVar = *OMD->param_begin();
+ DeclRefExpr argRef(argVar, argVar->getType(), VK_LValue, SourceLocation());
+ llvm::Value *argAddr = CGF.EmitLValue(&argRef).getAddress();
+ argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
+ args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);
+
+ // The third argument is the sizeof the type.
+ llvm::Value *size =
+ CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(ivar->getType()));
+ args.add(RValue::get(size), CGF.getContext().getSizeType());
+
+ // The fourth argument is the 'isAtomic' flag.
+ args.add(RValue::get(CGF.Builder.getTrue()), CGF.getContext().BoolTy);
+
+ // The fifth argument is the 'hasStrong' flag.
+ // FIXME: should this really always be false?
+ args.add(RValue::get(CGF.Builder.getFalse()), CGF.getContext().BoolTy);
+
+ llvm::Value *copyStructFn = CGF.CGM.getObjCRuntime().GetSetStructFunction();
+ CGF.EmitCall(CGF.getTypes().getFunctionInfo(CGF.getContext().VoidTy, args,
+ FunctionType::ExtInfo()),
+ copyStructFn, ReturnValueSlot(), args);
+}
+
+static bool hasTrivialSetExpr(const ObjCPropertyImplDecl *PID) {
+ Expr *setter = PID->getSetterCXXAssignment();
+ if (!setter) return true;
+
+  // Sema only makes one of these when the ivar has a C++ class type,
+ // so the form is pretty constrained.
+
+ // An operator call is trivial if the function it calls is trivial.
+ // This also implies that there's nothing non-trivial going on with
+ // the arguments, because operator= can only be trivial if it's a
+ // synthesized assignment operator and therefore both parameters are
+ // references.
+ if (CallExpr *call = dyn_cast<CallExpr>(setter)) {
+ if (const FunctionDecl *callee
+ = dyn_cast_or_null<FunctionDecl>(call->getCalleeDecl()))
+ if (callee->isTrivial())
+ return true;
+ return false;
+ }
+
+ assert(isa<ExprWithCleanups>(setter));
+ return false;
}
-static bool
-IvarAssignHasTrvialAssignment(const ObjCPropertyImplDecl *PID,
- QualType IvarT) {
- bool HasTrvialAssignment = true;
- if (PID->getSetterCXXAssignment()) {
- const CXXRecordDecl *classDecl = IvarT->getAsCXXRecordDecl();
- HasTrvialAssignment =
- (!classDecl || classDecl->hasTrivialCopyAssignment());
+void
+CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
+ const ObjCPropertyImplDecl *propImpl) {
+ // Just use the setter expression if Sema gave us one and it's
+ // non-trivial. There's no way to do this atomically.
+ if (!hasTrivialSetExpr(propImpl)) {
+ EmitStmt(propImpl->getSetterCXXAssignment());
+ return;
+ }
+
+ const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
+ ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
+ ObjCMethodDecl *setterMethod = prop->getSetterMethodDecl();
+
+ PropertyImplStrategy strategy(CGM, propImpl);
+ switch (strategy.getKind()) {
+ case PropertyImplStrategy::Native: {
+ llvm::Value *argAddr = LocalDeclMap[*setterMethod->param_begin()];
+
+ LValue ivarLValue =
+ EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, /*quals*/ 0);
+ llvm::Value *ivarAddr = ivarLValue.getAddress();
+
+ // Currently, all atomic accesses have to be through integer
+ // types, so there's no point in trying to pick a prettier type.
+ llvm::Type *bitcastType =
+ llvm::Type::getIntNTy(getLLVMContext(),
+ getContext().toBits(strategy.getIvarSize()));
+ bitcastType = bitcastType->getPointerTo(); // addrspace 0 okay
+
+ // Cast both arguments to the chosen operation type.
+ argAddr = Builder.CreateBitCast(argAddr, bitcastType);
+ ivarAddr = Builder.CreateBitCast(ivarAddr, bitcastType);
+
+ // This bitcast load is likely to cause some nasty IR.
+ llvm::Value *load = Builder.CreateLoad(argAddr);
+
+ // Perform an atomic store. There are no memory ordering requirements.
+ llvm::StoreInst *store = Builder.CreateStore(load, ivarAddr);
+ store->setAlignment(strategy.getIvarAlignment().getQuantity());
+ store->setAtomic(llvm::Unordered);
+ return;
+ }
+
+ case PropertyImplStrategy::GetSetProperty:
+ case PropertyImplStrategy::SetPropertyAndExpressionGet: {
+ llvm::Value *setPropertyFn =
+ CGM.getObjCRuntime().GetPropertySetFunction();
+ if (!setPropertyFn) {
+ CGM.ErrorUnsupported(propImpl, "Obj-C setter requiring atomic copy");
+ return;
+ }
+
+ // Emit objc_setProperty((id) self, _cmd, offset, arg,
+ // <is-atomic>, <is-copy>).
+ llvm::Value *cmd =
+ Builder.CreateLoad(LocalDeclMap[setterMethod->getCmdDecl()]);
+ llvm::Value *self =
+ Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
+ llvm::Value *ivarOffset =
+ EmitIvarOffset(classImpl->getClassInterface(), ivar);
+ llvm::Value *arg = LocalDeclMap[*setterMethod->param_begin()];
+ arg = Builder.CreateBitCast(Builder.CreateLoad(arg, "arg"), VoidPtrTy);
+
+ CallArgList args;
+ args.add(RValue::get(self), getContext().getObjCIdType());
+ args.add(RValue::get(cmd), getContext().getObjCSelType());
+ args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
+ args.add(RValue::get(arg), getContext().getObjCIdType());
+ args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
+ getContext().BoolTy);
+ args.add(RValue::get(Builder.getInt1(strategy.isCopy())),
+ getContext().BoolTy);
+ // FIXME: We shouldn't need to get the function info here, the runtime
+ // already should have computed it to build the function.
+ EmitCall(getTypes().getFunctionInfo(getContext().VoidTy, args,
+ FunctionType::ExtInfo()),
+ setPropertyFn, ReturnValueSlot(), args);
+ return;
+ }
+
+ case PropertyImplStrategy::CopyStruct:
+ emitStructSetterCall(*this, setterMethod, ivar);
+ return;
+
+ case PropertyImplStrategy::Expression:
+ break;
+ }
+
+ // Otherwise, fake up some ASTs and emit a normal assignment.
+ ValueDecl *selfDecl = setterMethod->getSelfDecl();
+ DeclRefExpr self(selfDecl, selfDecl->getType(), VK_LValue, SourceLocation());
+ ImplicitCastExpr selfLoad(ImplicitCastExpr::OnStack,
+ selfDecl->getType(), CK_LValueToRValue, &self,
+ VK_RValue);
+ ObjCIvarRefExpr ivarRef(ivar, ivar->getType().getNonReferenceType(),
+ SourceLocation(), &selfLoad, true, true);
+
+ ParmVarDecl *argDecl = *setterMethod->param_begin();
+ QualType argType = argDecl->getType().getNonReferenceType();
+ DeclRefExpr arg(argDecl, argType, VK_LValue, SourceLocation());
+ ImplicitCastExpr argLoad(ImplicitCastExpr::OnStack,
+ argType.getUnqualifiedType(), CK_LValueToRValue,
+ &arg, VK_RValue);
+
+  // The property type can differ from the ivar type in some situations with
+  // Objective-C pointer types; in these cases we can always bit-cast the RHS.
+ // The following absurdity is just to ensure well-formed IR.
+ CastKind argCK = CK_NoOp;
+ if (ivarRef.getType()->isObjCObjectPointerType()) {
+ if (argLoad.getType()->isObjCObjectPointerType())
+ argCK = CK_BitCast;
+ else if (argLoad.getType()->isBlockPointerType())
+ argCK = CK_BlockPointerToObjCPointerCast;
+ else
+ argCK = CK_CPointerToObjCPointerCast;
+ } else if (ivarRef.getType()->isBlockPointerType()) {
+ if (argLoad.getType()->isBlockPointerType())
+ argCK = CK_BitCast;
+ else
+ argCK = CK_AnyPointerToBlockPointerCast;
+ } else if (ivarRef.getType()->isPointerType()) {
+ argCK = CK_BitCast;
}
- return HasTrvialAssignment;
+ ImplicitCastExpr argCast(ImplicitCastExpr::OnStack,
+ ivarRef.getType(), argCK, &argLoad,
+ VK_RValue);
+ Expr *finalArg = &argLoad;
+ if (!getContext().hasSameUnqualifiedType(ivarRef.getType(),
+ argLoad.getType()))
+ finalArg = &argCast;
+
+
+ ivarRef.getType(), VK_RValue, OK_Ordinary,
+ SourceLocation());
+ EmitStmt(&assign);
}
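
For reference, the two property helpers this setter (and the matching getter) call have prototypes along these lines, with Objective-C types flattened to C++ purely for illustration (not copied from a runtime header):

    #include <cstddef>

    extern "C" void *objc_getProperty(void *self, void *_cmd,
                                      std::ptrdiff_t offset,
                                      signed char atomic);
    extern "C" void objc_setProperty(void *self, void *_cmd,
                                     std::ptrdiff_t offset, void *newValue,
                                     signed char atomic, signed char shouldCopy);
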
/// GenerateObjCSetter - Generate an Objective-C property setter
@@ -545,136 +943,12 @@ IvarAssignHasTrvialAssignment(const ObjCPropertyImplDecl *PID,
/// is illegal within a category.
void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
const ObjCPropertyImplDecl *PID) {
- ObjCIvarDecl *Ivar = PID->getPropertyIvarDecl();
const ObjCPropertyDecl *PD = PID->getPropertyDecl();
ObjCMethodDecl *OMD = PD->getSetterMethodDecl();
assert(OMD && "Invalid call to generate setter (empty method)");
StartObjCMethod(OMD, IMP->getClassInterface(), PID->getLocStart());
- const llvm::Triple &Triple = getContext().Target.getTriple();
- QualType IVART = Ivar->getType();
- bool IsCopy = PD->getSetterKind() == ObjCPropertyDecl::Copy;
- bool IsAtomic =
- !(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic);
-
- // Determine if we should use an objc_setProperty call for
- // this. Properties with 'copy' semantics always use it, as do
- // non-atomic properties with 'release' semantics as long as we are
- // not in gc-only mode.
- if (IsCopy ||
- (CGM.getLangOptions().getGCMode() != LangOptions::GCOnly &&
- PD->getSetterKind() == ObjCPropertyDecl::Retain)) {
- llvm::Value *SetPropertyFn =
- CGM.getObjCRuntime().GetPropertySetFunction();
-
- if (!SetPropertyFn) {
- CGM.ErrorUnsupported(PID, "Obj-C getter requiring atomic copy");
- FinishFunction();
- return;
- }
- // Emit objc_setProperty((id) self, _cmd, offset, arg,
- // <is-atomic>, <is-copy>).
- // FIXME: Can't this be simpler? This might even be worse than the
- // corresponding gcc code.
- CodeGenTypes &Types = CGM.getTypes();
- ValueDecl *Cmd = OMD->getCmdDecl();
- llvm::Value *CmdVal = Builder.CreateLoad(LocalDeclMap[Cmd], "cmd");
- QualType IdTy = getContext().getObjCIdType();
- llvm::Value *SelfAsId =
- Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy));
- llvm::Value *Offset = EmitIvarOffset(IMP->getClassInterface(), Ivar);
- llvm::Value *Arg = LocalDeclMap[*OMD->param_begin()];
- llvm::Value *ArgAsId =
- Builder.CreateBitCast(Builder.CreateLoad(Arg, "arg"),
- Types.ConvertType(IdTy));
- llvm::Value *True =
- llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 1);
- llvm::Value *False =
- llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 0);
- CallArgList Args;
- Args.add(RValue::get(SelfAsId), IdTy);
- Args.add(RValue::get(CmdVal), Cmd->getType());
- Args.add(RValue::get(Offset), getContext().getPointerDiffType());
- Args.add(RValue::get(ArgAsId), IdTy);
- Args.add(RValue::get(IsAtomic ? True : False), getContext().BoolTy);
- Args.add(RValue::get(IsCopy ? True : False), getContext().BoolTy);
- // FIXME: We shouldn't need to get the function info here, the runtime
- // already should have computed it to build the function.
- EmitCall(Types.getFunctionInfo(getContext().VoidTy, Args,
- FunctionType::ExtInfo()),
- SetPropertyFn,
- ReturnValueSlot(), Args);
- } else if (IsAtomic && hasAggregateLLVMType(IVART) &&
- !IVART->isAnyComplexType() &&
- IvarAssignHasTrvialAssignment(PID, IVART) &&
- ((Triple.getArch() == llvm::Triple::x86 &&
- (getContext().getTypeSizeInChars(IVART)
- > CharUnits::fromQuantity(4))) ||
- (Triple.getArch() == llvm::Triple::x86_64 &&
- (getContext().getTypeSizeInChars(IVART)
- > CharUnits::fromQuantity(8))))
- && CGM.getObjCRuntime().GetSetStructFunction()) {
- // objc_copyStruct (&structIvar, &Arg,
- // sizeof (struct something), true, false);
- GenerateObjCAtomicSetterBody(OMD, Ivar);
- } else if (PID->getSetterCXXAssignment()) {
- EmitIgnoredExpr(PID->getSetterCXXAssignment());
- } else {
- if (IsAtomic &&
- IVART->isScalarType() &&
- (Triple.getArch() == llvm::Triple::arm ||
- Triple.getArch() == llvm::Triple::thumb) &&
- (getContext().getTypeSizeInChars(IVART)
- > CharUnits::fromQuantity(4)) &&
- CGM.getObjCRuntime().GetGetStructFunction()) {
- GenerateObjCAtomicSetterBody(OMD, Ivar);
- }
- else if (IsAtomic &&
- (IVART->isScalarType() && !IVART->isRealFloatingType()) &&
- Triple.getArch() == llvm::Triple::x86 &&
- (getContext().getTypeSizeInChars(IVART)
- > CharUnits::fromQuantity(4)) &&
- CGM.getObjCRuntime().GetGetStructFunction()) {
- GenerateObjCAtomicSetterBody(OMD, Ivar);
- }
- else if (IsAtomic &&
- (IVART->isScalarType() && !IVART->isRealFloatingType()) &&
- Triple.getArch() == llvm::Triple::x86_64 &&
- (getContext().getTypeSizeInChars(IVART)
- > CharUnits::fromQuantity(8)) &&
- CGM.getObjCRuntime().GetGetStructFunction()) {
- GenerateObjCAtomicSetterBody(OMD, Ivar);
- }
- else {
- // FIXME: Find a clean way to avoid AST node creation.
- SourceLocation Loc = PID->getLocStart();
- ValueDecl *Self = OMD->getSelfDecl();
- ObjCIvarDecl *Ivar = PID->getPropertyIvarDecl();
- DeclRefExpr Base(Self, Self->getType(), VK_RValue, Loc);
- ParmVarDecl *ArgDecl = *OMD->param_begin();
- QualType T = ArgDecl->getType();
- if (T->isReferenceType())
- T = cast<ReferenceType>(T)->getPointeeType();
- DeclRefExpr Arg(ArgDecl, T, VK_LValue, Loc);
- ObjCIvarRefExpr IvarRef(Ivar, Ivar->getType(), Loc, &Base, true, true);
-
- // The property type can differ from the ivar type in some situations with
- // Objective-C pointer types, we can always bit cast the RHS in these cases.
- if (getContext().getCanonicalType(Ivar->getType()) !=
- getContext().getCanonicalType(ArgDecl->getType())) {
- ImplicitCastExpr ArgCasted(ImplicitCastExpr::OnStack,
- Ivar->getType(), CK_BitCast, &Arg,
- VK_RValue);
- BinaryOperator Assign(&IvarRef, &ArgCasted, BO_Assign,
- Ivar->getType(), VK_RValue, OK_Ordinary, Loc);
- EmitStmt(&Assign);
- } else {
- BinaryOperator Assign(&IvarRef, &Arg, BO_Assign,
- Ivar->getType(), VK_RValue, OK_Ordinary, Loc);
- EmitStmt(&Assign);
- }
- }
- }
+ generateObjCSetterBody(IMP, PID);
FinishFunction();
}
@@ -716,9 +990,8 @@ static void emitCXXDestructMethod(CodeGenFunction &CGF,
llvm::Value *self = CGF.LoadObjCSelf();
- ObjCInterfaceDecl *iface
- = const_cast<ObjCInterfaceDecl*>(impl->getClassInterface());
- for (ObjCIvarDecl *ivar = iface->all_declared_ivar_begin();
+ const ObjCInterfaceDecl *iface = impl->getClassInterface();
+ for (const ObjCIvarDecl *ivar = iface->all_declared_ivar_begin();
ivar; ivar = ivar->getNextIvar()) {
QualType type = ivar->getType();
@@ -758,7 +1031,7 @@ void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
// Suppress the final autorelease in ARC.
AutoreleaseResult = false;
- llvm::SmallVector<CXXCtorInitializer *, 8> IvarInitializers;
+ SmallVector<CXXCtorInitializer *, 8> IvarInitializers;
for (ObjCImplementationDecl::init_const_iterator B = IMP->init_begin(),
E = IMP->init_end(); B != E; ++B) {
CXXCtorInitializer *IvarInit = (*B);
@@ -766,7 +1039,10 @@ void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
ObjCIvarDecl *Ivar = cast<ObjCIvarDecl>(Field);
LValue LV = EmitLValueForIvar(TypeOfSelfObject(),
LoadObjCSelf(), Ivar, 0);
- EmitAggExpr(IvarInit->getInit(), AggValueSlot::forLValue(LV, true));
+ EmitAggExpr(IvarInit->getInit(),
+ AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased));
}
// constructor returns 'self'.
CodeGenTypes &Types = CGM.getTypes();
@@ -791,7 +1067,7 @@ bool CodeGenFunction::IndirectObjCSetterArg(const CGFunctionInfo &FI) {
}
bool CodeGenFunction::IvarTypeWithAggrGCObjects(QualType Ty) {
- if (CGM.getLangOptions().getGCMode() == LangOptions::NonGC)
+ if (CGM.getLangOptions().getGC() == LangOptions::NonGC)
return false;
if (const RecordType *FDTTy = Ty.getTypePtr()->getAs<RecordType>())
return FDTTy->getDecl()->hasObjectMember();
@@ -896,7 +1172,7 @@ void CodeGenFunction::EmitStoreThroughPropertyRefLValue(RValue Src,
if (Src.isScalar()) {
llvm::Value *SrcVal = Src.getScalarVal();
QualType DstType = getContext().getCanonicalType(ArgType);
- const llvm::Type *DstTy = ConvertType(DstType);
+ llvm::Type *DstTy = ConvertType(DstType);
if (SrcVal->getType() != DstTy)
Src =
RValue::get(EmitScalarConversion(SrcVal, E->getType(), DstType));
@@ -932,10 +1208,8 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
}
CGDebugInfo *DI = getDebugInfo();
- if (DI) {
- DI->setLocation(S.getSourceRange().getBegin());
- DI->EmitRegionStart(Builder);
- }
+ if (DI)
+ DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());
// The local variable comes into scope immediately.
AutoVarEmission variable = AutoVarEmission::invalid();
@@ -943,10 +1217,9 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
variable = EmitAutoVarAlloca(*cast<VarDecl>(SD->getSingleDecl()));
JumpDest LoopEnd = getJumpDestInCurrentScope("forcoll.end");
- JumpDest AfterBody = getJumpDestInCurrentScope("forcoll.next");
// Fast enumeration state.
- QualType StateTy = getContext().getObjCFastEnumerationStateType();
+ QualType StateTy = CGM.getObjCFastEnumerationStateType();
llvm::Value *StatePtr = CreateMemTemp(StateTy, "state.ptr");
EmitNullInitialization(StatePtr, StateTy);
@@ -968,8 +1241,20 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
ArrayType::Normal, 0);
llvm::Value *ItemsPtr = CreateMemTemp(ItemsTy, "items.ptr");
- // Emit the collection pointer.
- llvm::Value *Collection = EmitScalarExpr(S.getCollection());
+ // Emit the collection pointer. In ARC, we do a retain.
+ llvm::Value *Collection;
+ if (getLangOptions().ObjCAutoRefCount) {
+ Collection = EmitARCRetainScalarExpr(S.getCollection());
+
+ // Enter a cleanup to do the release.
+ EmitObjCConsumeObject(S.getCollection()->getType(), Collection);
+ } else {
+ Collection = EmitScalarExpr(S.getCollection());
+ }
+
+ // The 'continue' label needs to appear within the cleanup for the
+ // collection object.
+ JumpDest AfterBody = getJumpDestInCurrentScope("forcoll.next");
// Send it our message:
CallArgList Args;
@@ -985,7 +1270,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
Args.add(RValue::get(ItemsPtr), getContext().getPointerType(ItemsTy));
// The third argument is the capacity of that temporary array.
- const llvm::Type *UnsignedLongLTy = ConvertType(getContext().UnsignedLongTy);
+ llvm::Type *UnsignedLongLTy = ConvertType(getContext().UnsignedLongTy);
llvm::Constant *Count = llvm::ConstantInt::get(UnsignedLongLTy, NumItems);
Args.add(RValue::get(Count), getContext().UnsignedLongTy);
@@ -1053,8 +1338,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
EmitBlock(WasMutatedBB);
llvm::Value *V =
Builder.CreateBitCast(Collection,
- ConvertType(getContext().getObjCIdType()),
- "tmp");
+ ConvertType(getContext().getObjCIdType()));
CallArgList Args2;
Args2.add(RValue::get(V), getContext().getObjCIdType());
// FIXME: We shouldn't need to get the function info here, the runtime already
@@ -1089,7 +1373,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
elementType = cast<Expr>(S.getElement())->getType();
elementIsVariable = false;
}
- const llvm::Type *convertedElementType = ConvertType(elementType);
+ llvm::Type *convertedElementType = ConvertType(elementType);
// Fetch the buffer out of the enumeration state.
// TODO: this pointer should actually be invariant between
@@ -1179,10 +1463,12 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
EmitStoreThroughLValue(RValue::get(null), elementLValue);
}
- if (DI) {
- DI->setLocation(S.getSourceRange().getEnd());
- DI->EmitRegionEnd(Builder);
- }
+ if (DI)
+ DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());
+
+ // Leave the cleanup we entered in ARC.
+ if (getLangOptions().ObjCAutoRefCount)
+ PopCleanupBlock();
EmitBlock(LoopEnd.getBlock());
}
@@ -1200,7 +1486,7 @@ void CodeGenFunction::EmitObjCAtSynchronizedStmt(
CGM.getObjCRuntime().EmitSynchronizedStmt(*this, S);
}
-/// Produce the code for a CK_ObjCProduceObject. Just does a
+/// Produce the code for a CK_ARCProduceObject. Just does a
/// primitive retain.
llvm::Value *CodeGenFunction::EmitObjCProduceObject(QualType type,
llvm::Value *value) {
@@ -1209,63 +1495,22 @@ llvm::Value *CodeGenFunction::EmitObjCProduceObject(QualType type,
namespace {
struct CallObjCRelease : EHScopeStack::Cleanup {
- CallObjCRelease(QualType type, llvm::Value *ptr, llvm::Value *condition)
- : type(type), ptr(ptr), condition(condition) {}
- QualType type;
- llvm::Value *ptr;
- llvm::Value *condition;
+ CallObjCRelease(llvm::Value *object) : object(object) {}
+ llvm::Value *object;
void Emit(CodeGenFunction &CGF, Flags flags) {
- llvm::Value *object;
-
- // If we're in a conditional branch, we had to stash away in an
- // alloca the pointer to be released.
- llvm::BasicBlock *cont = 0;
- if (condition) {
- llvm::BasicBlock *release = CGF.createBasicBlock("release.yes");
- cont = CGF.createBasicBlock("release.cont");
-
- llvm::Value *cond = CGF.Builder.CreateLoad(condition);
- CGF.Builder.CreateCondBr(cond, release, cont);
- CGF.EmitBlock(release);
- object = CGF.Builder.CreateLoad(ptr);
- } else {
- object = ptr;
- }
-
CGF.EmitARCRelease(object, /*precise*/ true);
-
- if (cont) CGF.EmitBlock(cont);
}
};
}
-/// Produce the code for a CK_ObjCConsumeObject. Does a primitive
+/// Produce the code for a CK_ARCConsumeObject. Does a primitive
/// release at the end of the full-expression.
llvm::Value *CodeGenFunction::EmitObjCConsumeObject(QualType type,
llvm::Value *object) {
// If we're in a conditional branch, we need to make the cleanup
- // conditional. FIXME: this really needs to be supported by the
- // environment.
- llvm::AllocaInst *cond;
- llvm::Value *ptr;
- if (isInConditionalBranch()) {
- cond = CreateTempAlloca(Builder.getInt1Ty(), "release.cond");
- ptr = CreateTempAlloca(object->getType(), "release.value");
-
- // The alloca is false until we get here.
- // FIXME: er. doesn't this need to be set at the start of the condition?
- InitTempAlloca(cond, Builder.getFalse());
-
- // Then it turns true.
- Builder.CreateStore(Builder.getTrue(), cond);
- Builder.CreateStore(object, ptr);
- } else {
- cond = 0;
- ptr = object;
- }
-
- EHStack.pushCleanup<CallObjCRelease>(getARCCleanupKind(), type, ptr, cond);
+ // conditional.
+ pushFullExprCleanup<CallObjCRelease>(getARCCleanupKind(), object);
return object;
}
@@ -1276,8 +1521,8 @@ llvm::Value *CodeGenFunction::EmitObjCExtendObjectLifetime(QualType type,
static llvm::Constant *createARCRuntimeFunction(CodeGenModule &CGM,
- const llvm::FunctionType *type,
- llvm::StringRef fnName) {
+ llvm::FunctionType *type,
+ StringRef fnName) {
llvm::Constant *fn = CGM.CreateRuntimeFunction(type, fnName);
// In -fobjc-no-arc-runtime, emit weak references to the runtime
@@ -1295,18 +1540,18 @@ static llvm::Constant *createARCRuntimeFunction(CodeGenModule &CGM,
static llvm::Value *emitARCValueOperation(CodeGenFunction &CGF,
llvm::Value *value,
llvm::Constant *&fn,
- llvm::StringRef fnName) {
+ StringRef fnName) {
if (isa<llvm::ConstantPointerNull>(value)) return value;
if (!fn) {
std::vector<llvm::Type*> args(1, CGF.Int8PtrTy);
- const llvm::FunctionType *fnType =
+ llvm::FunctionType *fnType =
llvm::FunctionType::get(CGF.Int8PtrTy, args, false);
fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
}
// Cast the argument to 'id'.
- const llvm::Type *origType = value->getType();
+ llvm::Type *origType = value->getType();
value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy);
// Call the function.
@@ -1322,16 +1567,16 @@ static llvm::Value *emitARCValueOperation(CodeGenFunction &CGF,
static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF,
llvm::Value *addr,
llvm::Constant *&fn,
- llvm::StringRef fnName) {
+ StringRef fnName) {
if (!fn) {
std::vector<llvm::Type*> args(1, CGF.Int8PtrPtrTy);
- const llvm::FunctionType *fnType =
+ llvm::FunctionType *fnType =
llvm::FunctionType::get(CGF.Int8PtrTy, args, false);
fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
}
// Cast the argument to 'id*'.
- const llvm::Type *origType = addr->getType();
+ llvm::Type *origType = addr->getType();
addr = CGF.Builder.CreateBitCast(addr, CGF.Int8PtrPtrTy);
// Call the function.
@@ -1353,7 +1598,7 @@ static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF,
llvm::Value *addr,
llvm::Value *value,
llvm::Constant *&fn,
- llvm::StringRef fnName,
+ StringRef fnName,
bool ignored) {
assert(cast<llvm::PointerType>(addr->getType())->getElementType()
== value->getType());
@@ -1363,12 +1608,12 @@ static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF,
argTypes[0] = CGF.Int8PtrPtrTy;
argTypes[1] = CGF.Int8PtrTy;
- const llvm::FunctionType *fnType
+ llvm::FunctionType *fnType
= llvm::FunctionType::get(CGF.Int8PtrTy, argTypes, false);
fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
}
- const llvm::Type *origType = value->getType();
+ llvm::Type *origType = value->getType();
addr = CGF.Builder.CreateBitCast(addr, CGF.Int8PtrPtrTy);
value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy);
@@ -1387,12 +1632,12 @@ static void emitARCCopyOperation(CodeGenFunction &CGF,
llvm::Value *dst,
llvm::Value *src,
llvm::Constant *&fn,
- llvm::StringRef fnName) {
+ StringRef fnName) {
assert(dst->getType() == src->getType());
if (!fn) {
std::vector<llvm::Type*> argTypes(2, CGF.Int8PtrPtrTy);
- const llvm::FunctionType *fnType
+ llvm::FunctionType *fnType
= llvm::FunctionType::get(CGF.Builder.getVoidTy(), argTypes, false);
fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
}
@@ -1409,7 +1654,7 @@ static void emitARCCopyOperation(CodeGenFunction &CGF,
/// call i8* @objc_retainBlock(i8* %value)
llvm::Value *CodeGenFunction::EmitARCRetain(QualType type, llvm::Value *value) {
if (type->isBlockPointerType())
- return EmitARCRetainBlock(value);
+ return EmitARCRetainBlock(value, /*mandatory*/ false);
else
return EmitARCRetainNonBlock(value);
}
@@ -1424,10 +1669,32 @@ llvm::Value *CodeGenFunction::EmitARCRetainNonBlock(llvm::Value *value) {
/// Retain the given block, with _Block_copy semantics.
/// call i8* @objc_retainBlock(i8* %value)
-llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value) {
- return emitARCValueOperation(*this, value,
- CGM.getARCEntrypoints().objc_retainBlock,
- "objc_retainBlock");
+///
+/// \param mandatory - If false, emit the call with metadata
+/// indicating that it's okay for the optimizer to eliminate this call
+/// if it can prove that the block never escapes except down the stack.
+llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value,
+ bool mandatory) {
+ llvm::Value *result
+ = emitARCValueOperation(*this, value,
+ CGM.getARCEntrypoints().objc_retainBlock,
+ "objc_retainBlock");
+
+ // If the copy isn't mandatory, add !clang.arc.copy_on_escape to
+ // tell the optimizer that it doesn't need to do this copy if the
+ // block doesn't escape, where being passed as an argument doesn't
+ // count as escaping.
+ if (!mandatory && isa<llvm::Instruction>(result)) {
+ llvm::CallInst *call
+ = cast<llvm::CallInst>(result->stripPointerCasts());
+ assert(call->getCalledValue() == CGM.getARCEntrypoints().objc_retainBlock);
+
+ SmallVector<llvm::Value*,1> args;
+ call->setMetadata("clang.arc.copy_on_escape",
+ llvm::MDNode::get(Builder.getContext(), args));
+ }
+
+ return result;
}
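// A minimal sketch of how a consumer pass might test for the annotation
// added above; it assumes only that "call" is the annotated CallInst.
#include "llvm/Instructions.h"
static bool copyMayBeElided(const llvm::CallInst *call) {
  // Non-null iff codegen marked this objc_retainBlock call as elidable
  // for blocks that never escape.
  return call->getMetadata("clang.arc.copy_on_escape") != 0;
}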
/// Retain the given object which is the result of a function call.
@@ -1442,7 +1709,7 @@ CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
llvm::InlineAsm *&marker
= CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker;
if (!marker) {
- llvm::StringRef assembly
+ StringRef assembly
= CGM.getTargetCodeGenInfo()
.getARCRetainAutoreleasedReturnValueMarker();
@@ -1468,8 +1735,7 @@ CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
assert(metadata->getNumOperands() <= 1);
if (metadata->getNumOperands() == 0) {
llvm::Value *string = llvm::MDString::get(getLLVMContext(), assembly);
- llvm::Value *args[] = { string };
- metadata->addOperand(llvm::MDNode::get(getLLVMContext(), args));
+ metadata->addOperand(llvm::MDNode::get(getLLVMContext(), string));
}
}
}
@@ -1490,7 +1756,7 @@ void CodeGenFunction::EmitARCRelease(llvm::Value *value, bool precise) {
llvm::Constant *&fn = CGM.getARCEntrypoints().objc_release;
if (!fn) {
std::vector<llvm::Type*> args(1, Int8PtrTy);
- const llvm::FunctionType *fnType =
+ llvm::FunctionType *fnType =
llvm::FunctionType::get(Builder.getVoidTy(), args, false);
fn = createARCRuntimeFunction(CGM, fnType, "objc_release");
}
@@ -1503,7 +1769,7 @@ void CodeGenFunction::EmitARCRelease(llvm::Value *value, bool precise) {
call->setDoesNotThrow();
if (!precise) {
- llvm::SmallVector<llvm::Value*,1> args;
+ SmallVector<llvm::Value*,1> args;
call->setMetadata("clang.imprecise_release",
llvm::MDNode::get(Builder.getContext(), args));
}
@@ -1520,7 +1786,7 @@ llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(llvm::Value *addr,
llvm::Constant *&fn = CGM.getARCEntrypoints().objc_storeStrong;
if (!fn) {
llvm::Type *argTypes[] = { Int8PtrPtrTy, Int8PtrTy };
- const llvm::FunctionType *fnType
+ llvm::FunctionType *fnType
= llvm::FunctionType::get(Builder.getVoidTy(), argTypes, false);
fn = createARCRuntimeFunction(CGM, fnType, "objc_storeStrong");
}
@@ -1607,9 +1873,9 @@ llvm::Value *CodeGenFunction::EmitARCRetainAutorelease(QualType type,
if (isa<llvm::ConstantPointerNull>(value)) return value;
- const llvm::Type *origType = value->getType();
+ llvm::Type *origType = value->getType();
value = Builder.CreateBitCast(value, Int8PtrTy);
- value = EmitARCRetainBlock(value);
+ value = EmitARCRetainBlock(value, /*mandatory*/ true);
value = EmitARCAutorelease(value);
return Builder.CreateBitCast(value, origType);
}
@@ -1674,7 +1940,7 @@ void CodeGenFunction::EmitARCDestroyWeak(llvm::Value *addr) {
llvm::Constant *&fn = CGM.getARCEntrypoints().objc_destroyWeak;
if (!fn) {
std::vector<llvm::Type*> args(1, Int8PtrPtrTy);
- const llvm::FunctionType *fnType =
+ llvm::FunctionType *fnType =
llvm::FunctionType::get(Builder.getVoidTy(), args, false);
fn = createARCRuntimeFunction(CGM, fnType, "objc_destroyWeak");
}
@@ -1709,7 +1975,7 @@ void CodeGenFunction::EmitARCCopyWeak(llvm::Value *dst, llvm::Value *src) {
llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() {
llvm::Constant *&fn = CGM.getRREntrypoints().objc_autoreleasePoolPush;
if (!fn) {
- const llvm::FunctionType *fnType =
+ llvm::FunctionType *fnType =
llvm::FunctionType::get(Int8PtrTy, false);
fn = createARCRuntimeFunction(CGM, fnType, "objc_autoreleasePoolPush");
}
@@ -1728,7 +1994,7 @@ void CodeGenFunction::EmitObjCAutoreleasePoolPop(llvm::Value *value) {
llvm::Constant *&fn = CGM.getRREntrypoints().objc_autoreleasePoolPop;
if (!fn) {
std::vector<llvm::Type*> args(1, Int8PtrTy);
- const llvm::FunctionType *fnType =
+ llvm::FunctionType *fnType =
llvm::FunctionType::get(Builder.getVoidTy(), args, false);
// We don't want to use a weak import here; instead we should not
@@ -1851,6 +2117,24 @@ static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
e = e->IgnoreParens();
QualType type = e->getType();
+ // If we're loading retained from a __strong xvalue, we can avoid
+ // an extra retain/release pair by zeroing out the source of this
+ // "move" operation.
+ if (e->isXValue() &&
+ !type.isConstQualified() &&
+ type.getObjCLifetime() == Qualifiers::OCL_Strong) {
+ // Emit the lvalue.
+ LValue lv = CGF.EmitLValue(e);
+
+ // Load the object pointer.
+ llvm::Value *result = CGF.EmitLoadOfLValue(lv).getScalarVal();
+
+ // Set the source pointer to NULL.
+ CGF.EmitStoreOfScalar(getNullForVariable(lv.getAddress()), lv);
+
+ return TryEmitResult(result, true);
+ }
+
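// A standalone C++ model of the "move" special case above: the existing
// +1 reference is transferred by nulling the source, so no retain/release
// pair is needed. ObjCObject is a hypothetical stand-in type.
struct ObjCObject;
static ObjCObject *moveStrongSketch(ObjCObject *&src) {
  ObjCObject *result = src;  // load the object pointer
  src = 0;                   // zero the source; ownership moves to result
  return result;             // returned at +1, retain/release elided
}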
// As a very special optimization, in ARC++, if the l-value is the
// result of a non-volatile assignment, do a simple retain of the
// result of the call to objc_storeWeak instead of reloading.
@@ -1913,35 +2197,53 @@ static llvm::Value *emitARCRetainAfterCall(CodeGenFunction &CGF,
}
}
+/// Determine whether it might be important to emit a separate
+/// objc_retain_block on the result of the given expression, or
+/// whether it's okay to just emit it in a +1 context.
+static bool shouldEmitSeparateBlockRetain(const Expr *e) {
+ assert(e->getType()->isBlockPointerType());
+ e = e->IgnoreParens();
+
+ // For future goodness, emit block expressions directly in +1
+ // contexts if we can.
+ if (isa<BlockExpr>(e))
+ return false;
+
+ if (const CastExpr *cast = dyn_cast<CastExpr>(e)) {
+ switch (cast->getCastKind()) {
+ // Emitting these operations in +1 contexts is goodness.
+ case CK_LValueToRValue:
+ case CK_ARCReclaimReturnedObject:
+ case CK_ARCConsumeObject:
+ case CK_ARCProduceObject:
+ return false;
+
+ // These operations preserve a block type.
+ case CK_NoOp:
+ case CK_BitCast:
+ return shouldEmitSeparateBlockRetain(cast->getSubExpr());
+
+ // These operations are known to be bad (or haven't been considered).
+ case CK_AnyPointerToBlockPointerCast:
+ default:
+ return true;
+ }
+ }
+
+ return true;
+}
+
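// A toy standalone model of the conservative recursion above, using
// hypothetical expression kinds rather than clang's AST: block literals
// and loads are safe to emit at +1, type-preserving casts recurse, and
// anything else forces a separate objc_retainBlock.
struct ToyExpr {
  enum Kind { BlockLiteral, Load, TypePreservingCast, OtherCast } kind;
  const ToyExpr *sub;  // operand for casts, 0 otherwise
};
static bool toyNeedsSeparateBlockRetain(const ToyExpr *e) {
  switch (e->kind) {
  case ToyExpr::BlockLiteral:       return false;  // emitted at +1
  case ToyExpr::Load:               return false;  // safe in +1 contexts
  case ToyExpr::TypePreservingCast: return toyNeedsSeparateBlockRetain(e->sub);
  case ToyExpr::OtherCast:          return true;   // unknown: conservative
  }
  return true;
}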
static TryEmitResult
tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e) {
+ // Look through cleanups.
+ if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
+ CodeGenFunction::RunCleanupsScope scope(CGF);
+ return tryEmitARCRetainScalarExpr(CGF, cleanups->getSubExpr());
+ }
+
// The desired result type, if it differs from the type of the
// ultimate opaque expression.
- const llvm::Type *resultType = 0;
-
- // If we're loading retained from a __strong xvalue, we can avoid
- // an extra retain/release pair by zeroing out the source of this
- // "move" operation.
- if (e->isXValue() && !e->getType().isConstQualified() &&
- e->getType().getObjCLifetime() == Qualifiers::OCL_Strong) {
- // Emit the lvalue
- LValue lv = CGF.EmitLValue(e);
-
- // Load the object pointer and cast it to the appropriate type.
- QualType exprType = e->getType();
- llvm::Value *result = CGF.EmitLoadOfLValue(lv).getScalarVal();
-
- if (resultType)
- result = CGF.Builder.CreateBitCast(result, resultType);
-
- // Set the source pointer to NULL.
- llvm::Value *null
- = llvm::ConstantPointerNull::get(
- cast<llvm::PointerType>(CGF.ConvertType(exprType)));
- CGF.EmitStoreOfScalar(null, lv);
-
- return TryEmitResult(result, true);
- }
+ llvm::Type *resultType = 0;
while (true) {
e = e->IgnoreParens();
@@ -1969,7 +2271,8 @@ tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e) {
// These casts can change the type, so remember that and
// soldier on. We only need to remember the outermost such
// cast, though.
- case CK_AnyPointerToObjCPointerCast:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
case CK_AnyPointerToBlockPointerCast:
case CK_BitCast:
if (!resultType)
@@ -1980,15 +2283,49 @@ tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e) {
// For consumptions, just emit the subexpression and thus elide
// the retain/release pair.
- case CK_ObjCConsumeObject: {
+ case CK_ARCConsumeObject: {
llvm::Value *result = CGF.EmitScalarExpr(ce->getSubExpr());
if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
return TryEmitResult(result, true);
}
+ // Block extends are net +0. Naively, we could just recurse on
+ // the subexpression, but actually we need to ensure that the
+ // value is copied as a block, so there's a little filter here.
+ case CK_ARCExtendBlockObject: {
+ llvm::Value *result; // will be a +0 value
+
+ // If we can't safely assume the sub-expression will produce a
+ // block-copied value, emit the sub-expression at +0.
+ if (shouldEmitSeparateBlockRetain(ce->getSubExpr())) {
+ result = CGF.EmitScalarExpr(ce->getSubExpr());
+
+ // Otherwise, try to emit the sub-expression at +1 recursively.
+ } else {
+ TryEmitResult subresult
+ = tryEmitARCRetainScalarExpr(CGF, ce->getSubExpr());
+ result = subresult.getPointer();
+
+ // If that produced a retained value, just use that,
+ // possibly casting down.
+ if (subresult.getInt()) {
+ if (resultType)
+ result = CGF.Builder.CreateBitCast(result, resultType);
+ return TryEmitResult(result, true);
+ }
+
+ // Otherwise it's +0.
+ }
+
+ // Retain the object as a block, then cast down.
+ result = CGF.EmitARCRetainBlock(result, /*mandatory*/ true);
+ if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
+ return TryEmitResult(result, true);
+ }
+
// For reclaims, emit the subexpression as a retained call and
// skip the consumption.
- case CK_ObjCReclaimReturnedObject: {
+ case CK_ARCReclaimReturnedObject: {
llvm::Value *result = emitARCRetainCall(CGF, ce->getSubExpr());
if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
return TryEmitResult(result, true);
@@ -2067,6 +2404,48 @@ CodeGenFunction::EmitARCRetainAutoreleaseScalarExpr(const Expr *e) {
return value;
}
+llvm::Value *CodeGenFunction::EmitARCExtendBlockObject(const Expr *e) {
+ llvm::Value *result;
+ bool doRetain;
+
+ if (shouldEmitSeparateBlockRetain(e)) {
+ result = EmitScalarExpr(e);
+ doRetain = true;
+ } else {
+ TryEmitResult subresult = tryEmitARCRetainScalarExpr(*this, e);
+ result = subresult.getPointer();
+ doRetain = !subresult.getInt();
+ }
+
+ if (doRetain)
+ result = EmitARCRetainBlock(result, /*mandatory*/ true);
+ return EmitObjCConsumeObject(e->getType(), result);
+}
+
+llvm::Value *CodeGenFunction::EmitObjCThrowOperand(const Expr *expr) {
+ // In ARC, retain and autorelease the expression.
+ if (getLangOptions().ObjCAutoRefCount) {
+ // Do so before running any cleanups for the full-expression.
+ // tryEmitARCRetainScalarExpr does make an effort to do things
+ // inside cleanups, but there are crazy cases like
+ // @throw A().foo;
+ // where a full retain+autorelease is required and would
+ // otherwise happen after the destructor for the temporary.
+ CodeGenFunction::RunCleanupsScope cleanups(*this);
+ if (const ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(expr))
+ expr = ewc->getSubExpr();
+
+ return EmitARCRetainAutoreleaseScalarExpr(expr);
+ }
+
+ // Otherwise, use the normal scalar-expression emission. The
+ // exception machinery doesn't do anything special with the
+ // exception like retaining it, so there's no safety associated with
+ // only running cleanups after the throw has started, and when it
+ // matters it tends to be substantially inferior code.
+ return EmitScalarExpr(expr);
+}
+
std::pair<LValue,llvm::Value*>
CodeGenFunction::EmitARCStoreStrong(const BinaryOperator *e,
bool ignored) {
@@ -2074,10 +2453,20 @@ CodeGenFunction::EmitARCStoreStrong(const BinaryOperator *e,
TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e->getRHS());
llvm::Value *value = result.getPointer();
+ bool hasImmediateRetain = result.getInt();
+
+ // If we didn't emit a retained object, and the l-value is of block
+ // type, then we need to emit the block-retain immediately in case
+ // it invalidates the l-value.
+ if (!hasImmediateRetain && e->getType()->isBlockPointerType()) {
+ value = EmitARCRetainBlock(value, /*mandatory*/ false);
+ hasImmediateRetain = true;
+ }
+
LValue lvalue = EmitLValue(e->getLHS());
// If the RHS was emitted retained, expand this.
- if (result.getInt()) {
+ if (hasImmediateRetain) {
llvm::Value *oldValue =
EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatileQualified(),
lvalue.getAlignment(), e->getType(),
@@ -2111,10 +2500,8 @@ void CodeGenFunction::EmitObjCAutoreleasePoolStmt(
const CompoundStmt &S = cast<CompoundStmt>(*subStmt);
CGDebugInfo *DI = getDebugInfo();
- if (DI) {
- DI->setLocation(S.getLBracLoc());
- DI->EmitRegionStart(Builder);
- }
+ if (DI)
+ DI->EmitLexicalBlockStart(Builder, S.getLBracLoc());
// Keep track of the current cleanup stack depth.
RunCleanupsScope Scope(*this);
@@ -2130,19 +2517,16 @@ void CodeGenFunction::EmitObjCAutoreleasePoolStmt(
E = S.body_end(); I != E; ++I)
EmitStmt(*I);
- if (DI) {
- DI->setLocation(S.getRBracLoc());
- DI->EmitRegionEnd(Builder);
- }
+ if (DI)
+ DI->EmitLexicalBlockEnd(Builder, S.getRBracLoc());
}
/// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
/// make sure it survives garbage collection until this point.
void CodeGenFunction::EmitExtendGCLifetime(llvm::Value *object) {
// We just use an inline assembly.
- llvm::Type *paramTypes[] = { VoidPtrTy };
llvm::FunctionType *extenderType
- = llvm::FunctionType::get(VoidTy, paramTypes, /*variadic*/ false);
+ = llvm::FunctionType::get(VoidTy, VoidPtrTy, /*variadic*/ false);
llvm::Value *extender
= llvm::InlineAsm::get(extenderType,
/* assembly */ "",
diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp
index 61027feb5cb9..d3da649fbbf3 100644
--- a/lib/CodeGen/CGObjCGNU.cpp
+++ b/lib/CodeGen/CGObjCGNU.cpp
@@ -36,12 +36,11 @@
#include "llvm/Support/Compiler.h"
#include "llvm/Target/TargetData.h"
-#include <stdarg.h>
+#include <cstdarg>
using namespace clang;
using namespace CodeGen;
-using llvm::dyn_cast;
namespace {
@@ -82,7 +81,7 @@ class LazyRuntimeFunction {
if (!Function) {
if (0 == FunctionName) return 0;
// We put the return type on the end of the vector, so pop it back off
- const llvm::Type *RetTy = ArgTys.back();
+ llvm::Type *RetTy = ArgTys.back();
ArgTys.pop_back();
llvm::FunctionType *FTy = llvm::FunctionType::get(RetTy, ArgTys, false);
Function =
@@ -111,17 +110,17 @@ protected:
llvm::Module &TheModule;
/// struct objc_super. Used for sending messages to super. This structure
/// contains the receiver (object) and the expected class.
- const llvm::StructType *ObjCSuperTy;
+ llvm::StructType *ObjCSuperTy;
/// struct objc_super*. The type of the argument to the superclass message
/// lookup functions.
- const llvm::PointerType *PtrToObjCSuperTy;
+ llvm::PointerType *PtrToObjCSuperTy;
/// LLVM type for selectors. Opaque pointer (i8*) unless a header declaring
/// SEL is included somewhere, in which case it will be whatever
/// type is declared in that header, most likely {i8*, i8*}.
llvm::PointerType *SelectorTy;
/// LLVM i8 type. Cached here to avoid repeatedly getting it in all of the
/// places where it's used
- const llvm::IntegerType *Int8Ty;
+ llvm::IntegerType *Int8Ty;
/// Pointer to i8 - LLVM type of char*, for all of the places where the
/// runtime needs to deal with C strings.
llvm::PointerType *PtrToInt8Ty;
@@ -138,7 +137,7 @@ protected:
llvm::PointerType *IdTy;
/// Pointer to a pointer to an Objective-C object. Used in the new ABI
/// message lookup function and some GC-related functions.
- const llvm::PointerType *PtrToIdTy;
+ llvm::PointerType *PtrToIdTy;
/// The clang type of id. Used when using the clang CGCall infrastructure to
/// call Objective-C methods.
CanQualType ASTIdTy;
@@ -153,14 +152,20 @@ protected:
/// compatibility with GCC...
llvm::IntegerType *LongTy;
/// LLVM type for C size_t. Used in various runtime data structures.
- const llvm::IntegerType *SizeTy;
+ llvm::IntegerType *SizeTy;
+ /// LLVM type for C intptr_t.
+ llvm::IntegerType *IntPtrTy;
/// LLVM type for C ptrdiff_t. Mainly used in property accessor functions.
- const llvm::IntegerType *PtrDiffTy;
+ llvm::IntegerType *PtrDiffTy;
/// LLVM type for C int*. Used for GCC-ABI-compatible non-fragile instance
/// variables.
- const llvm::PointerType *PtrToIntTy;
+ llvm::PointerType *PtrToIntTy;
/// LLVM type for Objective-C BOOL type.
- const llvm::Type *BoolTy;
+ llvm::Type *BoolTy;
+ /// 32-bit integer type, to save us needing to look it up every time it's used.
+ llvm::IntegerType *Int32Ty;
+ /// 64-bit integer type, to save us needing to look it up every time it's used.
+ llvm::IntegerType *Int64Ty;
/// Metadata kind used to tie method lookups to message sends. The GNUstep
/// runtime provides some LLVM passes that can use this to do things like
/// automatic IMP caching and speculative inlining.
@@ -171,7 +176,7 @@ protected:
llvm::Constant *MakeConstantString(const std::string &Str,
const std::string &Name="") {
llvm::Constant *ConstStr = CGM.GetAddrOfConstantCString(Str, Name.c_str());
- return llvm::ConstantExpr::getGetElementPtr(ConstStr, Zeros, 2);
+ return llvm::ConstantExpr::getGetElementPtr(ConstStr, Zeros);
}
/// Emits a linkonce_odr string, whose name is the prefix followed by the
/// string value. This allows the linker to combine the strings between
@@ -186,14 +191,14 @@ protected:
ConstStr = new llvm::GlobalVariable(TheModule, value->getType(), true,
llvm::GlobalValue::LinkOnceODRLinkage, value, prefix + Str);
}
- return llvm::ConstantExpr::getGetElementPtr(ConstStr, Zeros, 2);
+ return llvm::ConstantExpr::getGetElementPtr(ConstStr, Zeros);
}
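// A minimal sketch of what MakeConstantString/ExportUniqueString produce,
// in the LLVM 3.0-era constant API; the function name is hypothetical and
// "Zeros" is the two-element i32-zero index array used above.
#include "llvm/Constants.h"
#include "llvm/Module.h"
static llvm::Constant *makeStringPtrSketch(llvm::Module &M,
                                           llvm::LLVMContext &Ctx,
                                           llvm::Constant *Zeros[2]) {
  // Constant [N x i8] initializer with a trailing NUL.
  llvm::Constant *init = llvm::ConstantArray::get(Ctx, "example", true);
  llvm::GlobalVariable *gv =
      new llvm::GlobalVariable(M, init->getType(), /*isConstant*/ true,
                               llvm::GlobalValue::LinkOnceODRLinkage,
                               init, ".objc_str_example");
  // GEP [0, 0] yields an i8* to the first character.
  return llvm::ConstantExpr::getGetElementPtr(gv,
                                              llvm::makeArrayRef(Zeros, 2));
}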
/// Generates a global structure, initialized by the elements in the vector.
/// The element types must match the types of the structure elements in the
/// first argument.
- llvm::GlobalVariable *MakeGlobal(const llvm::StructType *Ty,
- std::vector<llvm::Constant*> &V,
- llvm::StringRef Name="",
+ llvm::GlobalVariable *MakeGlobal(llvm::StructType *Ty,
+ llvm::ArrayRef<llvm::Constant*> V,
+ StringRef Name="",
llvm::GlobalValue::LinkageTypes linkage
=llvm::GlobalValue::InternalLinkage) {
llvm::Constant *C = llvm::ConstantStruct::get(Ty, V);
@@ -203,9 +208,9 @@ protected:
/// Generates a global array. The vector must contain the same number of
/// elements that the array type declares, of the type specified as the array
/// element type.
- llvm::GlobalVariable *MakeGlobal(const llvm::ArrayType *Ty,
- std::vector<llvm::Constant*> &V,
- llvm::StringRef Name="",
+ llvm::GlobalVariable *MakeGlobal(llvm::ArrayType *Ty,
+ llvm::ArrayRef<llvm::Constant*> V,
+ StringRef Name="",
llvm::GlobalValue::LinkageTypes linkage
=llvm::GlobalValue::InternalLinkage) {
llvm::Constant *C = llvm::ConstantArray::get(Ty, V);
@@ -214,9 +219,9 @@ protected:
}
/// Generates a global array, inferring the array type from the specified
/// element type and the size of the initialiser.
- llvm::GlobalVariable *MakeGlobalArray(const llvm::Type *Ty,
- std::vector<llvm::Constant*> &V,
- llvm::StringRef Name="",
+ llvm::GlobalVariable *MakeGlobalArray(llvm::Type *Ty,
+ llvm::ArrayRef<llvm::Constant*> V,
+ StringRef Name="",
llvm::GlobalValue::LinkageTypes linkage
=llvm::GlobalValue::InternalLinkage) {
llvm::ArrayType *ArrayTy = llvm::ArrayType::get(Ty, V.size());
@@ -225,7 +230,7 @@ protected:
/// Ensures that the value has the required type, by inserting a bitcast if
/// required. This function lets us avoid inserting bitcasts that are
/// redundant.
- llvm::Value* EnforceType(CGBuilderTy B, llvm::Value *V, const llvm::Type *Ty){
+ llvm::Value* EnforceType(CGBuilderTy B, llvm::Value *V, llvm::Type *Ty){
if (V->getType() == Ty) return V;
return B.CreateBitCast(V, Ty);
}
@@ -268,7 +273,7 @@ private:
/// Type of the selector map. This is roughly equivalent to the structure
/// used in the GNUstep runtime, which maintains a list of all of the valid
/// types for a selector in a table.
- typedef llvm::DenseMap<Selector, llvm::SmallVector<TypedSelector, 2> >
+ typedef llvm::DenseMap<Selector, SmallVector<TypedSelector, 2> >
SelectorMap;
/// A map from selectors to selector types. This allows us to emit all
/// selectors of the same name and type together.
@@ -332,18 +337,18 @@ private:
/// metadata. This is used purely for introspection in the fragile ABI. In
/// the non-fragile ABI, it's used for instance variable fixup.
llvm::Constant *GenerateIvarList(
- const llvm::SmallVectorImpl<llvm::Constant *> &IvarNames,
- const llvm::SmallVectorImpl<llvm::Constant *> &IvarTypes,
- const llvm::SmallVectorImpl<llvm::Constant *> &IvarOffsets);
+ const SmallVectorImpl<llvm::Constant *> &IvarNames,
+ const SmallVectorImpl<llvm::Constant *> &IvarTypes,
+ const SmallVectorImpl<llvm::Constant *> &IvarOffsets);
/// Generates a method list structure. This is a structure containing a size
/// and an array of structures containing method metadata.
///
/// This structure is used by both classes and categories, and contains a next
/// pointer allowing them to be chained together in a linked list.
- llvm::Constant *GenerateMethodList(const llvm::StringRef &ClassName,
- const llvm::StringRef &CategoryName,
- const llvm::SmallVectorImpl<Selector> &MethodSels,
- const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes,
+ llvm::Constant *GenerateMethodList(const StringRef &ClassName,
+ const StringRef &CategoryName,
+ const SmallVectorImpl<Selector> &MethodSels,
+ const SmallVectorImpl<llvm::Constant *> &MethodTypes,
bool isClassMethodList);
/// Emits an empty protocol. This is used for @protocol() where no protocol
/// is found. The runtime will (hopefully) fix up the pointer to refer to the
@@ -352,12 +357,12 @@ private:
/// Generates a list of property metadata structures. This follows the same
/// pattern as method and instance variable metadata lists.
llvm::Constant *GeneratePropertyList(const ObjCImplementationDecl *OID,
- llvm::SmallVectorImpl<Selector> &InstanceMethodSels,
- llvm::SmallVectorImpl<llvm::Constant*> &InstanceMethodTypes);
+ SmallVectorImpl<Selector> &InstanceMethodSels,
+ SmallVectorImpl<llvm::Constant*> &InstanceMethodTypes);
/// Generates a list of referenced protocols. Classes, categories, and
/// protocols all use this structure.
llvm::Constant *GenerateProtocolList(
- const llvm::SmallVectorImpl<std::string> &Protocols);
+ const SmallVectorImpl<std::string> &Protocols);
/// To ensure that all protocols are seen by the runtime, we add a category on
/// a class defined in the runtime, declaring no methods, but adopting the
/// protocols. This is a horribly ugly hack, but it allows us to collect all
@@ -376,12 +381,14 @@ private:
llvm::Constant *Protocols,
llvm::Constant *IvarOffsets,
llvm::Constant *Properties,
+ llvm::Constant *StrongIvarBitmap,
+ llvm::Constant *WeakIvarBitmap,
bool isMeta=false);
/// Generates a method list. This is used by protocols to define the required
/// and optional methods.
llvm::Constant *GenerateProtocolMethodList(
- const llvm::SmallVectorImpl<llvm::Constant *> &MethodNames,
- const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes);
+ const SmallVectorImpl<llvm::Constant *> &MethodNames,
+ const SmallVectorImpl<llvm::Constant *> &MethodTypes);
/// Returns a selector with the specified type encoding. An empty string is
/// used to return an untyped selector (with the types field set to NULL).
llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel,
@@ -403,12 +410,24 @@ protected:
llvm::Value *&Receiver,
llvm::Value *cmd,
llvm::MDNode *node) = 0;
- /// Looks up the method for sending a message to a superclass. This mechanism
- /// differs between the GCC and GNU runtimes, so this method must be
- /// overridden in subclasses.
+ /// Looks up the method for sending a message to a superclass. This
+ /// mechanism differs between the GCC and GNU runtimes, so this method must
+ /// be overridden in subclasses.
virtual llvm::Value *LookupIMPSuper(CodeGenFunction &CGF,
llvm::Value *ObjCSuper,
llvm::Value *cmd) = 0;
+ /// Libobjc2 uses a bitfield representation where small(ish) bitfields are
+ /// stored in a 64-bit value with the low bit set to 1 and the remaining 63
+ /// bits set to their values, LSB first, while larger ones are stored in a
+ /// structure of this form:
+ ///
+ /// struct { int32_t length; int32_t values[length]; };
+ ///
+ /// The values in the array are stored in host-endian format, with the least
+ /// significant bit being assumed to come first in the bitfield. Therefore,
+ /// a bitfield with the 64th bit set will be (int64_t)&{ 2, [0, 1<<31] },
+ /// while a bitfield with the 63rd bit set will be (1ULL<<63) | 1.
+ llvm::Constant *MakeBitField(llvm::SmallVectorImpl<bool> &bits);
public:
CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
unsigned protocolClassVersion);
@@ -622,7 +641,7 @@ class CGObjCGNUstep : public CGObjCGNU {
// void *__cxa_begin_catch(void *e)
EnterCatchFn.init(&CGM, "__cxa_begin_catch", PtrTy, PtrTy, NULL);
// void __cxa_end_catch(void)
- EnterCatchFn.init(&CGM, "__cxa_end_catch", VoidTy, NULL);
+ ExitCatchFn.init(&CGM, "__cxa_end_catch", VoidTy, NULL);
// void _Unwind_Resume_or_Rethrow(void*)
ExceptionReThrowFn.init(&CGM, "_Unwind_Resume_or_Rethrow", VoidTy, PtrTy, NULL);
}
@@ -650,13 +669,13 @@ void CGObjCGNU::EmitClassRef(const std::string &className) {
llvm::GlobalValue::WeakAnyLinkage, ClassSymbol, symbolRef);
}
-static std::string SymbolNameForMethod(const llvm::StringRef &ClassName,
- const llvm::StringRef &CategoryName, const Selector MethodName,
+static std::string SymbolNameForMethod(const StringRef &ClassName,
+ const StringRef &CategoryName, const Selector MethodName,
bool isClassMethod) {
std::string MethodNameColonStripped = MethodName.getAsString();
std::replace(MethodNameColonStripped.begin(), MethodNameColonStripped.end(),
':', '_');
- return (llvm::Twine(isClassMethod ? "_c_" : "_i_") + ClassName + "_" +
+ return (Twine(isClassMethod ? "_c_" : "_i_") + ClassName + "_" +
CategoryName + "_" + MethodNameColonStripped).str();
}
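// A worked example of the mangling above as a self-contained sketch,
// using plain std::string in place of Twine/StringRef:
#include <algorithm>
#include <string>
static std::string symbolNameSketch(std::string cls, std::string cat,
                                    std::string sel, bool isClassMethod) {
  std::replace(sel.begin(), sel.end(), ':', '_');  // colons -> underscores
  return (isClassMethod ? "_c_" : "_i_") + cls + "_" + cat + "_" + sel;
}
// symbolNameSketch("NSString", "Foo", "barWith:baz:", true)
//   == "_c_NSString_Foo_barWith_baz_"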
@@ -697,6 +716,12 @@ CGObjCGNU::CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
PtrToIntTy = llvm::PointerType::getUnqual(IntTy);
PtrTy = PtrToInt8Ty;
+ Int32Ty = llvm::Type::getInt32Ty(VMContext);
+ Int64Ty = llvm::Type::getInt64Ty(VMContext);
+
+ IntPtrTy =
+ TheModule.getPointerSize() == llvm::Module::Pointer32 ? Int32Ty : Int64Ty;
+
// Object type
QualType UnqualIdTy = CGM.getContext().getObjCIdType();
ASTIdTy = CanQualType();
@@ -744,11 +769,11 @@ CGObjCGNU::CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
true));
const LangOptions &Opts = CGM.getLangOptions();
- if ((Opts.getGCMode() != LangOptions::NonGC) || Opts.ObjCAutoRefCount)
+ if ((Opts.getGC() != LangOptions::NonGC) || Opts.ObjCAutoRefCount)
RuntimeVersion = 10;
// Don't bother initialising the GC stuff unless we're compiling in GC mode
- if (Opts.getGCMode() != LangOptions::NonGC) {
+ if (Opts.getGC() != LangOptions::NonGC) {
// This is a bit of a hack. We should sort this out by having a proper
// CGObjCGNUstep subclass for GC, but we may want to really support the old
// ABI and GC added in ObjectiveC2.framework, so we fudge it a bit for now
@@ -793,9 +818,8 @@ llvm::Value *CGObjCGNU::GetClassNamed(CGBuilderTy &Builder,
EmitClassRef(Name);
ClassName = Builder.CreateStructGEP(ClassName, 0);
- llvm::Type *ArgTys[] = { PtrToInt8Ty };
llvm::Constant *ClassLookupFn =
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, ArgTys, true),
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, PtrToInt8Ty, true),
"objc_lookup_class");
return Builder.CreateCall(ClassLookupFn, ClassName);
}
@@ -813,11 +837,11 @@ llvm::Value *CGObjCGNU::EmitNSAutoreleasePoolClassRef(CGBuilderTy &Builder) {
llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, Selector Sel,
const std::string &TypeEncoding, bool lval) {
- llvm::SmallVector<TypedSelector, 2> &Types = SelectorTable[Sel];
+ SmallVector<TypedSelector, 2> &Types = SelectorTable[Sel];
llvm::GlobalAlias *SelValue = 0;
- for (llvm::SmallVectorImpl<TypedSelector>::iterator i = Types.begin(),
+ for (SmallVectorImpl<TypedSelector>::iterator i = Types.begin(),
e = Types.end() ; i!=e ; i++) {
if (i->first == TypeEncoding) {
SelValue = i->second;
@@ -918,7 +942,7 @@ llvm::Constant *CGObjCGNU::GetEHType(QualType T) {
llvm::GlobalValue::ExternalLinkage, 0, vtableName);
}
llvm::Constant *Two = llvm::ConstantInt::get(IntTy, 2);
- Vtable = llvm::ConstantExpr::getGetElementPtr(Vtable, &Two, 1);
+ Vtable = llvm::ConstantExpr::getGetElementPtr(Vtable, Two);
Vtable = llvm::ConstantExpr::getBitCast(Vtable, PtrToInt8Ty);
llvm::Constant *typeName =
@@ -972,7 +996,7 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
const CallArgList &CallArgs,
const ObjCMethodDecl *Method) {
CGBuilderTy &Builder = CGF.Builder;
- if (CGM.getLangOptions().getGCMode() == LangOptions::GCOnly) {
+ if (CGM.getLangOptions().getGC() == LangOptions::GCOnly) {
if (Sel == RetainSel || Sel == AutoreleaseSel) {
return RValue::get(EnforceType(Builder, Receiver,
CGM.getTypes().ConvertType(ResultType)));
@@ -999,13 +1023,11 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
if (isCategoryImpl) {
llvm::Constant *classLookupFunction = 0;
if (IsClassMessage) {
- llvm::Type *ArgTys[] = { PtrTy };
classLookupFunction = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
- IdTy, ArgTys, true), "objc_get_meta_class");
+ IdTy, PtrTy, true), "objc_get_meta_class");
} else {
- llvm::Type *ArgTys[] = { PtrTy };
classLookupFunction = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
- IdTy, ArgTys, true), "objc_get_class");
+ IdTy, PtrTy, true), "objc_get_class");
}
ReceiverClass = Builder.CreateCall(classLookupFunction,
MakeConstantString(Class->getNameAsString()));
@@ -1048,7 +1070,7 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
Builder.CreateStore(ReceiverClass, Builder.CreateStructGEP(ObjCSuper, 1));
ObjCSuper = EnforceType(Builder, ObjCSuper, PtrToObjCSuperTy);
- const llvm::FunctionType *impType =
+ llvm::FunctionType *impType =
Types.GetFunctionType(FnInfo, Method ? Method->isVariadic() : false);
// Get the IMP
@@ -1082,7 +1104,7 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
CGBuilderTy &Builder = CGF.Builder;
// Strip out message sends to retain / release in GC mode
- if (CGM.getLangOptions().getGCMode() == LangOptions::GCOnly) {
+ if (CGM.getLangOptions().getGC() == LangOptions::GCOnly) {
if (Sel == RetainSel || Sel == AutoreleaseSel) {
return RValue::get(EnforceType(Builder, Receiver,
CGM.getTypes().ConvertType(ResultType)));
@@ -1148,7 +1170,7 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
CodeGenTypes &Types = CGM.getTypes();
const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs,
FunctionType::ExtInfo());
- const llvm::FunctionType *impType =
+ llvm::FunctionType *impType =
Types.GetFunctionType(FnInfo, Method ? Method->isVariadic() : false);
imp = EnforceType(Builder, imp, llvm::PointerType::getUnqual(impType));
@@ -1175,7 +1197,7 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
} else if (msgRet.isAggregate()) {
llvm::Value *v = msgRet.getAggregateAddr();
llvm::PHINode *phi = Builder.CreatePHI(v->getType(), 2);
- const llvm::PointerType *RetTy = cast<llvm::PointerType>(v->getType());
+ llvm::PointerType *RetTy = cast<llvm::PointerType>(v->getType());
llvm::AllocaInst *NullVal =
CGF.CreateTempAlloca(RetTy->getElementType(), "null");
CGF.InitTempAlloca(NullVal,
@@ -1201,10 +1223,10 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
/// Generates a MethodList. Used in construction of a objc_class and
/// objc_category structures.
-llvm::Constant *CGObjCGNU::GenerateMethodList(const llvm::StringRef &ClassName,
- const llvm::StringRef &CategoryName,
- const llvm::SmallVectorImpl<Selector> &MethodSels,
- const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes,
+llvm::Constant *CGObjCGNU::GenerateMethodList(const StringRef &ClassName,
+ const StringRef &CategoryName,
+ const SmallVectorImpl<Selector> &MethodSels,
+ const SmallVectorImpl<llvm::Constant *> &MethodTypes,
bool isClassMethodList) {
if (MethodSels.empty())
return NULLPtr;
@@ -1239,8 +1261,7 @@ llvm::Constant *CGObjCGNU::GenerateMethodList(const llvm::StringRef &ClassName,
Methods);
// Structure containing list pointer, array and array count
- llvm::StructType *ObjCMethodListTy =
- llvm::StructType::createNamed(VMContext, "");
+ llvm::StructType *ObjCMethodListTy = llvm::StructType::create(VMContext);
llvm::Type *NextPtrTy = llvm::PointerType::getUnqual(ObjCMethodListTy);
ObjCMethodListTy->setBody(
NextPtrTy,
@@ -1251,8 +1272,7 @@ llvm::Constant *CGObjCGNU::GenerateMethodList(const llvm::StringRef &ClassName,
Methods.clear();
Methods.push_back(llvm::ConstantPointerNull::get(
llvm::PointerType::getUnqual(ObjCMethodListTy)));
- Methods.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
- MethodTypes.size()));
+ Methods.push_back(llvm::ConstantInt::get(Int32Ty, MethodTypes.size()));
Methods.push_back(MethodArray);
// Create an instance of the structure
@@ -1261,9 +1281,9 @@ llvm::Constant *CGObjCGNU::GenerateMethodList(const llvm::StringRef &ClassName,
/// Generates an IvarList. Used in construction of a objc_class.
llvm::Constant *CGObjCGNU::GenerateIvarList(
- const llvm::SmallVectorImpl<llvm::Constant *> &IvarNames,
- const llvm::SmallVectorImpl<llvm::Constant *> &IvarTypes,
- const llvm::SmallVectorImpl<llvm::Constant *> &IvarOffsets) {
+ const SmallVectorImpl<llvm::Constant *> &IvarNames,
+ const SmallVectorImpl<llvm::Constant *> &IvarTypes,
+ const SmallVectorImpl<llvm::Constant *> &IvarOffsets) {
if (IvarNames.size() == 0)
return NULLPtr;
// Get the method structure type.
@@ -1312,6 +1332,8 @@ llvm::Constant *CGObjCGNU::GenerateClassStructure(
llvm::Constant *Protocols,
llvm::Constant *IvarOffsets,
llvm::Constant *Properties,
+ llvm::Constant *StrongIvarBitmap,
+ llvm::Constant *WeakIvarBitmap,
bool isMeta) {
// Set up the class structure
// Note: Several of these are char*s when they should be ids. This is
@@ -1339,6 +1361,8 @@ llvm::Constant *CGObjCGNU::GenerateClassStructure(
LongTy, // abi_version
IvarOffsets->getType(), // ivar_offsets
Properties->getType(), // properties
+ Int64Ty, // strong_pointers
+ Int64Ty, // weak_pointers
NULL);
llvm::Constant *Zero = llvm::ConstantInt::get(LongTy, 0);
// Fill in the structure
@@ -1363,9 +1387,11 @@ llvm::Constant *CGObjCGNU::GenerateClassStructure(
Elements.push_back(NULLPtr);
Elements.push_back(llvm::ConstantExpr::getBitCast(Protocols, PtrTy));
Elements.push_back(NULLPtr);
- Elements.push_back(Zero);
+ Elements.push_back(llvm::ConstantInt::get(LongTy, 1));
Elements.push_back(IvarOffsets);
Elements.push_back(Properties);
+ Elements.push_back(StrongIvarBitmap);
+ Elements.push_back(WeakIvarBitmap);
// Create an instance of the structure
// This is now an externally visible symbol, so that we can speed up class
// messages in the next ABI.
@@ -1374,8 +1400,8 @@ llvm::Constant *CGObjCGNU::GenerateClassStructure(
}
llvm::Constant *CGObjCGNU::GenerateProtocolMethodList(
- const llvm::SmallVectorImpl<llvm::Constant *> &MethodNames,
- const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes) {
+ const SmallVectorImpl<llvm::Constant *> &MethodNames,
+ const SmallVectorImpl<llvm::Constant *> &MethodTypes) {
// Get the method structure type.
llvm::StructType *ObjCMethodDescTy = llvm::StructType::get(
PtrToInt8Ty, // Really a selector, but the runtime does the casting for us.
@@ -1403,7 +1429,7 @@ llvm::Constant *CGObjCGNU::GenerateProtocolMethodList(
// Create the protocol list structure used in classes, categories and so on
llvm::Constant *CGObjCGNU::GenerateProtocolList(
- const llvm::SmallVectorImpl<std::string> &Protocols) {
+ const SmallVectorImpl<std::string> &Protocols) {
llvm::ArrayType *ProtocolArrayTy = llvm::ArrayType::get(PtrToInt8Ty,
Protocols.size());
llvm::StructType *ProtocolListTy = llvm::StructType::get(
@@ -1438,15 +1464,15 @@ llvm::Constant *CGObjCGNU::GenerateProtocolList(
llvm::Value *CGObjCGNU::GenerateProtocolRef(CGBuilderTy &Builder,
const ObjCProtocolDecl *PD) {
llvm::Value *protocol = ExistingProtocols[PD->getNameAsString()];
- const llvm::Type *T =
+ llvm::Type *T =
CGM.getTypes().ConvertType(CGM.getContext().getObjCProtoType());
return Builder.CreateBitCast(protocol, llvm::PointerType::getUnqual(T));
}
llvm::Constant *CGObjCGNU::GenerateEmptyProtocol(
const std::string &ProtocolName) {
- llvm::SmallVector<std::string, 0> EmptyStringVector;
- llvm::SmallVector<llvm::Constant*, 0> EmptyConstantVector;
+ SmallVector<std::string, 0> EmptyStringVector;
+ SmallVector<llvm::Constant*, 0> EmptyConstantVector;
llvm::Constant *ProtocolList = GenerateProtocolList(EmptyStringVector);
llvm::Constant *MethodList =
@@ -1465,8 +1491,7 @@ llvm::Constant *CGObjCGNU::GenerateEmptyProtocol(
// The isa pointer must be set to a magic number so the runtime knows it's
// the correct layout.
Elements.push_back(llvm::ConstantExpr::getIntToPtr(
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
- ProtocolVersion), IdTy));
+ llvm::ConstantInt::get(Int32Ty, ProtocolVersion), IdTy));
Elements.push_back(MakeConstantString(ProtocolName, ".objc_protocol_name"));
Elements.push_back(ProtocolList);
Elements.push_back(MethodList);
@@ -1479,14 +1504,14 @@ llvm::Constant *CGObjCGNU::GenerateEmptyProtocol(
void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
ASTContext &Context = CGM.getContext();
std::string ProtocolName = PD->getNameAsString();
- llvm::SmallVector<std::string, 16> Protocols;
+ SmallVector<std::string, 16> Protocols;
for (ObjCProtocolDecl::protocol_iterator PI = PD->protocol_begin(),
E = PD->protocol_end(); PI != E; ++PI)
Protocols.push_back((*PI)->getNameAsString());
- llvm::SmallVector<llvm::Constant*, 16> InstanceMethodNames;
- llvm::SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
- llvm::SmallVector<llvm::Constant*, 16> OptionalInstanceMethodNames;
- llvm::SmallVector<llvm::Constant*, 16> OptionalInstanceMethodTypes;
+ SmallVector<llvm::Constant*, 16> InstanceMethodNames;
+ SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
+ SmallVector<llvm::Constant*, 16> OptionalInstanceMethodNames;
+ SmallVector<llvm::Constant*, 16> OptionalInstanceMethodTypes;
for (ObjCProtocolDecl::instmeth_iterator iter = PD->instmeth_begin(),
E = PD->instmeth_end(); iter != E; iter++) {
std::string TypeStr;
@@ -1502,10 +1527,10 @@ void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
}
}
// Collect information about class methods:
- llvm::SmallVector<llvm::Constant*, 16> ClassMethodNames;
- llvm::SmallVector<llvm::Constant*, 16> ClassMethodTypes;
- llvm::SmallVector<llvm::Constant*, 16> OptionalClassMethodNames;
- llvm::SmallVector<llvm::Constant*, 16> OptionalClassMethodTypes;
+ SmallVector<llvm::Constant*, 16> ClassMethodNames;
+ SmallVector<llvm::Constant*, 16> ClassMethodTypes;
+ SmallVector<llvm::Constant*, 16> OptionalClassMethodNames;
+ SmallVector<llvm::Constant*, 16> OptionalClassMethodTypes;
for (ObjCProtocolDecl::classmeth_iterator
iter = PD->classmeth_begin(), endIter = PD->classmeth_end();
iter != endIter ; iter++) {
@@ -1626,8 +1651,7 @@ void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
// The isa pointer must be set to a magic number so the runtime knows it's
// the correct layout.
Elements.push_back(llvm::ConstantExpr::getIntToPtr(
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
- ProtocolVersion), IdTy));
+ llvm::ConstantInt::get(Int32Ty, ProtocolVersion), IdTy));
Elements.push_back(MakeConstantString(ProtocolName, ".objc_protocol_name"));
Elements.push_back(ProtocolList);
Elements.push_back(InstanceMethodList);
@@ -1642,8 +1666,8 @@ void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
}
void CGObjCGNU::GenerateProtocolHolderCategory(void) {
// Collect information about instance methods
- llvm::SmallVector<Selector, 1> MethodSels;
- llvm::SmallVector<llvm::Constant*, 1> MethodTypes;
+ SmallVector<Selector, 1> MethodSels;
+ SmallVector<llvm::Constant*, 1> MethodTypes;
std::vector<llvm::Constant*> Elements;
const std::string ClassName = "__ObjC_Protocol_Holder_Ugly_Hack";
@@ -1686,12 +1710,55 @@ void CGObjCGNU::GenerateProtocolHolderCategory(void) {
PtrTy, PtrTy, PtrTy, NULL), Elements), PtrTy));
}
+/// Libobjc2 uses a bitfield representation where small(ish) bitfields are
+/// stored in a 64-bit value with the low bit set to 1 and the remaining 63
+/// bits set to their values, LSB first, while larger ones are stored in a
+/// structure of this form:
+///
+/// struct { int32_t length; int32_t values[length]; };
+///
+/// The values in the array are stored in host-endian format, with the least
+/// significant bit being assumed to come first in the bitfield. Therefore, a
+/// bitfield with the 64th bit set will be (int64_t)&{ 2, [0, 1<<31] }, while a
+/// bitfield with the 63rd bit set will be (1ULL<<63) | 1.
+llvm::Constant *CGObjCGNU::MakeBitField(llvm::SmallVectorImpl<bool> &bits) {
+ int bitCount = bits.size();
+ if (bitCount < 64) {
+ uint64_t val = 1;
+ for (int i=0 ; i<bitCount ; ++i) {
+ if (bits[i]) val |= 1ULL<<(i+1);
+ }
+ return llvm::ConstantInt::get(Int64Ty, val);
+ }
+ llvm::SmallVector<llvm::Constant*, 8> values;
+ int v=0;
+ while (v < bitCount) {
+ int32_t word = 0;
+ for (int i=0 ; (i<32) && (v<bitCount) ; ++i) {
+ if (bits[v]) word |= 1<<i;
+ v++;
+ }
+ values.push_back(llvm::ConstantInt::get(Int32Ty, word));
+ }
+ llvm::ArrayType *arrayTy = llvm::ArrayType::get(Int32Ty, values.size());
+ llvm::Constant *array = llvm::ConstantArray::get(arrayTy, values);
+ llvm::Constant *fields[2] = {
+ llvm::ConstantInt::get(Int32Ty, values.size()),
+ array };
+ llvm::Constant *GS = MakeGlobal(llvm::StructType::get(Int32Ty, arrayTy,
+ NULL), fields);
+ llvm::Constant *ptr = llvm::ConstantExpr::getPtrToInt(GS, IntPtrTy);
+ if (IntPtrTy != Int64Ty)
+ ptr = llvm::ConstantExpr::getZExt(ptr, Int64Ty);
+ return ptr;
+}
+
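// A standalone model of the inline (fewer than 64 bits) encoding
// implemented above: the low bit tags the value as inline and bit i of
// the field is stored at bit i+1, LSB first.
#include <stdint.h>
#include <vector>
static uint64_t encodeInlineBitfieldSketch(const std::vector<bool> &bits) {
  uint64_t val = 1;            // tag bit: inline representation
  for (unsigned i = 0, e = bits.size(); i != e; ++i)
    if (bits[i])
      val |= 1ULL << (i + 1);  // shifted past the tag bit
  return val;
}
// e.g. a one-bit field whose bit is set encodes as 3 (binary 11); fields
// of 64 or more bits use the { length, values[] } structure instead.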
void CGObjCGNU::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
std::string ClassName = OCD->getClassInterface()->getNameAsString();
std::string CategoryName = OCD->getNameAsString();
// Collect information about instance methods
- llvm::SmallVector<Selector, 16> InstanceMethodSels;
- llvm::SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
+ SmallVector<Selector, 16> InstanceMethodSels;
+ SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
for (ObjCCategoryImplDecl::instmeth_iterator
iter = OCD->instmeth_begin(), endIter = OCD->instmeth_end();
iter != endIter ; iter++) {
@@ -1702,8 +1769,8 @@ void CGObjCGNU::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
}
// Collect information about class methods
- llvm::SmallVector<Selector, 16> ClassMethodSels;
- llvm::SmallVector<llvm::Constant*, 16> ClassMethodTypes;
+ SmallVector<Selector, 16> ClassMethodSels;
+ SmallVector<llvm::Constant*, 16> ClassMethodTypes;
for (ObjCCategoryImplDecl::classmeth_iterator
iter = OCD->classmeth_begin(), endIter = OCD->classmeth_end();
iter != endIter ; iter++) {
@@ -1714,7 +1781,7 @@ void CGObjCGNU::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
}
// Collect the names of referenced protocols
- llvm::SmallVector<std::string, 16> Protocols;
+ SmallVector<std::string, 16> Protocols;
const ObjCCategoryDecl *CatDecl = OCD->getCategoryDecl();
const ObjCList<ObjCProtocolDecl> &Protos = CatDecl->getReferencedProtocols();
for (ObjCList<ObjCProtocolDecl>::iterator I = Protos.begin(),
@@ -1741,8 +1808,8 @@ void CGObjCGNU::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
}
llvm::Constant *CGObjCGNU::GeneratePropertyList(const ObjCImplementationDecl *OID,
- llvm::SmallVectorImpl<Selector> &InstanceMethodSels,
- llvm::SmallVectorImpl<llvm::Constant*> &InstanceMethodTypes) {
+ SmallVectorImpl<Selector> &InstanceMethodSels,
+ SmallVectorImpl<llvm::Constant*> &InstanceMethodTypes) {
ASTContext &Context = CGM.getContext();
//
// Property metadata: name, attributes, isSynthesized, setter name, setter
@@ -1845,11 +1912,13 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
Context.getASTObjCImplementationLayout(OID).getSize().getQuantity();
// Collect information about instance variables.
- llvm::SmallVector<llvm::Constant*, 16> IvarNames;
- llvm::SmallVector<llvm::Constant*, 16> IvarTypes;
- llvm::SmallVector<llvm::Constant*, 16> IvarOffsets;
+ SmallVector<llvm::Constant*, 16> IvarNames;
+ SmallVector<llvm::Constant*, 16> IvarTypes;
+ SmallVector<llvm::Constant*, 16> IvarOffsets;
std::vector<llvm::Constant*> IvarOffsetValues;
+ SmallVector<bool, 16> WeakIvars;
+ SmallVector<bool, 16> StrongIvars;
int superInstanceSize = !SuperClassDecl ? 0 :
Context.getASTObjCInterfaceLayout(SuperClassDecl).getSize().getQuantity();
@@ -1859,12 +1928,8 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
instanceSize = 0 - (instanceSize - superInstanceSize);
}
- // Collect declared and synthesized ivars.
- llvm::SmallVector<ObjCIvarDecl*, 16> OIvars;
- CGM.getContext().ShallowCollectObjCIvars(ClassDecl, OIvars);
-
- for (unsigned i = 0, e = OIvars.size(); i != e; ++i) {
- ObjCIvarDecl *IVD = OIvars[i];
+ for (const ObjCIvarDecl *IVD = ClassDecl->all_declared_ivar_begin(); IVD;
+ IVD = IVD->getNextIvar()) {
// Store the name
IvarNames.push_back(MakeConstantString(IVD->getNameAsString()));
// Get the type encoding for this ivar
@@ -1896,14 +1961,30 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
IVD->getNameAsString());
IvarOffsets.push_back(OffsetValue);
IvarOffsetValues.push_back(OffsetVar);
+ Qualifiers::ObjCLifetime lt = IVD->getType().getQualifiers().getObjCLifetime();
+ switch (lt) {
+ case Qualifiers::OCL_Strong:
+ StrongIvars.push_back(true);
+ WeakIvars.push_back(false);
+ break;
+ case Qualifiers::OCL_Weak:
+ StrongIvars.push_back(false);
+ WeakIvars.push_back(true);
+ break;
+ default:
+ StrongIvars.push_back(false);
+ WeakIvars.push_back(false);
+ }
}
+ llvm::Constant *StrongIvarBitmap = MakeBitField(StrongIvars);
+ llvm::Constant *WeakIvarBitmap = MakeBitField(WeakIvars);
llvm::GlobalVariable *IvarOffsetArray =
MakeGlobalArray(PtrToIntTy, IvarOffsetValues, ".ivar.offsets");
// Collect information about instance methods
- llvm::SmallVector<Selector, 16> InstanceMethodSels;
- llvm::SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
+ SmallVector<Selector, 16> InstanceMethodSels;
+ SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
for (ObjCImplementationDecl::instmeth_iterator
iter = OID->instmeth_begin(), endIter = OID->instmeth_end();
iter != endIter ; iter++) {
@@ -1918,8 +1999,8 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
// Collect information about class methods
- llvm::SmallVector<Selector, 16> ClassMethodSels;
- llvm::SmallVector<llvm::Constant*, 16> ClassMethodTypes;
+ SmallVector<Selector, 16> ClassMethodSels;
+ SmallVector<llvm::Constant*, 16> ClassMethodTypes;
for (ObjCImplementationDecl::classmeth_iterator
iter = OID->classmeth_begin(), endIter = OID->classmeth_end();
iter != endIter ; iter++) {
@@ -1929,7 +2010,7 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
ClassMethodTypes.push_back(MakeConstantString(TypeStr));
}
// Collect the names of referenced protocols
- llvm::SmallVector<std::string, 16> Protocols;
+ SmallVector<std::string, 16> Protocols;
const ObjCList<ObjCProtocolDecl> &Protos =ClassDecl->getReferencedProtocols();
for (ObjCList<ObjCProtocolDecl>::iterator I = Protos.begin(),
E = Protos.end(); I != E; ++I)
@@ -1945,7 +2026,7 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
SuperClass = llvm::ConstantPointerNull::get(PtrToInt8Ty);
}
// Empty vector used to construct empty method lists
- llvm::SmallVector<llvm::Constant*, 1> empty;
+ SmallVector<llvm::Constant*, 1> empty;
// Generate the method and instance variable lists
llvm::Constant *MethodList = GenerateMethodList(ClassName, "",
InstanceMethodSels, InstanceMethodTypes, false);
@@ -1963,20 +2044,20 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
// setting up the alias. These are: The base address for the global, the
// ivar array (second field), the ivar in this list (set for each ivar), and
// the offset (third field in ivar structure)
- const llvm::Type *IndexTy = llvm::Type::getInt32Ty(VMContext);
+ llvm::Type *IndexTy = Int32Ty;
llvm::Constant *offsetPointerIndexes[] = {Zeros[0],
llvm::ConstantInt::get(IndexTy, 1), 0,
llvm::ConstantInt::get(IndexTy, 2) };
-
- for (unsigned i = 0, e = OIvars.size(); i != e; ++i) {
- ObjCIvarDecl *IVD = OIvars[i];
+ unsigned ivarIndex = 0;
+ for (const ObjCIvarDecl *IVD = ClassDecl->all_declared_ivar_begin(); IVD;
+ IVD = IVD->getNextIvar()) {
const std::string Name = "__objc_ivar_offset_" + ClassName + '.'
+ IVD->getNameAsString();
- offsetPointerIndexes[2] = llvm::ConstantInt::get(IndexTy, i);
+ offsetPointerIndexes[2] = llvm::ConstantInt::get(IndexTy, ivarIndex);
// Get the correct ivar field
llvm::Constant *offsetValue = llvm::ConstantExpr::getGetElementPtr(
- IvarList, offsetPointerIndexes, 4);
+ IvarList, offsetPointerIndexes);
// Get the existing variable, if one exists.
llvm::GlobalVariable *offset = TheModule.getNamedGlobal(Name);
if (offset) {
@@ -1990,11 +2071,14 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
offset = new llvm::GlobalVariable(TheModule, offsetValue->getType(),
false, llvm::GlobalValue::ExternalLinkage, offsetValue, Name);
}
+ ++ivarIndex;
}
+ llvm::Constant *Zero64 = llvm::ConstantInt::get(Int64Ty, 0);
//Generate metaclass for class methods
llvm::Constant *MetaClassStruct = GenerateClassStructure(NULLPtr,
NULLPtr, 0x12L, ClassName.c_str(), 0, Zeros[0], GenerateIvarList(
- empty, empty, empty), ClassMethodList, NULLPtr, NULLPtr, NULLPtr, true);
+ empty, empty, empty), ClassMethodList, NULLPtr,
+ NULLPtr, NULLPtr, Zero64, Zero64, true);
// Generate the class structure
llvm::Constant *ClassStruct =
@@ -2002,7 +2086,7 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
ClassName.c_str(), 0,
llvm::ConstantInt::get(LongTy, instanceSize), IvarList,
MethodList, GenerateProtocolList(Protocols), IvarOffsetArray,
- Properties);
+ Properties, StrongIvarBitmap, WeakIvarBitmap);
// Resolve the class aliases, if they exist.
if (ClassPtrAlias) {
@@ -2033,7 +2117,7 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
// Add all referenced protocols to a category.
GenerateProtocolHolderCategory();
- const llvm::StructType *SelStructTy = dyn_cast<llvm::StructType>(
+ llvm::StructType *SelStructTy = dyn_cast<llvm::StructType>(
SelectorTy->getElementType());
llvm::Type *SelStructPtrTy = SelectorTy;
if (SelStructTy == 0) {
@@ -2049,7 +2133,7 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
ConstantStrings.size() + 1);
ConstantStrings.push_back(NULLPtr);
- llvm::StringRef StringClass = CGM.getLangOptions().ObjCConstantStringClass;
+ StringRef StringClass = CGM.getLangOptions().ObjCConstantStringClass;
if (StringClass.empty()) StringClass = "NXConstantString";
@@ -2088,8 +2172,8 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
std::string SelNameStr = iter->first.getAsString();
llvm::Constant *SelName = ExportUniqueString(SelNameStr, ".objc_sel_name");
- llvm::SmallVectorImpl<TypedSelector> &Types = iter->second;
- for (llvm::SmallVectorImpl<TypedSelector>::iterator i = Types.begin(),
+ SmallVectorImpl<TypedSelector> &Types = iter->second;
+ for (SmallVectorImpl<TypedSelector>::iterator i = Types.begin(),
e = Types.end() ; i!=e ; i++) {
llvm::Constant *SelectorTypeEncoding = NULLPtr;
@@ -2126,10 +2210,10 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
for (unsigned int i=0 ; i<SelectorCount ; i++) {
llvm::Constant *Idxs[] = {Zeros[0],
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), i), Zeros[0]};
+ llvm::ConstantInt::get(Int32Ty, i), Zeros[0]};
// FIXME: We're generating redundant loads and stores here!
llvm::Constant *SelPtr = llvm::ConstantExpr::getGetElementPtr(SelectorList,
- Idxs, 2);
+ makeArrayRef(Idxs, 2));
// If selectors are defined as an opaque type, cast the pointer to this
// type.
SelPtr = llvm::ConstantExpr::getBitCast(SelPtr, SelectorTy);
@@ -2177,7 +2261,7 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
Elements.push_back(SymTab);
if (RuntimeVersion >= 10)
- switch (CGM.getLangOptions().getGCMode()) {
+ switch (CGM.getLangOptions().getGC()) {
case LangOptions::GCOnly:
Elements.push_back(llvm::ConstantInt::get(IntTy, 2));
break;
@@ -2205,9 +2289,9 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
CGBuilderTy Builder(VMContext);
Builder.SetInsertPoint(EntryBB);
- llvm::Type *ArgTys[] = { llvm::PointerType::getUnqual(ModuleTy) };
llvm::FunctionType *FT =
- llvm::FunctionType::get(Builder.getVoidTy(), ArgTys, true);
+ llvm::FunctionType::get(Builder.getVoidTy(),
+ llvm::PointerType::getUnqual(ModuleTy), true);
llvm::Value *Register = CGM.CreateRuntimeFunction(FT, "__objc_exec_class");
Builder.CreateCall(Register, Module);
Builder.CreateRetVoid();
@@ -2219,13 +2303,13 @@ llvm::Function *CGObjCGNU::GenerateMethod(const ObjCMethodDecl *OMD,
const ObjCContainerDecl *CD) {
const ObjCCategoryImplDecl *OCD =
dyn_cast<ObjCCategoryImplDecl>(OMD->getDeclContext());
- llvm::StringRef CategoryName = OCD ? OCD->getName() : "";
- llvm::StringRef ClassName = CD->getName();
+ StringRef CategoryName = OCD ? OCD->getName() : "";
+ StringRef ClassName = CD->getName();
Selector MethodName = OMD->getSelector();
bool isClassMethod = !OMD->isInstanceMethod();
CodeGenTypes &Types = CGM.getTypes();
- const llvm::FunctionType *MethodTy =
+ llvm::FunctionType *MethodTy =
Types.GetFunctionType(Types.getFunctionInfo(OMD), OMD->isVariadic());
std::string FunctionName = SymbolNameForMethod(ClassName, CategoryName,
MethodName, isClassMethod);
@@ -2285,15 +2369,14 @@ void CGObjCGNU::EmitThrowStmt(CodeGenFunction &CGF,
llvm::Value *ExceptionAsObject;
if (const Expr *ThrowExpr = S.getThrowExpr()) {
- llvm::Value *Exception = CGF.EmitScalarExpr(ThrowExpr);
+ llvm::Value *Exception = CGF.EmitObjCThrowOperand(ThrowExpr);
ExceptionAsObject = Exception;
} else {
assert((!CGF.ObjCEHValueStack.empty() && CGF.ObjCEHValueStack.back()) &&
"Unexpected rethrow outside @catch block.");
ExceptionAsObject = CGF.ObjCEHValueStack.back();
}
- ExceptionAsObject =
- CGF.Builder.CreateBitCast(ExceptionAsObject, IdTy, "tmp");
+ ExceptionAsObject = CGF.Builder.CreateBitCast(ExceptionAsObject, IdTy);
// Note: This may have to be an invoke, if we want to support constructs like:
// @try {
@@ -2341,7 +2424,7 @@ void CGObjCGNU::EmitObjCGlobalAssign(CodeGenFunction &CGF,
B.CreateCall2(GlobalAssignFn, src, dst);
else
// FIXME. Add thread-local assign API
- assert(false && "EmitObjCGlobalAssign - Threal Local API NYI");
+ llvm_unreachable("EmitObjCGlobalAssign - Threal Local API NYI");
}
void CGObjCGNU::EmitObjCIvarAssign(CodeGenFunction &CGF,
@@ -2396,15 +2479,15 @@ llvm::GlobalVariable *CGObjCGNU::ObjCIvarOffsetVariable(
const_cast<ObjCInterfaceDecl *>(ID)))
Offset = ComputeIvarBaseOffset(CGM, ID, Ivar);
- llvm::ConstantInt *OffsetGuess =
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Offset, "ivar");
+ llvm::ConstantInt *OffsetGuess = llvm::ConstantInt::get(Int32Ty, Offset,
+ /*isSigned*/true);
// Don't emit the guess in non-PIC code because the linker will not be able
// to replace it with the real version for a library. In non-PIC code you
// must compile with the fragile ABI if you want to use ivars from a
// GCC-compiled class.
if (CGM.getLangOptions().PICLevel) {
llvm::GlobalVariable *IvarOffsetGV = new llvm::GlobalVariable(TheModule,
- llvm::Type::getInt32Ty(VMContext), false,
+ Int32Ty, false,
llvm::GlobalValue::PrivateLinkage, OffsetGuess, Name+".guess");
IvarOffsetPointer = new llvm::GlobalVariable(TheModule,
IvarOffsetGV->getType(), false, llvm::GlobalValue::LinkOnceAnyLinkage,
@@ -2432,10 +2515,9 @@ LValue CGObjCGNU::EmitObjCValueForIvar(CodeGenFunction &CGF,
static const ObjCInterfaceDecl *FindIvarInterface(ASTContext &Context,
const ObjCInterfaceDecl *OID,
const ObjCIvarDecl *OIVD) {
- llvm::SmallVector<ObjCIvarDecl*, 16> Ivars;
- Context.ShallowCollectObjCIvars(OID, Ivars);
- for (unsigned k = 0, e = Ivars.size(); k != e; ++k) {
- if (OIVD == Ivars[k])
+ for (const ObjCIvarDecl *next = OID->all_declared_ivar_begin(); next;
+ next = next->getNextIvar()) {
+ if (OIVD == next)
return OID;
}
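The hunk above drops the ShallowCollectObjCIvars temporary in favor of walking the interface's chained ivar list directly. As a hedged sketch, the iteration pattern used here and in several later hunks is:

// all_declared_ivar_begin() yields the head of a singly linked list
// threaded through getNextIvar(); no temporary vector is needed.
for (const ObjCIvarDecl *IVD = OID->all_declared_ivar_begin();
     IVD; IVD = IVD->getNextIvar()) {
  // ... inspect IVD (callers above skip unnamed bit-fields) ...
}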
@@ -2461,12 +2543,12 @@ llvm::Value *CGObjCGNU::EmitIvarOffset(CodeGenFunction &CGF,
llvm::Value *Offset = TheModule.getGlobalVariable(name);
if (!Offset)
Offset = new llvm::GlobalVariable(TheModule, IntTy,
- false, llvm::GlobalValue::CommonLinkage,
- 0, name);
+ false, llvm::GlobalValue::LinkOnceAnyLinkage,
+ llvm::Constant::getNullValue(IntTy), name);
return CGF.Builder.CreateLoad(Offset);
}
uint64_t Offset = ComputeIvarBaseOffset(CGF.CGM, Interface, Ivar);
- return llvm::ConstantInt::get(PtrDiffTy, Offset, "ivar");
+ return llvm::ConstantInt::get(PtrDiffTy, Offset, /*isSigned*/true);
}
CGObjCRuntime *
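The EmitIvarOffset hunk above also switches the fallback offset global from a zero-initialized CommonLinkage symbol to LinkOnceAnyLinkage with an explicit null initializer. A condensed sketch of the two lowering paths, with a simplified signature (the real code uses PtrDiffTy for the constant case):

llvm::Value *emitIvarOffsetSketch(bool indirect, llvm::Module &M,
                                  llvm::Type *IntTy, CGBuilderTy &B,
                                  uint64_t staticOffset, StringRef name) {
  if (indirect) { // non-fragile ABI: the runtime patches the global at load
    llvm::GlobalVariable *GV = M.getGlobalVariable(name);
    if (!GV)
      GV = new llvm::GlobalVariable(M, IntTy, /*isConstant=*/false,
                                    llvm::GlobalValue::LinkOnceAnyLinkage,
                                    llvm::Constant::getNullValue(IntTy), name);
    return B.CreateLoad(GV);
  }
  // fragile ABI: the offset is a compile-time constant
  return llvm::ConstantInt::get(IntTy, staticOffset, /*isSigned=*/true);
}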
diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp
index 010b9e174e46..308e0c7d3786 100644
--- a/lib/CodeGen/CGObjCMac.cpp
+++ b/lib/CodeGen/CGObjCMac.cpp
@@ -205,14 +205,14 @@ public:
CodeGen::CodeGenTypes &Types = CGM.getTypes();
ASTContext &Ctx = CGM.getContext();
// id objc_getProperty (id, SEL, ptrdiff_t, bool)
- llvm::SmallVector<CanQualType,4> Params;
+ SmallVector<CanQualType,4> Params;
CanQualType IdType = Ctx.getCanonicalParamType(Ctx.getObjCIdType());
CanQualType SelType = Ctx.getCanonicalParamType(Ctx.getObjCSelType());
Params.push_back(IdType);
Params.push_back(SelType);
Params.push_back(Ctx.getPointerDiffType()->getCanonicalTypeUnqualified());
Params.push_back(Ctx.BoolTy);
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
Types.GetFunctionType(Types.getFunctionInfo(IdType, Params,
FunctionType::ExtInfo()),
false);
@@ -223,7 +223,7 @@ public:
CodeGen::CodeGenTypes &Types = CGM.getTypes();
ASTContext &Ctx = CGM.getContext();
// void objc_setProperty (id, SEL, ptrdiff_t, id, bool, bool)
- llvm::SmallVector<CanQualType,6> Params;
+ SmallVector<CanQualType,6> Params;
CanQualType IdType = Ctx.getCanonicalParamType(Ctx.getObjCIdType());
CanQualType SelType = Ctx.getCanonicalParamType(Ctx.getObjCSelType());
Params.push_back(IdType);
@@ -232,7 +232,7 @@ public:
Params.push_back(IdType);
Params.push_back(Ctx.BoolTy);
Params.push_back(Ctx.BoolTy);
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params,
FunctionType::ExtInfo()),
false);
@@ -244,13 +244,13 @@ public:
CodeGen::CodeGenTypes &Types = CGM.getTypes();
ASTContext &Ctx = CGM.getContext();
// void objc_copyStruct (void *, const void *, size_t, bool, bool)
- llvm::SmallVector<CanQualType,5> Params;
+ SmallVector<CanQualType,5> Params;
Params.push_back(Ctx.VoidPtrTy);
Params.push_back(Ctx.VoidPtrTy);
Params.push_back(Ctx.LongTy);
Params.push_back(Ctx.BoolTy);
Params.push_back(Ctx.BoolTy);
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params,
FunctionType::ExtInfo()),
false);
@@ -261,9 +261,9 @@ public:
CodeGen::CodeGenTypes &Types = CGM.getTypes();
ASTContext &Ctx = CGM.getContext();
// void objc_enumerationMutation (id)
- llvm::SmallVector<CanQualType,1> Params;
+ SmallVector<CanQualType,1> Params;
Params.push_back(Ctx.getCanonicalParamType(Ctx.getObjCIdType()));
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params,
FunctionType::ExtInfo()),
false);
@@ -669,8 +669,8 @@ protected:
unsigned ObjCABI;
// gc ivar layout bitmap calculation helper caches.
- llvm::SmallVector<GC_IVAR, 16> SkipIvars;
- llvm::SmallVector<GC_IVAR, 16> IvarsInfo;
+ SmallVector<GC_IVAR, 16> SkipIvars;
+ SmallVector<GC_IVAR, 16> IvarsInfo;
/// LazySymbols - Symbols to generate a lazy reference for. See
/// DefinedSymbols and FinishModule().
@@ -733,7 +733,7 @@ protected:
/// \param[out] NameOut - The return value.
void GetNameForMethod(const ObjCMethodDecl *OMD,
const ObjCContainerDecl *CD,
- llvm::SmallVectorImpl<char> &NameOut);
+ SmallVectorImpl<char> &NameOut);
/// GetMethodVarName - Return a unique constant for the given
/// selector's name. The return value has type char *.
@@ -775,7 +775,7 @@ protected:
void BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
const llvm::StructLayout *Layout,
const RecordDecl *RD,
- const llvm::SmallVectorImpl<FieldDecl*> &RecFields,
+ const SmallVectorImpl<const FieldDecl*> &RecFields,
unsigned int BytePos, bool ForStrongLayout,
bool &HasUnion);
@@ -786,7 +786,7 @@ protected:
/// EmitPropertyList - Emit the given property list. The return
/// value has type PropertyListPtrTy.
- llvm::Constant *EmitPropertyList(llvm::Twine Name,
+ llvm::Constant *EmitPropertyList(Twine Name,
const Decl *Container,
const ObjCContainerDecl *OCD,
const ObjCCommonTypesHelper &ObjCTypes);
@@ -817,7 +817,7 @@ protected:
/// \param Align - The alignment for the variable, or 0.
/// \param AddToUsed - Whether the variable should be added to
/// "llvm.used".
- llvm::GlobalVariable *CreateMetadataVar(llvm::Twine Name,
+ llvm::GlobalVariable *CreateMetadataVar(Twine Name,
llvm::Constant *Init,
const char *Section,
unsigned Align,
@@ -923,7 +923,7 @@ private:
/// EmitMethodList - Emit the method list for the given
/// implementation. The return value has type MethodListPtrTy.
- llvm::Constant *EmitMethodList(llvm::Twine Name,
+ llvm::Constant *EmitMethodList(Twine Name,
const char *Section,
const ConstantVector &Methods);
@@ -938,7 +938,7 @@ private:
/// - begin, end: The method list to output.
///
/// The return value has type MethodDescriptionListPtrTy.
- llvm::Constant *EmitMethodDescList(llvm::Twine Name,
+ llvm::Constant *EmitMethodDescList(Twine Name,
const char *Section,
const ConstantVector &Methods);
@@ -964,7 +964,7 @@ private:
/// EmitProtocolList - Generate the list of referenced
/// protocols. The return value has type ProtocolListPtrTy.
- llvm::Constant *EmitProtocolList(llvm::Twine Name,
+ llvm::Constant *EmitProtocolList(Twine Name,
ObjCProtocolDecl::protocol_iterator begin,
ObjCProtocolDecl::protocol_iterator end);
@@ -1060,8 +1060,7 @@ public:
/// GetClassGlobal - Return the global variable for the Objective-C
/// class of the given name.
virtual llvm::GlobalVariable *GetClassGlobal(const std::string &Name) {
- assert(false && "CGObjCMac::GetClassGlobal");
- return 0;
+ llvm_unreachable("CGObjCMac::GetClassGlobal");
}
};
@@ -1117,7 +1116,7 @@ private:
/// EmitMethodList - Emit the method list for the given
/// implementation. The return value has type MethodListnfABITy.
- llvm::Constant *EmitMethodList(llvm::Twine Name,
+ llvm::Constant *EmitMethodList(Twine Name,
const char *Section,
const ConstantVector &Methods);
/// EmitIvarList - Emit the ivar list for the given
@@ -1144,7 +1143,7 @@ private:
/// EmitProtocolList - Generate the list of referenced
/// protocols. The return value has type ProtocolListPtrTy.
- llvm::Constant *EmitProtocolList(llvm::Twine Name,
+ llvm::Constant *EmitProtocolList(Twine Name,
ObjCProtocolDecl::protocol_iterator begin,
ObjCProtocolDecl::protocol_iterator end);
@@ -1375,7 +1374,7 @@ static llvm::Constant *getConstantGEP(llvm::LLVMContext &VMContext,
llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), idx0),
llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), idx1)
};
- return llvm::ConstantExpr::getGetElementPtr(C, Idxs, 2);
+ return llvm::ConstantExpr::getGetElementPtr(C, Idxs);
}
/// hasObjCExceptionAttribute - Return true if this class or any super
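The getConstantGEP hunk above tracks LLVM's migration of constant GEPs from a pointer-plus-count interface to ArrayRef. Both spellings, for reference, with C and Idxs as in the hunk:

// Old: llvm::ConstantExpr::getGetElementPtr(C, Idxs, 2);
// New: llvm::ConstantExpr::getGetElementPtr(C, Idxs);      // array decays to ArrayRef
//  or: llvm::ConstantExpr::getGetElementPtr(C, makeArrayRef(Idxs, 2));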
@@ -1418,12 +1417,12 @@ llvm::Constant *CGObjCMac::GetEHType(QualType T) {
if (T->isObjCIdType() ||
T->isObjCQualifiedIdType()) {
return CGM.GetAddrOfRTTIDescriptor(
- CGM.getContext().ObjCIdRedefinitionType, /*ForEH=*/true);
+ CGM.getContext().getObjCIdRedefinitionType(), /*ForEH=*/true);
}
if (T->isObjCClassType() ||
T->isObjCQualifiedClassType()) {
return CGM.GetAddrOfRTTIDescriptor(
- CGM.getContext().ObjCClassRedefinitionType, /*ForEH=*/true);
+ CGM.getContext().getObjCClassRedefinitionType(), /*ForEH=*/true);
}
if (T->isObjCObjectPointerType())
return CGM.GetAddrOfRTTIDescriptor(T, /*ForEH=*/true);
@@ -1510,7 +1509,7 @@ CGObjCMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
}
// FIXME: We shouldn't need to do this cast, rectify the ASTContext and
// ObjCTypes types.
- const llvm::Type *ClassTy =
+ llvm::Type *ClassTy =
CGM.getTypes().ConvertType(CGF.getContext().getObjCClassType());
Target = CGF.Builder.CreateBitCast(Target, ClassTy);
CGF.Builder.CreateStore(Target,
@@ -1549,7 +1548,7 @@ CGObjCCommonMac::EmitMessageSend(CodeGen::CodeGenFunction &CGF,
const ObjCCommonTypesHelper &ObjCTypes) {
CallArgList ActualArgs;
if (!IsSuper)
- Arg0 = CGF.Builder.CreateBitCast(Arg0, ObjCTypes.ObjectPtrTy, "tmp");
+ Arg0 = CGF.Builder.CreateBitCast(Arg0, ObjCTypes.ObjectPtrTy);
ActualArgs.add(RValue::get(Arg0), Arg0Ty);
ActualArgs.add(RValue::get(Sel), CGF.getContext().getObjCSelType());
ActualArgs.addFrom(CallArgs);
@@ -1557,7 +1556,7 @@ CGObjCCommonMac::EmitMessageSend(CodeGen::CodeGenFunction &CGF,
CodeGenTypes &Types = CGM.getTypes();
const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs,
FunctionType::ExtInfo());
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
Types.GetFunctionType(FnInfo, Method ? Method->isVariadic() : false);
if (Method)
@@ -1605,15 +1604,15 @@ llvm::Constant *CGObjCCommonMac::BuildGCBlockLayout(CodeGenModule &CGM,
llvm::Constant *nullPtr =
llvm::Constant::getNullValue(llvm::Type::getInt8PtrTy(VMContext));
- if (CGM.getLangOptions().getGCMode() == LangOptions::NonGC &&
+ if (CGM.getLangOptions().getGC() == LangOptions::NonGC &&
!CGM.getLangOptions().ObjCAutoRefCount)
return nullPtr;
bool hasUnion = false;
SkipIvars.clear();
IvarsInfo.clear();
- unsigned WordSizeInBits = CGM.getContext().Target.getPointerWidth(0);
- unsigned ByteSizeInBits = CGM.getContext().Target.getCharWidth();
+ unsigned WordSizeInBits = CGM.getContext().getTargetInfo().getPointerWidth(0);
+ unsigned ByteSizeInBits = CGM.getContext().getTargetInfo().getCharWidth();
// __isa is the first field in the block descriptor and, by the runtime's
// convention, must be assumed to be GC'able.
@@ -1878,7 +1877,7 @@ CGObjCMac::EmitProtocolExtension(const ObjCProtocolDecl *PD,
};
*/
llvm::Constant *
-CGObjCMac::EmitProtocolList(llvm::Twine Name,
+CGObjCMac::EmitProtocolList(Twine Name,
ObjCProtocolDecl::protocol_iterator begin,
ObjCProtocolDecl::protocol_iterator end) {
std::vector<llvm::Constant*> ProtocolRefs;
@@ -1942,7 +1941,7 @@ void CGObjCCommonMac::PushProtocolProperties(llvm::SmallPtrSet<const IdentifierI
struct _objc_property[prop_count];
};
*/
-llvm::Constant *CGObjCCommonMac::EmitPropertyList(llvm::Twine Name,
+llvm::Constant *CGObjCCommonMac::EmitPropertyList(Twine Name,
const Decl *Container,
const ObjCContainerDecl *OCD,
const ObjCCommonTypesHelper &ObjCTypes) {
@@ -2014,7 +2013,7 @@ CGObjCMac::GetMethodDescriptionConstant(const ObjCMethodDecl *MD) {
Desc);
}
-llvm::Constant *CGObjCMac::EmitMethodDescList(llvm::Twine Name,
+llvm::Constant *CGObjCMac::EmitMethodDescList(Twine Name,
const char *Section,
const ConstantVector &Methods) {
// Return null for empty list.
@@ -2407,10 +2406,9 @@ llvm::Constant *CGObjCMac::EmitIvarList(const ObjCImplementationDecl *ID,
if (ForClass)
return llvm::Constant::getNullValue(ObjCTypes.IvarListPtrTy);
- ObjCInterfaceDecl *OID =
- const_cast<ObjCInterfaceDecl*>(ID->getClassInterface());
+ const ObjCInterfaceDecl *OID = ID->getClassInterface();
- for (ObjCIvarDecl *IVD = OID->all_declared_ivar_begin();
+ for (const ObjCIvarDecl *IVD = OID->all_declared_ivar_begin();
IVD; IVD = IVD->getNextIvar()) {
// Ignore unnamed bit-fields.
if (!IVD->getDeclName())
@@ -2476,7 +2474,7 @@ llvm::Constant *CGObjCMac::GetMethodConstant(const ObjCMethodDecl *MD) {
return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Method);
}
-llvm::Constant *CGObjCMac::EmitMethodList(llvm::Twine Name,
+llvm::Constant *CGObjCMac::EmitMethodList(Twine Name,
const char *Section,
const ConstantVector &Methods) {
// Return null for empty list.
@@ -2501,7 +2499,7 @@ llvm::Function *CGObjCCommonMac::GenerateMethod(const ObjCMethodDecl *OMD,
GetNameForMethod(OMD, CD, Name);
CodeGenTypes &Types = CGM.getTypes();
- const llvm::FunctionType *MethodTy =
+ llvm::FunctionType *MethodTy =
Types.GetFunctionType(Types.getFunctionInfo(OMD), OMD->isVariadic());
llvm::Function *Method =
llvm::Function::Create(MethodTy,
@@ -2514,12 +2512,12 @@ llvm::Function *CGObjCCommonMac::GenerateMethod(const ObjCMethodDecl *OMD,
}
llvm::GlobalVariable *
-CGObjCCommonMac::CreateMetadataVar(llvm::Twine Name,
+CGObjCCommonMac::CreateMetadataVar(Twine Name,
llvm::Constant *Init,
const char *Section,
unsigned Align,
bool AddToUsed) {
- const llvm::Type *Ty = Init->getType();
+ llvm::Type *Ty = Init->getType();
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(CGM.getModule(), Ty, false,
llvm::GlobalValue::InternalLinkage, Init, Name);
@@ -2627,7 +2625,7 @@ namespace {
class FragileHazards {
CodeGenFunction &CGF;
- llvm::SmallVector<llvm::Value*, 20> Locals;
+ SmallVector<llvm::Value*, 20> Locals;
llvm::DenseSet<llvm::BasicBlock*> BlocksBeforeTry;
llvm::InlineAsm *ReadHazard;
@@ -2754,7 +2752,6 @@ void FragileHazards::collectLocals() {
llvm::DenseSet<llvm::Value*> AllocasToIgnore;
addIfPresent(AllocasToIgnore, CGF.ReturnValue);
addIfPresent(AllocasToIgnore, CGF.NormalCleanupDest);
- addIfPresent(AllocasToIgnore, CGF.EHCleanupDest);
// Collect all the allocas currently in the function. This is
// probably way too aggressive.
@@ -2766,7 +2763,7 @@ void FragileHazards::collectLocals() {
}
llvm::FunctionType *FragileHazards::GetAsmFnType() {
- llvm::SmallVector<llvm::Type *, 16> tys(Locals.size());
+ SmallVector<llvm::Type *, 16> tys(Locals.size());
for (unsigned i = 0, e = Locals.size(); i != e; ++i)
tys[i] = Locals[i]->getType();
return llvm::FunctionType::get(CGF.VoidTy, tys, false);
@@ -2958,7 +2955,7 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
llvm::Constant *Zero = llvm::ConstantInt::get(CGF.Builder.getInt32Ty(), 0);
llvm::Value *GEPIndexes[] = { Zero, Zero, Zero };
llvm::Value *SetJmpBuffer =
- CGF.Builder.CreateGEP(ExceptionData, GEPIndexes, GEPIndexes+3, "setjmp_buffer");
+ CGF.Builder.CreateGEP(ExceptionData, GEPIndexes, "setjmp_buffer");
llvm::CallInst *SetJmpResult =
CGF.Builder.CreateCall(ObjCTypes.getSetJmpFn(), SetJmpBuffer, "setjmp_result");
SetJmpResult->setDoesNotThrow();
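For readers unfamiliar with the fragile-ABI scheme this hunk touches: @try is implemented with setjmp into per-frame exception data. A hedged source-level expansion (the function names are the runtime entry points this file already wraps via ObjCTypes):

// struct _objc_exception_data d;
// objc_exception_try_enter(&d);
// if (_setjmp(d.buf) == 0) {
//   ... @try body ...
// } else {
//   id caught = objc_exception_extract(&d);
//   ... match against @catch clauses, rethrow if none fits ...
// }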
@@ -3119,8 +3116,7 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// Initialize the catch variable.
llvm::Value *Tmp =
CGF.Builder.CreateBitCast(Caught,
- CGF.ConvertType(CatchParam->getType()),
- "tmp");
+ CGF.ConvertType(CatchParam->getType()));
CGF.Builder.CreateStore(Tmp, CGF.GetAddrOfLocalVar(CatchParam));
CGF.EmitStmt(CatchStmt->getCatchBody());
@@ -3208,9 +3204,9 @@ void CGObjCMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
llvm::Value *ExceptionAsObject;
if (const Expr *ThrowExpr = S.getThrowExpr()) {
- llvm::Value *Exception = CGF.EmitScalarExpr(ThrowExpr);
+ llvm::Value *Exception = CGF.EmitObjCThrowOperand(ThrowExpr);
ExceptionAsObject =
- CGF.Builder.CreateBitCast(Exception, ObjCTypes.ObjectPtrTy, "tmp");
+ CGF.Builder.CreateBitCast(Exception, ObjCTypes.ObjectPtrTy);
} else {
assert((!CGF.ObjCEHValueStack.empty() && CGF.ObjCEHValueStack.back()) &&
"Unexpected rethrow outside @catch block.");
@@ -3230,7 +3226,7 @@ void CGObjCMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
///
llvm::Value * CGObjCMac::EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
llvm::Value *AddrWeakObj) {
- const llvm::Type* DestTy =
+ llvm::Type* DestTy =
cast<llvm::PointerType>(AddrWeakObj->getType())->getElementType();
AddrWeakObj = CGF.Builder.CreateBitCast(AddrWeakObj,
ObjCTypes.PtrObjectPtrTy);
@@ -3245,7 +3241,7 @@ llvm::Value * CGObjCMac::EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
///
void CGObjCMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dst) {
- const llvm::Type * SrcTy = src->getType();
+ llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
@@ -3266,7 +3262,7 @@ void CGObjCMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
void CGObjCMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dst,
bool threadlocal) {
- const llvm::Type * SrcTy = src->getType();
+ llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
@@ -3292,7 +3288,7 @@ void CGObjCMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dst,
llvm::Value *ivarOffset) {
assert(ivarOffset && "EmitObjCIvarAssign - ivarOffset is NULL");
- const llvm::Type * SrcTy = src->getType();
+ llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
@@ -3312,7 +3308,7 @@ void CGObjCMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
///
void CGObjCMac::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dst) {
- const llvm::Type * SrcTy = src->getType();
+ llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
@@ -3386,15 +3382,15 @@ void CGObjCCommonMac::EmitImageInfo() {
unsigned flags = 0;
// FIXME: Fix and continue?
- if (CGM.getLangOptions().getGCMode() != LangOptions::NonGC)
+ if (CGM.getLangOptions().getGC() != LangOptions::NonGC)
flags |= eImageInfo_GarbageCollected;
- if (CGM.getLangOptions().getGCMode() == LangOptions::GCOnly)
+ if (CGM.getLangOptions().getGC() == LangOptions::GCOnly)
flags |= eImageInfo_GCOnly;
// We never allow @synthesize of a superclass property.
flags |= eImageInfo_CorrectedSynthesize;
- const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
+ llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
// Emitted as int[2];
llvm::Constant *values[2] = {
@@ -3498,7 +3494,7 @@ llvm::Value *CGObjCMac::EmitClassRefFromId(CGBuilderTy &Builder,
4, true);
}
- return Builder.CreateLoad(Entry, "tmp");
+ return Builder.CreateLoad(Entry);
}
llvm::Value *CGObjCMac::EmitClassRef(CGBuilderTy &Builder,
@@ -3527,7 +3523,7 @@ llvm::Value *CGObjCMac::EmitSelector(CGBuilderTy &Builder, Selector Sel,
if (lvalue)
return Entry;
- return Builder.CreateLoad(Entry, "tmp");
+ return Builder.CreateLoad(Entry);
}
llvm::Constant *CGObjCCommonMac::GetClassName(IdentifierInfo *Ident) {
@@ -3551,13 +3547,6 @@ llvm::Function *CGObjCCommonMac::GetMethodDefinition(const ObjCMethodDecl *MD) {
if (I != MethodDefinitions.end())
return I->second;
- if (MD->hasBody() && MD->getPCHLevel() > 0) {
- // MD isn't emitted yet because it comes from PCH.
- CGM.EmitTopLevelDecl(const_cast<ObjCMethodDecl*>(MD));
- assert(MethodDefinitions[MD] && "EmitTopLevelDecl didn't emit the method!");
- return MethodDefinitions[MD];
- }
-
return NULL;
}
@@ -3574,8 +3563,8 @@ void CGObjCCommonMac::BuildAggrIvarRecordLayout(const RecordType *RT,
bool &HasUnion) {
const RecordDecl *RD = RT->getDecl();
// FIXME - Use iterator.
- llvm::SmallVector<FieldDecl*, 16> Fields(RD->field_begin(), RD->field_end());
- const llvm::Type *Ty = CGM.getTypes().ConvertType(QualType(RT, 0));
+ SmallVector<const FieldDecl*, 16> Fields(RD->field_begin(), RD->field_end());
+ llvm::Type *Ty = CGM.getTypes().ConvertType(QualType(RT, 0));
const llvm::StructLayout *RecLayout =
CGM.getTargetData().getStructLayout(cast<llvm::StructType>(Ty));
@@ -3586,15 +3575,15 @@ void CGObjCCommonMac::BuildAggrIvarRecordLayout(const RecordType *RT,
void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
const llvm::StructLayout *Layout,
const RecordDecl *RD,
- const llvm::SmallVectorImpl<FieldDecl*> &RecFields,
+ const SmallVectorImpl<const FieldDecl*> &RecFields,
unsigned int BytePos, bool ForStrongLayout,
bool &HasUnion) {
bool IsUnion = (RD && RD->isUnion());
uint64_t MaxUnionIvarSize = 0;
uint64_t MaxSkippedUnionIvarSize = 0;
- FieldDecl *MaxField = 0;
- FieldDecl *MaxSkippedField = 0;
- FieldDecl *LastFieldBitfieldOrUnnamed = 0;
+ const FieldDecl *MaxField = 0;
+ const FieldDecl *MaxSkippedField = 0;
+ const FieldDecl *LastFieldBitfieldOrUnnamed = 0;
uint64_t MaxFieldOffset = 0;
uint64_t MaxSkippedFieldOffset = 0;
uint64_t LastBitfieldOrUnnamedOffset = 0;
@@ -3602,16 +3591,16 @@ void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
if (RecFields.empty())
return;
- unsigned WordSizeInBits = CGM.getContext().Target.getPointerWidth(0);
- unsigned ByteSizeInBits = CGM.getContext().Target.getCharWidth();
+ unsigned WordSizeInBits = CGM.getContext().getTargetInfo().getPointerWidth(0);
+ unsigned ByteSizeInBits = CGM.getContext().getTargetInfo().getCharWidth();
if (!RD && CGM.getLangOptions().ObjCAutoRefCount) {
- FieldDecl *FirstField = RecFields[0];
+ const FieldDecl *FirstField = RecFields[0];
FirstFieldDelta =
ComputeIvarBaseOffset(CGM, OI, cast<ObjCIvarDecl>(FirstField));
}
for (unsigned i = 0, e = RecFields.size(); i != e; ++i) {
- FieldDecl *Field = RecFields[i];
+ const FieldDecl *Field = RecFields[i];
uint64_t FieldOffset;
if (RD) {
// Note that 'i' here is actually the field index inside RD of Field,
@@ -3721,9 +3710,8 @@ void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
if (LastFieldBitfieldOrUnnamed) {
if (LastFieldBitfieldOrUnnamed->isBitField()) {
// Last field was a bitfield. Must update skip info.
- Expr *BitWidth = LastFieldBitfieldOrUnnamed->getBitWidth();
- uint64_t BitFieldSize =
- BitWidth->EvaluateAsInt(CGM.getContext()).getZExtValue();
+ uint64_t BitFieldSize
+ = LastFieldBitfieldOrUnnamed->getBitWidthValue(CGM.getContext());
GC_IVAR skivar;
skivar.ivar_bytepos = BytePos + LastBitfieldOrUnnamedOffset;
skivar.ivar_size = (BitFieldSize / ByteSizeInBits)
@@ -3754,10 +3742,10 @@ void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
/// filled already by the caller.
llvm::Constant *CGObjCCommonMac::BuildIvarLayoutBitmap(std::string& BitMap) {
unsigned int WordsToScan, WordsToSkip;
- const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+ llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
// Build the string of skip/scan nibbles
- llvm::SmallVector<SKIP_SCAN, 32> SkipScanIvars;
+ SmallVector<SKIP_SCAN, 32> SkipScanIvars;
unsigned int WordSize =
CGM.getTypes().getTargetData().getTypeAllocSize(PtrTy);
if (IvarsInfo[0].ivar_bytepos == 0) {
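The skip/scan vector built here is later flattened into the GC ivar-layout string, which packs a skip word count and a scan word count into the two nibbles of each byte. A hedged sketch of that encoding (runs of sixteen or more words span multiple bytes):

#include <cassert>

// e.g. one non-pointer word followed by three strong pointers -> 0x13.
unsigned char encodeSkipScan(unsigned skipWords, unsigned scanWords) {
  assert(skipWords < 16 && scanWords < 16 && "long runs span multiple bytes");
  return (unsigned char)((skipWords << 4) | scanWords);
}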
@@ -3898,25 +3886,24 @@ llvm::Constant *CGObjCCommonMac::BuildIvarLayout(
bool ForStrongLayout) {
bool hasUnion = false;
- const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
- if (CGM.getLangOptions().getGCMode() == LangOptions::NonGC &&
+ llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+ if (CGM.getLangOptions().getGC() == LangOptions::NonGC &&
!CGM.getLangOptions().ObjCAutoRefCount)
return llvm::Constant::getNullValue(PtrTy);
- ObjCInterfaceDecl *OI =
- const_cast<ObjCInterfaceDecl*>(OMD->getClassInterface());
- llvm::SmallVector<FieldDecl*, 32> RecFields;
+ const ObjCInterfaceDecl *OI = OMD->getClassInterface();
+ SmallVector<const FieldDecl*, 32> RecFields;
if (CGM.getLangOptions().ObjCAutoRefCount) {
- for (ObjCIvarDecl *IVD = OI->all_declared_ivar_begin();
+ for (const ObjCIvarDecl *IVD = OI->all_declared_ivar_begin();
IVD; IVD = IVD->getNextIvar())
RecFields.push_back(cast<FieldDecl>(IVD));
}
else {
- llvm::SmallVector<ObjCIvarDecl*, 32> Ivars;
+ SmallVector<const ObjCIvarDecl*, 32> Ivars;
CGM.getContext().DeepCollectObjCIvars(OI, true, Ivars);
- for (unsigned k = 0, e = Ivars.size(); k != e; ++k)
- RecFields.push_back(cast<FieldDecl>(Ivars[k]));
+ // FIXME: This is not ideal; we shouldn't have to do this copy.
+ RecFields.append(Ivars.begin(), Ivars.end());
}
if (RecFields.empty())
@@ -4036,7 +4023,7 @@ CGObjCCommonMac::GetPropertyTypeString(const ObjCPropertyDecl *PD,
void CGObjCCommonMac::GetNameForMethod(const ObjCMethodDecl *D,
const ObjCContainerDecl *CD,
- llvm::SmallVectorImpl<char> &Name) {
+ SmallVectorImpl<char> &Name) {
llvm::raw_svector_ostream OS(Name);
assert (CD && "Missing container decl in GetNameForMethod");
OS << '\01' << (D->isInstanceMethod() ? '-' : '+')
@@ -4125,7 +4112,7 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
// FIXME: It would be nice to unify this with the opaque type, so that the IR
// comes out a bit cleaner.
- const llvm::Type *T = Types.ConvertType(Ctx.getObjCProtoType());
+ llvm::Type *T = Types.ConvertType(Ctx.getObjCProtoType());
ExternalProtocolPtrTy = llvm::PointerType::getUnqual(T);
// I'm not sure I like this. The implicit coordination is a bit
@@ -4160,8 +4147,8 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
// char *name;
// char *attributes;
// }
- PropertyTy = llvm::StructType::createNamed("struct._prop_t",
- Int8PtrTy, Int8PtrTy, NULL);
+ PropertyTy = llvm::StructType::create("struct._prop_t",
+ Int8PtrTy, Int8PtrTy, NULL);
// struct _prop_list_t {
// uint32_t entsize; // sizeof(struct _prop_t)
@@ -4169,10 +4156,8 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
// struct _prop_t prop_list[count_of_properties];
// }
PropertyListTy =
- llvm::StructType::createNamed("struct._prop_list_t",
- IntTy, IntTy,
- llvm::ArrayType::get(PropertyTy, 0),
- NULL);
+ llvm::StructType::create("struct._prop_list_t", IntTy, IntTy,
+ llvm::ArrayType::get(PropertyTy, 0), NULL);
// struct _prop_list_t *
PropertyListPtrTy = llvm::PointerType::getUnqual(PropertyListTy);
@@ -4181,12 +4166,12 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
// char *method_type;
// char *_imp;
// }
- MethodTy = llvm::StructType::createNamed("struct._objc_method",
- SelectorPtrTy, Int8PtrTy, Int8PtrTy,
- NULL);
+ MethodTy = llvm::StructType::create("struct._objc_method",
+ SelectorPtrTy, Int8PtrTy, Int8PtrTy,
+ NULL);
// struct _objc_cache *
- CacheTy = llvm::StructType::createNamed(VMContext, "struct._objc_cache");
+ CacheTy = llvm::StructType::create(VMContext, "struct._objc_cache");
CachePtrTy = llvm::PointerType::getUnqual(CacheTy);
}
@@ -4198,18 +4183,17 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
// char *types;
// }
MethodDescriptionTy =
- llvm::StructType::createNamed("struct._objc_method_description",
- SelectorPtrTy, Int8PtrTy, NULL);
+ llvm::StructType::create("struct._objc_method_description",
+ SelectorPtrTy, Int8PtrTy, NULL);
// struct _objc_method_description_list {
// int count;
// struct _objc_method_description[1];
// }
MethodDescriptionListTy =
- llvm::StructType::createNamed("struct._objc_method_description_list",
- IntTy,
- llvm::ArrayType::get(MethodDescriptionTy, 0),
- NULL);
+ llvm::StructType::create("struct._objc_method_description_list",
+ IntTy,
+ llvm::ArrayType::get(MethodDescriptionTy, 0),NULL);
// struct _objc_method_description_list *
MethodDescriptionListPtrTy =
@@ -4224,12 +4208,10 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
// struct _objc_property_list *instance_properties;
// }
ProtocolExtensionTy =
- llvm::StructType::createNamed("struct._objc_protocol_extension",
- IntTy,
- MethodDescriptionListPtrTy,
- MethodDescriptionListPtrTy,
- PropertyListPtrTy,
- NULL);
+ llvm::StructType::create("struct._objc_protocol_extension",
+ IntTy, MethodDescriptionListPtrTy,
+ MethodDescriptionListPtrTy, PropertyListPtrTy,
+ NULL);
// struct _objc_protocol_extension *
ProtocolExtensionPtrTy = llvm::PointerType::getUnqual(ProtocolExtensionTy);
@@ -4237,10 +4219,10 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
// Handle recursive construction of Protocol and ProtocolList types
ProtocolTy =
- llvm::StructType::createNamed(VMContext, "struct._objc_protocol");
+ llvm::StructType::create(VMContext, "struct._objc_protocol");
ProtocolListTy =
- llvm::StructType::createNamed(VMContext, "struct._objc_protocol_list");
+ llvm::StructType::create(VMContext, "struct._objc_protocol_list");
ProtocolListTy->setBody(llvm::PointerType::getUnqual(ProtocolListTy),
LongTy,
llvm::ArrayType::get(ProtocolTy, 0),
@@ -4271,26 +4253,26 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
// char *ivar_type;
// int ivar_offset;
// }
- IvarTy = llvm::StructType::createNamed("struct._objc_ivar",
- Int8PtrTy, Int8PtrTy, IntTy, NULL);
+ IvarTy = llvm::StructType::create("struct._objc_ivar",
+ Int8PtrTy, Int8PtrTy, IntTy, NULL);
// struct _objc_ivar_list *
IvarListTy =
- llvm::StructType::createNamed(VMContext, "struct._objc_ivar_list");
+ llvm::StructType::create(VMContext, "struct._objc_ivar_list");
IvarListPtrTy = llvm::PointerType::getUnqual(IvarListTy);
// struct _objc_method_list *
MethodListTy =
- llvm::StructType::createNamed(VMContext, "struct._objc_method_list");
+ llvm::StructType::create(VMContext, "struct._objc_method_list");
MethodListPtrTy = llvm::PointerType::getUnqual(MethodListTy);
// struct _objc_class_extension *
ClassExtensionTy =
- llvm::StructType::createNamed("struct._objc_class_extension",
- IntTy, Int8PtrTy, PropertyListPtrTy, NULL);
+ llvm::StructType::create("struct._objc_class_extension",
+ IntTy, Int8PtrTy, PropertyListPtrTy, NULL);
ClassExtensionPtrTy = llvm::PointerType::getUnqual(ClassExtensionTy);
- ClassTy = llvm::StructType::createNamed(VMContext, "struct._objc_class");
+ ClassTy = llvm::StructType::create(VMContext, "struct._objc_class");
// struct _objc_class {
// Class isa;
@@ -4331,10 +4313,10 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
// struct _objc_property_list *instance_properties;// category's @property
// }
CategoryTy =
- llvm::StructType::createNamed("struct._objc_category",
- Int8PtrTy, Int8PtrTy, MethodListPtrTy,
- MethodListPtrTy, ProtocolListPtrTy,
- IntTy, PropertyListPtrTy, NULL);
+ llvm::StructType::create("struct._objc_category",
+ Int8PtrTy, Int8PtrTy, MethodListPtrTy,
+ MethodListPtrTy, ProtocolListPtrTy,
+ IntTy, PropertyListPtrTy, NULL);
// Global metadata structures
@@ -4346,9 +4328,9 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
// char *defs[cls_def_cnt + cat_def_cnt];
// }
SymtabTy =
- llvm::StructType::createNamed("struct._objc_symtab",
- LongTy, SelectorPtrTy, ShortTy, ShortTy,
- llvm::ArrayType::get(Int8PtrTy, 0), NULL);
+ llvm::StructType::create("struct._objc_symtab",
+ LongTy, SelectorPtrTy, ShortTy, ShortTy,
+ llvm::ArrayType::get(Int8PtrTy, 0), NULL);
SymtabPtrTy = llvm::PointerType::getUnqual(SymtabTy);
// struct _objc_module {
@@ -4358,8 +4340,8 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
// struct _objc_symtab* symtab;
// }
ModuleTy =
- llvm::StructType::createNamed("struct._objc_module",
- LongTy, LongTy, Int8PtrTy, SymtabPtrTy, NULL);
+ llvm::StructType::create("struct._objc_module",
+ LongTy, LongTy, Int8PtrTy, SymtabPtrTy, NULL);
// FIXME: This is the size of the setjmp buffer and should be target
@@ -4371,7 +4353,7 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
llvm::Type::getInt8PtrTy(VMContext), 4);
ExceptionDataTy =
- llvm::StructType::createNamed("struct._objc_exception_data",
+ llvm::StructType::create("struct._objc_exception_data",
llvm::ArrayType::get(llvm::Type::getInt32Ty(VMContext),
SetJmpBufferSize),
StackPtrTy, NULL);
@@ -4386,10 +4368,8 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// struct _objc_method method_list[method_count];
// }
MethodListnfABITy =
- llvm::StructType::createNamed("struct.__method_list_t",
- IntTy, IntTy,
- llvm::ArrayType::get(MethodTy, 0),
- NULL);
+ llvm::StructType::create("struct.__method_list_t", IntTy, IntTy,
+ llvm::ArrayType::get(MethodTy, 0), NULL);
// struct method_list_t *
MethodListnfABIPtrTy = llvm::PointerType::getUnqual(MethodListnfABITy);
@@ -4408,20 +4388,14 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// Holder for struct _protocol_list_t *
ProtocolListnfABITy =
- llvm::StructType::createNamed(VMContext, "struct._objc_protocol_list");
+ llvm::StructType::create(VMContext, "struct._objc_protocol_list");
ProtocolnfABITy =
- llvm::StructType::createNamed("struct._protocol_t",
- ObjectPtrTy, Int8PtrTy,
- llvm::PointerType::getUnqual(ProtocolListnfABITy),
- MethodListnfABIPtrTy,
- MethodListnfABIPtrTy,
- MethodListnfABIPtrTy,
- MethodListnfABIPtrTy,
- PropertyListPtrTy,
- IntTy,
- IntTy,
- NULL);
+ llvm::StructType::create("struct._protocol_t", ObjectPtrTy, Int8PtrTy,
+ llvm::PointerType::getUnqual(ProtocolListnfABITy),
+ MethodListnfABIPtrTy, MethodListnfABIPtrTy,
+ MethodListnfABIPtrTy, MethodListnfABIPtrTy,
+ PropertyListPtrTy, IntTy, IntTy, NULL);
// struct _protocol_t*
ProtocolnfABIPtrTy = llvm::PointerType::getUnqual(ProtocolnfABITy);
@@ -4445,13 +4419,9 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// uint32_t size;
// }
IvarnfABITy =
- llvm::StructType::createNamed("struct._ivar_t",
- llvm::PointerType::getUnqual(LongTy),
- Int8PtrTy,
- Int8PtrTy,
- IntTy,
- IntTy,
- NULL);
+ llvm::StructType::create("struct._ivar_t",
+ llvm::PointerType::getUnqual(LongTy),
+ Int8PtrTy, Int8PtrTy, IntTy, IntTy, NULL);
// struct _ivar_list_t {
// uint32 entsize; // sizeof(struct _ivar_t)
@@ -4459,10 +4429,8 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// struct _ivar_t list[count];
// }
IvarListnfABITy =
- llvm::StructType::createNamed("struct._ivar_list_t",
- IntTy, IntTy,
- llvm::ArrayType::get(IvarnfABITy, 0),
- NULL);
+ llvm::StructType::create("struct._ivar_list_t", IntTy, IntTy,
+ llvm::ArrayType::get(IvarnfABITy, 0), NULL);
IvarListnfABIPtrTy = llvm::PointerType::getUnqual(IvarListnfABITy);
@@ -4481,18 +4449,12 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// }
// FIXME. Add 'reserved' field in 64-bit ABI mode!
- ClassRonfABITy = llvm::StructType::createNamed("struct._class_ro_t",
- IntTy,
- IntTy,
- IntTy,
- Int8PtrTy,
- Int8PtrTy,
- MethodListnfABIPtrTy,
- ProtocolListnfABIPtrTy,
- IvarListnfABIPtrTy,
- Int8PtrTy,
- PropertyListPtrTy,
- NULL);
+ ClassRonfABITy = llvm::StructType::create("struct._class_ro_t",
+ IntTy, IntTy, IntTy, Int8PtrTy,
+ Int8PtrTy, MethodListnfABIPtrTy,
+ ProtocolListnfABIPtrTy,
+ IvarListnfABIPtrTy,
+ Int8PtrTy, PropertyListPtrTy, NULL);
// ImpnfABITy - LLVM for id (*)(id, SEL, ...)
llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
@@ -4507,7 +4469,7 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// struct class_ro_t *ro;
// }
- ClassnfABITy = llvm::StructType::createNamed(VMContext, "struct._class_t");
+ ClassnfABITy = llvm::StructType::create(VMContext, "struct._class_t");
ClassnfABITy->setBody(llvm::PointerType::getUnqual(ClassnfABITy),
llvm::PointerType::getUnqual(ClassnfABITy),
CachePtrTy,
@@ -4526,14 +4488,13 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// const struct _protocol_list_t * const protocols;
// const struct _prop_list_t * const properties;
// }
- CategorynfABITy = llvm::StructType::createNamed("struct._category_t",
- Int8PtrTy,
- ClassnfABIPtrTy,
- MethodListnfABIPtrTy,
- MethodListnfABIPtrTy,
- ProtocolListnfABIPtrTy,
- PropertyListPtrTy,
- NULL);
+ CategorynfABITy = llvm::StructType::create("struct._category_t",
+ Int8PtrTy, ClassnfABIPtrTy,
+ MethodListnfABIPtrTy,
+ MethodListnfABIPtrTy,
+ ProtocolListnfABIPtrTy,
+ PropertyListPtrTy,
+ NULL);
// New types for nonfragile abi messaging.
CodeGen::CodeGenTypes &Types = CGM.getTypes();
@@ -4569,8 +4530,8 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// SEL name;
// };
SuperMessageRefTy =
- llvm::StructType::createNamed("struct._super_message_ref_t",
- ImpnfABITy, SelectorPtrTy, NULL);
+ llvm::StructType::create("struct._super_message_ref_t",
+ ImpnfABITy, SelectorPtrTy, NULL);
// SuperMessageRefPtrTy - LLVM for struct _super_message_ref_t*
SuperMessageRefPtrTy = llvm::PointerType::getUnqual(SuperMessageRefTy);
@@ -4582,11 +4543,9 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// Class cls;
// };
EHTypeTy =
- llvm::StructType::createNamed("struct._objc_typeinfo",
- llvm::PointerType::getUnqual(Int8PtrTy),
- Int8PtrTy,
- ClassnfABIPtrTy,
- NULL);
+ llvm::StructType::create("struct._objc_typeinfo",
+ llvm::PointerType::getUnqual(Int8PtrTy),
+ Int8PtrTy, ClassnfABIPtrTy, NULL);
EHTypePtrTy = llvm::PointerType::getUnqual(EHTypeTy);
}
@@ -4694,7 +4653,7 @@ bool CGObjCNonFragileABIMac::isVTableDispatchedSelector(Selector Sel) {
// These are vtable-based if GC is disabled.
// Optimistically use vtable dispatch for hybrid compiles.
- if (CGM.getLangOptions().getGCMode() != LangOptions::GCOnly) {
+ if (CGM.getLangOptions().getGC() != LangOptions::GCOnly) {
VTableDispatchMethods.insert(GetNullarySelector("retain"));
VTableDispatchMethods.insert(GetNullarySelector("release"));
VTableDispatchMethods.insert(GetNullarySelector("autorelease"));
@@ -4710,7 +4669,7 @@ bool CGObjCNonFragileABIMac::isVTableDispatchedSelector(Selector Sel) {
// These are vtable-based if GC is enabled.
// Optimistically use vtable dispatch for hybrid compiles.
- if (CGM.getLangOptions().getGCMode() != LangOptions::NonGC) {
+ if (CGM.getLangOptions().getGC() != LangOptions::NonGC) {
VTableDispatchMethods.insert(GetNullarySelector("hash"));
VTableDispatchMethods.insert(GetUnarySelector("addObject"));
@@ -5035,7 +4994,7 @@ llvm::Value *CGObjCNonFragileABIMac::GenerateProtocolRef(CGBuilderTy &Builder,
llvm::GlobalVariable *PTGV = CGM.getModule().getGlobalVariable(ProtocolName);
if (PTGV)
- return Builder.CreateLoad(PTGV, "tmp");
+ return Builder.CreateLoad(PTGV);
PTGV = new llvm::GlobalVariable(
CGM.getModule(),
Init->getType(), false,
@@ -5045,7 +5004,7 @@ llvm::Value *CGObjCNonFragileABIMac::GenerateProtocolRef(CGBuilderTy &Builder,
PTGV->setSection("__DATA, __objc_protorefs, coalesced, no_dead_strip");
PTGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
CGM.AddUsedGlobal(PTGV);
- return Builder.CreateLoad(PTGV, "tmp");
+ return Builder.CreateLoad(PTGV);
}
/// GenerateCategory - Build metadata for a category implementation.
@@ -5167,7 +5126,7 @@ llvm::Constant *CGObjCNonFragileABIMac::GetMethodConstant(
/// struct _objc_method method_list[method_count];
/// }
///
-llvm::Constant *CGObjCNonFragileABIMac::EmitMethodList(llvm::Twine Name,
+llvm::Constant *CGObjCNonFragileABIMac::EmitMethodList(Twine Name,
const char *Section,
const ConstantVector &Methods) {
// Return null for empty list.
@@ -5258,13 +5217,12 @@ llvm::Constant *CGObjCNonFragileABIMac::EmitIvarList(
std::vector<llvm::Constant*> Ivars, Ivar(5);
- ObjCInterfaceDecl *OID =
- const_cast<ObjCInterfaceDecl*>(ID->getClassInterface());
+ const ObjCInterfaceDecl *OID = ID->getClassInterface();
assert(OID && "CGObjCNonFragileABIMac::EmitIvarList - null interface");
// FIXME. Consolidate this with similar code in GenerateClass.
- for (ObjCIvarDecl *IVD = OID->all_declared_ivar_begin();
+ for (const ObjCIvarDecl *IVD = OID->all_declared_ivar_begin();
IVD; IVD = IVD->getNextIvar()) {
// Ignore unnamed bit-fields.
if (!IVD->getDeclName())
@@ -5273,7 +5231,7 @@ llvm::Constant *CGObjCNonFragileABIMac::EmitIvarList(
ComputeIvarBaseOffset(CGM, ID, IVD));
Ivar[1] = GetMethodVarName(IVD->getIdentifier());
Ivar[2] = GetMethodVarType(IVD);
- const llvm::Type *FieldTy =
+ llvm::Type *FieldTy =
CGM.getTypes().ConvertTypeForMem(IVD->getType());
unsigned Size = CGM.getTargetData().getTypeAllocSize(FieldTy);
unsigned Align = CGM.getContext().getPreferredTypeAlign(
@@ -5461,7 +5419,7 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
/// @endcode
///
llvm::Constant *
-CGObjCNonFragileABIMac::EmitProtocolList(llvm::Twine Name,
+CGObjCNonFragileABIMac::EmitProtocolList(Twine Name,
ObjCProtocolDecl::protocol_iterator begin,
ObjCProtocolDecl::protocol_iterator end) {
std::vector<llvm::Constant*> ProtocolRefs;
@@ -5669,7 +5627,7 @@ CGObjCNonFragileABIMac::EmitVTableMessageSend(CodeGenFunction &CGF,
callee = CGF.Builder.CreateLoad(callee, "msgSend_fn");
bool variadic = method ? method->isVariadic() : false;
- const llvm::FunctionType *fnType =
+ llvm::FunctionType *fnType =
CGF.getTypes().GetFunctionType(fnInfo, variadic);
callee = CGF.Builder.CreateBitCast(callee,
llvm::PointerType::getUnqual(fnType));
@@ -5730,7 +5688,7 @@ llvm::Value *CGObjCNonFragileABIMac::EmitClassRefFromId(CGBuilderTy &Builder,
CGM.AddUsedGlobal(Entry);
}
- return Builder.CreateLoad(Entry, "tmp");
+ return Builder.CreateLoad(Entry);
}
llvm::Value *CGObjCNonFragileABIMac::EmitClassRef(CGBuilderTy &Builder,
@@ -5764,7 +5722,7 @@ CGObjCNonFragileABIMac::EmitSuperClassRef(CGBuilderTy &Builder,
CGM.AddUsedGlobal(Entry);
}
- return Builder.CreateLoad(Entry, "tmp");
+ return Builder.CreateLoad(Entry);
}
/// EmitMetaClassRef - Return a Value * of the address of _class_t
@@ -5774,7 +5732,7 @@ llvm::Value *CGObjCNonFragileABIMac::EmitMetaClassRef(CGBuilderTy &Builder,
const ObjCInterfaceDecl *ID) {
llvm::GlobalVariable * &Entry = MetaClassReferences[ID->getIdentifier()];
if (Entry)
- return Builder.CreateLoad(Entry, "tmp");
+ return Builder.CreateLoad(Entry);
std::string MetaClassName(getMetaclassSymbolPrefix() + ID->getNameAsString());
llvm::GlobalVariable *MetaClassGV = GetClassGlobal(MetaClassName);
@@ -5790,7 +5748,7 @@ llvm::Value *CGObjCNonFragileABIMac::EmitMetaClassRef(CGBuilderTy &Builder,
Entry->setSection("__DATA, __objc_superrefs, regular, no_dead_strip");
CGM.AddUsedGlobal(Entry);
- return Builder.CreateLoad(Entry, "tmp");
+ return Builder.CreateLoad(Entry);
}
/// GetClass - Return a reference to the class for the given interface
@@ -5847,7 +5805,7 @@ CGObjCNonFragileABIMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
// FIXME: We shouldn't need to do this cast, rectify the ASTContext and
// ObjCTypes types.
- const llvm::Type *ClassTy =
+ llvm::Type *ClassTy =
CGM.getTypes().ConvertType(CGF.getContext().getObjCClassType());
Target = CGF.Builder.CreateBitCast(Target, ClassTy);
CGF.Builder.CreateStore(Target,
@@ -5881,7 +5839,7 @@ llvm::Value *CGObjCNonFragileABIMac::EmitSelector(CGBuilderTy &Builder,
if (lval)
return Entry;
- return Builder.CreateLoad(Entry, "tmp");
+ return Builder.CreateLoad(Entry);
}
/// EmitObjCIvarAssign - Code gen for assigning to a __strong object.
/// objc_assign_ivar (id src, id *dst, ptrdiff_t)
@@ -5890,7 +5848,7 @@ void CGObjCNonFragileABIMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *src,
llvm::Value *dst,
llvm::Value *ivarOffset) {
- const llvm::Type * SrcTy = src->getType();
+ llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
@@ -5911,7 +5869,7 @@ void CGObjCNonFragileABIMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
void CGObjCNonFragileABIMac::EmitObjCStrongCastAssign(
CodeGen::CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dst) {
- const llvm::Type * SrcTy = src->getType();
+ llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
@@ -5944,7 +5902,7 @@ void CGObjCNonFragileABIMac::EmitGCMemmoveCollectable(
llvm::Value * CGObjCNonFragileABIMac::EmitObjCWeakRead(
CodeGen::CodeGenFunction &CGF,
llvm::Value *AddrWeakObj) {
- const llvm::Type* DestTy =
+ llvm::Type* DestTy =
cast<llvm::PointerType>(AddrWeakObj->getType())->getElementType();
AddrWeakObj = CGF.Builder.CreateBitCast(AddrWeakObj, ObjCTypes.PtrObjectPtrTy);
llvm::Value *read_weak = CGF.Builder.CreateCall(ObjCTypes.getGcReadWeakFn(),
@@ -5958,7 +5916,7 @@ llvm::Value * CGObjCNonFragileABIMac::EmitObjCWeakRead(
///
void CGObjCNonFragileABIMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dst) {
- const llvm::Type * SrcTy = src->getType();
+ llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
@@ -5979,7 +5937,7 @@ void CGObjCNonFragileABIMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
void CGObjCNonFragileABIMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dst,
bool threadlocal) {
- const llvm::Type * SrcTy = src->getType();
+ llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
@@ -6043,9 +6001,8 @@ void CGObjCNonFragileABIMac::EmitTryStmt(CodeGen::CodeGenFunction &CGF,
void CGObjCNonFragileABIMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
const ObjCAtThrowStmt &S) {
if (const Expr *ThrowExpr = S.getThrowExpr()) {
- llvm::Value *Exception = CGF.EmitScalarExpr(ThrowExpr);
- Exception = CGF.Builder.CreateBitCast(Exception, ObjCTypes.ObjectPtrTy,
- "tmp");
+ llvm::Value *Exception = CGF.EmitObjCThrowOperand(ThrowExpr);
+ Exception = CGF.Builder.CreateBitCast(Exception, ObjCTypes.ObjectPtrTy);
CGF.EmitCallOrInvoke(ObjCTypes.getExceptionThrowFn(), Exception)
.setDoesNotReturn();
} else {
@@ -6096,7 +6053,7 @@ CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID,
llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 2);
std::vector<llvm::Constant*> Values(3);
- Values[0] = llvm::ConstantExpr::getGetElementPtr(VTableGV, &VTableIdx, 1);
+ Values[0] = llvm::ConstantExpr::getGetElementPtr(VTableGV, VTableIdx);
Values[1] = GetClassName(ID->getIdentifier());
Values[2] = GetClassGlobal(ClassName);
llvm::Constant *Init =
diff --git a/lib/CodeGen/CGObjCRuntime.cpp b/lib/CodeGen/CGObjCRuntime.cpp
index 09c8d0b28f9c..ef426ce6ed9b 100644
--- a/lib/CodeGen/CGObjCRuntime.cpp
+++ b/lib/CodeGen/CGObjCRuntime.cpp
@@ -52,9 +52,8 @@ static uint64_t LookupFieldBitOffset(CodeGen::CodeGenModule &CGM,
// implemented. This should be fixed to get the information from the layout
// directly.
unsigned Index = 0;
- ObjCInterfaceDecl *IDecl = const_cast<ObjCInterfaceDecl*>(Container);
- for (ObjCIvarDecl *IVD = IDecl->all_declared_ivar_begin();
+ for (const ObjCIvarDecl *IVD = Container->all_declared_ivar_begin();
IVD; IVD = IVD->getNextIvar()) {
if (Ivar == IVD)
break;
@@ -86,9 +85,9 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
unsigned CVRQualifiers,
llvm::Value *Offset) {
// Compute (type*) ( (char *) BaseValue + Offset)
- const llvm::Type *I8Ptr = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ llvm::Type *I8Ptr = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
QualType IvarTy = Ivar->getType();
- const llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy);
+ llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy);
llvm::Value *V = CGF.Builder.CreateBitCast(BaseValue, I8Ptr);
V = CGF.Builder.CreateInBoundsGEP(V, Offset, "add.ptr");
V = CGF.Builder.CreateBitCast(V, llvm::PointerType::getUnqual(LTy));
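The three builder calls above are the IR spelling of (T *)((char *)Base + Offset). Isolated as a hedged helper for reference:

llvm::Value *emitIvarAddr(CGBuilderTy &B, llvm::Value *Base,
                          llvm::Value *Offset, llvm::Type *LTy,
                          llvm::LLVMContext &Ctx) {
  llvm::Value *V = B.CreateBitCast(Base, llvm::Type::getInt8PtrTy(Ctx));
  V = B.CreateInBoundsGEP(V, Offset, "add.ptr"); // (char *)Base + Offset
  return B.CreateBitCast(V, llvm::PointerType::getUnqual(LTy)); // (T *)
}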
@@ -118,10 +117,9 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
uint64_t TypeSizeInBits = CGF.CGM.getContext().toBits(RL.getSize());
uint64_t FieldBitOffset = LookupFieldBitOffset(CGF.CGM, OID, 0, Ivar);
uint64_t BitOffset = FieldBitOffset % CGF.CGM.getContext().getCharWidth();
- uint64_t ContainingTypeAlign = CGF.CGM.getContext().Target.getCharAlign();
+ uint64_t ContainingTypeAlign = CGF.CGM.getContext().getTargetInfo().getCharAlign();
uint64_t ContainingTypeSize = TypeSizeInBits - (FieldBitOffset - BitOffset);
- uint64_t BitFieldSize =
- Ivar->getBitWidth()->EvaluateAsInt(CGF.getContext()).getZExtValue();
+ uint64_t BitFieldSize = Ivar->getBitWidthValue(CGF.getContext());
// Allocate a new CGBitFieldInfo object to describe this access.
//
@@ -178,7 +176,7 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
FinallyInfo.enter(CGF, Finally->getFinallyBody(),
beginCatchFn, endCatchFn, exceptionRethrowFn);
- llvm::SmallVector<CatchHandler, 8> Handlers;
+ SmallVector<CatchHandler, 8> Handlers;
// Enter the catch, if there is one.
if (S.getNumCatchStmts()) {
@@ -212,7 +210,7 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
// Leave the try.
if (S.getNumCatchStmts())
- CGF.EHStack.popCatch();
+ CGF.popCatchScope();
// Remember where we were.
CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();
@@ -222,7 +220,7 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
CatchHandler &Handler = Handlers[I];
CGF.EmitBlock(Handler.Block);
- llvm::Value *RawExn = CGF.Builder.CreateLoad(CGF.getExceptionSlot());
+ llvm::Value *RawExn = CGF.getExceptionFromSlot();
// Enter the catch.
llvm::Value *Exn = RawExn;
@@ -244,7 +242,7 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
// Bind the catch parameter if it exists.
if (const VarDecl *CatchParam = Handler.Variable) {
- const llvm::Type *CatchType = CGF.ConvertType(CatchParam->getType());
+ llvm::Type *CatchType = CGF.ConvertType(CatchParam->getType());
llvm::Value *CastExn = CGF.Builder.CreateBitCast(Exn, CatchType);
CGF.EmitAutoVarDecl(*CatchParam);
@@ -289,21 +287,26 @@ void CGObjCRuntime::EmitAtSynchronizedStmt(CodeGenFunction &CGF,
const ObjCAtSynchronizedStmt &S,
llvm::Function *syncEnterFn,
llvm::Function *syncExitFn) {
- // Evaluate the lock operand. This should dominate the cleanup.
- llvm::Value *SyncArg =
- CGF.EmitScalarExpr(S.getSynchExpr());
+ CodeGenFunction::RunCleanupsScope cleanups(CGF);
+
+ // Evaluate the lock operand. This is guaranteed to dominate the
+ // ARC release and lock-release cleanups.
+ const Expr *lockExpr = S.getSynchExpr();
+ llvm::Value *lock;
+ if (CGF.getLangOptions().ObjCAutoRefCount) {
+ lock = CGF.EmitARCRetainScalarExpr(lockExpr);
+ lock = CGF.EmitObjCConsumeObject(lockExpr->getType(), lock);
+ } else {
+ lock = CGF.EmitScalarExpr(lockExpr);
+ }
+ lock = CGF.Builder.CreateBitCast(lock, CGF.VoidPtrTy);
// Acquire the lock.
- SyncArg = CGF.Builder.CreateBitCast(SyncArg, syncEnterFn->getFunctionType()->getParamType(0));
- CGF.Builder.CreateCall(syncEnterFn, SyncArg);
+ CGF.Builder.CreateCall(syncEnterFn, lock)->setDoesNotThrow();
// Register an all-paths cleanup to release the lock.
- CGF.EHStack.pushCleanup<CallSyncExit>(NormalAndEHCleanup, syncExitFn,
- SyncArg);
+ CGF.EHStack.pushCleanup<CallSyncExit>(NormalAndEHCleanup, syncExitFn, lock);
// Emit the body of the statement.
CGF.EmitStmt(S.getSynchBody());
-
- // Pop the lock-release cleanup.
- CGF.PopCleanupBlock();
}
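Net effect of the rewritten EmitAtSynchronizedStmt, sketched at the source level; under ARC the lock is additionally retained for the duration of the statement, and syncEnterFn/syncExitFn are typically objc_sync_enter/objc_sync_exit:

// id _lock = <synch-expr>;            // EmitARCRetainScalarExpr under ARC
// objc_sync_enter(_lock);             // marked nounwind above
// @try { <body> }
// @finally { objc_sync_exit(_lock); } // the NormalAndEHCleanup: runs on
//                                     // fall-through, goto/return and EH paths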
diff --git a/lib/CodeGen/CGObjCRuntime.h b/lib/CodeGen/CGObjCRuntime.h
index 7accc70c9623..4fa47a740aaf 100644
--- a/lib/CodeGen/CGObjCRuntime.h
+++ b/lib/CodeGen/CGObjCRuntime.h
@@ -208,8 +208,7 @@ public:
virtual llvm::Value *EmitNSAutoreleasePoolClassRef(CGBuilderTy &Builder) {
- assert(false &&"autoreleasepool unsupported in this ABI");
- return 0;
+ llvm_unreachable("autoreleasepool unsupported in this ABI");
}
/// EnumerationMutationFunction - Return the function that's called by the
diff --git a/lib/CodeGen/CGOpenCLRuntime.cpp b/lib/CodeGen/CGOpenCLRuntime.cpp
new file mode 100644
index 000000000000..3a0e116e5ab1
--- /dev/null
+++ b/lib/CodeGen/CGOpenCLRuntime.cpp
@@ -0,0 +1,28 @@
+//===----- CGOpenCLRuntime.cpp - Interface to OpenCL Runtimes -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for OpenCL code generation. Concrete
+// subclasses of this implement code generation for specific OpenCL
+// runtime libraries.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGOpenCLRuntime.h"
+#include "CodeGenFunction.h"
+#include "llvm/GlobalValue.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+CGOpenCLRuntime::~CGOpenCLRuntime() {}
+
+void CGOpenCLRuntime::EmitWorkGroupLocalVarDecl(CodeGenFunction &CGF,
+ const VarDecl &D) {
+ return CGF.EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
+}
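The default lowering above turns an OpenCL work-group-local (__local) variable into an internal global. A hypothetical subclass, to show the intended extension point; the class name is illustrative and not part of this commit:

#include "CGOpenCLRuntime.h"

namespace clang { namespace CodeGen {
class CGVendorOpenCLRuntime : public CGOpenCLRuntime {
public:
  CGVendorOpenCLRuntime(CodeGenModule &CGM) : CGOpenCLRuntime(CGM) {}
  virtual void EmitWorkGroupLocalVarDecl(CodeGenFunction &CGF,
                                         const VarDecl &D) {
    // A vendor runtime might instead place D in a dedicated address space
    // or a driver-managed pool; the default emits an internal global.
    CGOpenCLRuntime::EmitWorkGroupLocalVarDecl(CGF, D);
  }
};
} }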
diff --git a/lib/CodeGen/CGOpenCLRuntime.h b/lib/CodeGen/CGOpenCLRuntime.h
new file mode 100644
index 000000000000..9a8430fb7500
--- /dev/null
+++ b/lib/CodeGen/CGOpenCLRuntime.h
@@ -0,0 +1,46 @@
+//===----- CGOpenCLRuntime.h - Interface to OpenCL Runtimes -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for OpenCL code generation. Concrete
+// subclasses of this implement code generation for specific OpenCL
+// runtime libraries.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_OPENCLRUNTIME_H
+#define CLANG_CODEGEN_OPENCLRUNTIME_H
+
+namespace clang {
+
+class VarDecl;
+
+namespace CodeGen {
+
+class CodeGenFunction;
+class CodeGenModule;
+
+class CGOpenCLRuntime {
+protected:
+ CodeGenModule &CGM;
+
+public:
+ CGOpenCLRuntime(CodeGenModule &CGM) : CGM(CGM) {}
+ virtual ~CGOpenCLRuntime();
+
+ /// Emit the IR required for a work-group-local variable declaration, and add
+ /// an entry to CGF's LocalDeclMap for D. The base class does this using
+ /// CodeGenFunction::EmitStaticVarDecl to emit an internal global for D.
+ virtual void EmitWorkGroupLocalVarDecl(CodeGenFunction &CGF,
+ const VarDecl &D);
+};
+
+}
+}
+
+#endif
diff --git a/lib/CodeGen/CGRTTI.cpp b/lib/CodeGen/CGRTTI.cpp
index e564c7070525..fbdb2984830b 100644
--- a/lib/CodeGen/CGRTTI.cpp
+++ b/lib/CodeGen/CGRTTI.cpp
@@ -26,10 +26,10 @@ class RTTIBuilder {
CodeGenModule &CGM; // Per-module state.
llvm::LLVMContext &VMContext;
- const llvm::Type *Int8PtrTy;
+ llvm::Type *Int8PtrTy;
/// Fields - The fields of the RTTI descriptor currently being built.
- llvm::SmallVector<llvm::Constant *, 16> Fields;
+ SmallVector<llvm::Constant *, 16> Fields;
/// GetAddrOfTypeName - Returns the mangled type name of the given type.
llvm::GlobalVariable *
@@ -120,7 +120,7 @@ RTTIBuilder::GetAddrOfTypeName(QualType Ty,
llvm::raw_svector_ostream Out(OutName);
CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
Out.flush();
- llvm::StringRef Name = OutName.str();
+ StringRef Name = OutName.str();
// We know that the mangled name of the type starts at index 4 of the
// mangled name of the typename, so we can just index into it in order to
@@ -141,7 +141,7 @@ llvm::Constant *RTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
llvm::raw_svector_ostream Out(OutName);
CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
Out.flush();
- llvm::StringRef Name = OutName.str();
+ StringRef Name = OutName.str();
// Look for an existing global.
llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);
@@ -185,6 +185,7 @@ static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
case BuiltinType::ULong:
case BuiltinType::LongLong:
case BuiltinType::ULongLong:
+ case BuiltinType::Half:
case BuiltinType::Float:
case BuiltinType::Double:
case BuiltinType::LongDouble:
@@ -203,7 +204,7 @@ static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
case BuiltinType::ObjCId:
case BuiltinType::ObjCClass:
case BuiltinType::ObjCSel:
- assert(false && "FIXME: Objective-C types are unsupported!");
+ llvm_unreachable("FIXME: Objective-C types are unsupported!");
}
// Silence gcc.
@@ -267,7 +268,7 @@ static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM, QualType Ty) {
/// IsIncompleteClassType - Returns whether the given record type is incomplete.
static bool IsIncompleteClassType(const RecordType *RecordTy) {
- return !RecordTy->getDecl()->isDefinition();
+ return !RecordTy->getDecl()->isCompleteDefinition();
}
/// ContainsIncompleteClassType - Returns whether the given type contains an
@@ -393,17 +394,18 @@ void RTTIBuilder::BuildVTablePointer(const Type *Ty) {
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.def"
- assert(false && "Non-canonical and dependent types shouldn't get here");
+ llvm_unreachable("Non-canonical and dependent types shouldn't get here");
case Type::LValueReference:
case Type::RValueReference:
- assert(false && "References shouldn't get here");
+ llvm_unreachable("References shouldn't get here");
case Type::Builtin:
// GCC treats vector and complex types as fundamental types.
case Type::Vector:
case Type::ExtVector:
case Type::Complex:
+ case Type::Atomic:
// FIXME: GCC treats block pointers as fundamental types?!
case Type::BlockPointer:
// abi::__fundamental_type_info.
@@ -479,12 +481,12 @@ void RTTIBuilder::BuildVTablePointer(const Type *Ty) {
llvm::Constant *VTable =
CGM.getModule().getOrInsertGlobal(VTableName, Int8PtrTy);
- const llvm::Type *PtrDiffTy =
+ llvm::Type *PtrDiffTy =
CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
// The vtable address point is 2.
llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
- VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(VTable, &Two, 1);
+ VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(VTable, Two);
VTable = llvm::ConstantExpr::getBitCast(VTable, Int8PtrTy);
Fields.push_back(VTable);
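
The address point of 2 reflects the Itanium ABI vtable prefix: slot 0 holds the offset-to-top and slot 1 the RTTI pointer, so the usable entries begin at index 2. A standalone sketch of the same computation, assuming the LLVM 3.0-era ConstantExpr API used in this hunk (the mangled vtable symbol is illustrative):

    llvm::Constant *VTable = CGM.getModule().getOrInsertGlobal(
        "_ZTVN10__cxxabiv117__class_type_infoE", Int8PtrTy);
    llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
    // &vtable[2]: step over offset-to-top (slot 0) and RTTI pointer (slot 1).
    llvm::Constant *AddressPoint =
        llvm::ConstantExpr::getInBoundsGetElementPtr(VTable, Two);
    AddressPoint = llvm::ConstantExpr::getBitCast(AddressPoint, Int8PtrTy);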
@@ -533,7 +535,7 @@ maybeUpdateRTTILinkage(CodeGenModule &CGM, llvm::GlobalVariable *GV,
llvm::raw_svector_ostream Out(OutName);
CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
Out.flush();
- llvm::StringRef Name = OutName.str();
+ StringRef Name = OutName.str();
llvm::GlobalVariable *TypeNameGV = CGM.getModule().getNamedGlobal(Name);
@@ -553,7 +555,7 @@ llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
llvm::raw_svector_ostream Out(OutName);
CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
Out.flush();
- llvm::StringRef Name = OutName.str();
+ StringRef Name = OutName.str();
llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
if (OldGV && !OldGV->isDeclaration()) {
@@ -580,7 +582,7 @@ llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
// And the name.
llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+ llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
Fields.push_back(llvm::ConstantExpr::getBitCast(TypeName, Int8PtrTy));
switch (Ty->getTypeClass()) {
@@ -590,7 +592,7 @@ llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.def"
- assert(false && "Non-canonical and dependent types shouldn't get here");
+ llvm_unreachable("Non-canonical and dependent types shouldn't get here");
// GCC treats vector types as fundamental types.
case Type::Builtin:
@@ -604,7 +606,7 @@ llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
case Type::LValueReference:
case Type::RValueReference:
- assert(false && "References shouldn't get here");
+ llvm_unreachable("References shouldn't get here");
case Type::ConstantArray:
case Type::IncompleteArray:
@@ -656,6 +658,10 @@ llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
case Type::MemberPointer:
BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
break;
+
+ case Type::Atomic:
+ // No fields, at least for the moment.
+ break;
}
llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);
@@ -822,7 +828,7 @@ static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
/// classes with bases that do not satisfy the abi::__si_class_type_info
/// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
void RTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
- const llvm::Type *UnsignedIntLTy =
+ llvm::Type *UnsignedIntLTy =
CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
// Itanium C++ ABI 2.9.5p6c:
@@ -840,7 +846,7 @@ void RTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
if (!RD->getNumBases())
return;
- const llvm::Type *LongLTy =
+ llvm::Type *LongLTy =
CGM.getTypes().ConvertType(CGM.getContext().LongTy);
// Now add the base class descriptions.
@@ -879,7 +885,7 @@ void RTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
CharUnits Offset;
if (Base->isVirtual())
Offset =
- CGM.getVTables().getVirtualBaseOffsetOffset(RD, BaseDecl);
+ CGM.getVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl);
else {
const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
Offset = Layout.getBaseClassOffset(BaseDecl);
@@ -916,7 +922,7 @@ void RTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
if (ContainsIncompleteClassType(UnqualifiedPointeeTy))
Flags |= PTI_Incomplete;
- const llvm::Type *UnsignedIntLTy =
+ llvm::Type *UnsignedIntLTy =
CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
@@ -953,7 +959,7 @@ void RTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
if (IsIncompleteClassType(ClassType))
Flags |= PTI_ContainingClassIncomplete;
- const llvm::Type *UnsignedIntLTy =
+ llvm::Type *UnsignedIntLTy =
CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
@@ -977,12 +983,12 @@ llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty,
// FIXME: should we even be calling this method if RTTI is disabled
// and it's not for EH?
if (!ForEH && !getContext().getLangOptions().RTTI) {
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+ llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
return llvm::Constant::getNullValue(Int8PtrTy);
}
if (ForEH && Ty->isObjCObjectPointerType() && !Features.NeXTRuntime) {
- return Runtime->GetEHType(Ty);
+ return ObjCRuntime->GetEHType(Ty);
}
return RTTIBuilder(*this).BuildTypeInfo(Ty);
diff --git a/lib/CodeGen/CGRecordLayout.h b/lib/CodeGen/CGRecordLayout.h
index 8a450298f70a..25a0a508f188 100644
--- a/lib/CodeGen/CGRecordLayout.h
+++ b/lib/CodeGen/CGRecordLayout.h
@@ -10,12 +10,13 @@
#ifndef CLANG_CODEGEN_CGRECORDLAYOUT_H
#define CLANG_CODEGEN_CGRECORDLAYOUT_H
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/DerivedTypes.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/DerivedTypes.h"
+
namespace llvm {
- class raw_ostream;
class StructType;
}
@@ -144,7 +145,7 @@ public:
/// @}
- void print(llvm::raw_ostream &OS) const;
+ void print(raw_ostream &OS) const;
void dump() const;
/// \brief Given a bit-field decl, build an appropriate helper object for
@@ -270,7 +271,7 @@ public:
return it->second;
}
- void print(llvm::raw_ostream &OS) const;
+ void print(raw_ostream &OS) const;
void dump() const;
};
diff --git a/lib/CodeGen/CGRecordLayoutBuilder.cpp b/lib/CodeGen/CGRecordLayoutBuilder.cpp
index 2b07bafa0096..6475ccac0389 100644
--- a/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -35,7 +35,7 @@ class CGRecordLayoutBuilder {
public:
/// FieldTypes - Holds the LLVM types that the struct is created from.
///
- llvm::SmallVector<llvm::Type *, 16> FieldTypes;
+ SmallVector<llvm::Type *, 16> FieldTypes;
/// BaseSubobjectType - Holds the LLVM type for the non-virtual part
/// of the struct. For example, consider:
@@ -174,7 +174,7 @@ private:
/// the passed size.
void AppendTailPadding(CharUnits RecordSize);
- CharUnits getTypeAlignment(const llvm::Type *Ty) const;
+ CharUnits getTypeAlignment(llvm::Type *Ty) const;
/// getAlignmentAsLLVMStruct - Returns the maximum alignment of all the
/// LLVM element types.
@@ -230,7 +230,7 @@ CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
uint64_t FieldSize,
uint64_t ContainingTypeSizeInBits,
unsigned ContainingTypeAlign) {
- const llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
+ llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
CharUnits TypeSizeInBytes =
CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(Ty));
uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);
@@ -363,15 +363,14 @@ CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
void CGRecordLayoutBuilder::LayoutBitField(const FieldDecl *D,
uint64_t fieldOffset) {
- uint64_t fieldSize =
- D->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();
+ uint64_t fieldSize = D->getBitWidthValue(Types.getContext());
if (fieldSize == 0)
return;
uint64_t nextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
CharUnits numBytesToAppend;
- unsigned charAlign = Types.getContext().Target.getCharAlign();
+ unsigned charAlign = Types.getContext().getTargetInfo().getCharAlign();
if (fieldOffset < nextFieldOffsetInBits && !BitsAvailableInLastField) {
assert(fieldOffset % charAlign == 0 &&
@@ -492,8 +491,7 @@ llvm::Type *
CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
const ASTRecordLayout &Layout) {
if (Field->isBitField()) {
- uint64_t FieldSize =
- Field->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();
+ uint64_t FieldSize = Field->getBitWidthValue(Types.getContext());
// Ignore zero sized bit fields.
if (FieldSize == 0)
@@ -502,7 +500,7 @@ CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
CharUnits NumBytesToAppend = Types.getContext().toCharUnitsFromBits(
llvm::RoundUpToAlignment(FieldSize,
- Types.getContext().Target.getCharAlign()));
+ Types.getContext().getTargetInfo().getCharAlign()));
if (NumBytesToAppend > CharUnits::One())
FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend.getQuantity());
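
As a worked example (assuming CHAR_BIT == 8): a 17-bit union bit-field pads to RoundUpToAlignment(17, 8) == 24 bits, i.e. 3 bytes, and is lowered as [3 x i8]. A self-contained sketch of the size computation:

    #include "llvm/Support/MathExtras.h"

    // Bytes of storage backing an N-bit union bit-field (a sketch).
    static uint64_t bitFieldStorageBytes(uint64_t FieldSizeInBits,
                                         unsigned CharAlign) {
      // 17 bits with CharAlign 8 -> 24 padded bits -> 3 bytes.
      return llvm::RoundUpToAlignment(FieldSizeInBits, CharAlign) / CharAlign;
    }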
@@ -672,10 +670,10 @@ CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
// Check if we need to add a vtable pointer.
if (RD->isDynamicClass()) {
if (!PrimaryBase) {
- const llvm::Type *FunctionType =
+ llvm::Type *FunctionType =
llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
/*isVarArg=*/true);
- const llvm::Type *VTableTy = FunctionType->getPointerTo();
+ llvm::Type *VTableTy = FunctionType->getPointerTo();
assert(NextFieldOffset.isZero() &&
"VTable pointer must come first!");
@@ -735,8 +733,8 @@ CGRecordLayoutBuilder::ComputeNonVirtualBaseType(const CXXRecordDecl *RD) {
}
- BaseSubobjectType = llvm::StructType::createNamed(Types.getLLVMContext(), "",
- FieldTypes, Packed);
+ BaseSubobjectType = llvm::StructType::create(Types.getLLVMContext(),
+ FieldTypes, "", Packed);
Types.addRecordTypeName(RD, BaseSubobjectType, ".base");
// Pull the padding back off.
@@ -882,7 +880,7 @@ void CGRecordLayoutBuilder::AppendBytes(CharUnits numBytes) {
AppendField(NextFieldOffset, getByteArrayType(numBytes));
}
-CharUnits CGRecordLayoutBuilder::getTypeAlignment(const llvm::Type *Ty) const {
+CharUnits CGRecordLayoutBuilder::getTypeAlignment(llvm::Type *Ty) const {
if (Packed)
return CharUnits::One();
@@ -983,7 +981,7 @@ CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
}
// Verify that the LLVM and AST field offsets agree.
- const llvm::StructType *ST =
+ llvm::StructType *ST =
dyn_cast<llvm::StructType>(RL->getLLVMType());
const llvm::StructLayout *SL = getTargetData().getStructLayout(ST);
@@ -1037,7 +1035,7 @@ CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
return RL;
}
-void CGRecordLayout::print(llvm::raw_ostream &OS) const {
+void CGRecordLayout::print(raw_ostream &OS) const {
OS << "<CGRecordLayout\n";
OS << " LLVMType:" << *CompleteObjectType << "\n";
if (BaseSubobjectType)
@@ -1071,7 +1069,7 @@ void CGRecordLayout::dump() const {
print(llvm::errs());
}
-void CGBitFieldInfo::print(llvm::raw_ostream &OS) const {
+void CGBitFieldInfo::print(raw_ostream &OS) const {
OS << "<CGBitFieldInfo";
OS << " Size:" << Size;
OS << " IsSigned:" << IsSigned << "\n";
diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp
index 07bddb797265..c56931bbc6fa 100644
--- a/lib/CodeGen/CGStmt.cpp
+++ b/lib/CodeGen/CGStmt.cpp
@@ -31,20 +31,19 @@ using namespace CodeGen;
void CodeGenFunction::EmitStopPoint(const Stmt *S) {
if (CGDebugInfo *DI = getDebugInfo()) {
+ SourceLocation Loc;
if (isa<DeclStmt>(S))
- DI->setLocation(S->getLocEnd());
+ Loc = S->getLocEnd();
else
- DI->setLocation(S->getLocStart());
- DI->UpdateLineDirectiveRegion(Builder);
- DI->EmitStopPoint(Builder);
+ Loc = S->getLocStart();
+ DI->EmitLocation(Builder, Loc);
}
}
void CodeGenFunction::EmitStmt(const Stmt *S) {
assert(S && "Null statement?");
- // Check if we can handle this without bothering to generate an
- // insert point or debug info.
+ // These statements have their own debug info handling.
if (EmitSimpleStmt(S))
return;
@@ -137,11 +136,11 @@ void CodeGenFunction::EmitStmt(const Stmt *S) {
EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
break;
case Stmt::ObjCAtCatchStmtClass:
- assert(0 && "@catch statements should be handled by EmitObjCAtTryStmt");
- break;
+ llvm_unreachable(
+ "@catch statements should be handled by EmitObjCAtTryStmt");
case Stmt::ObjCAtFinallyStmtClass:
- assert(0 && "@finally statements should be handled by EmitObjCAtTryStmt");
- break;
+ llvm_unreachable(
+ "@finally statements should be handled by EmitObjCAtTryStmt");
case Stmt::ObjCAtThrowStmtClass:
EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
break;
@@ -192,10 +191,8 @@ RValue CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
"LLVM IR generation of compound statement ('{}')");
CGDebugInfo *DI = getDebugInfo();
- if (DI) {
- DI->setLocation(S.getLBracLoc());
- DI->EmitRegionStart(Builder);
- }
+ if (DI)
+ DI->EmitLexicalBlockStart(Builder, S.getLBracLoc());
// Keep track of the current cleanup stack depth.
RunCleanupsScope Scope(*this);
@@ -204,10 +201,8 @@ RValue CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
E = S.body_end()-GetLast; I != E; ++I)
EmitStmt(*I);
- if (DI) {
- DI->setLocation(S.getRBracLoc());
- DI->EmitRegionEnd(Builder);
- }
+ if (DI)
+ DI->EmitLexicalBlockEnd(Builder, S.getRBracLoc());
RValue RV;
if (!GetLast)
@@ -286,6 +281,23 @@ void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
Builder.ClearInsertionPoint();
}
+void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
+ bool inserted = false;
+ for (llvm::BasicBlock::use_iterator
+ i = block->use_begin(), e = block->use_end(); i != e; ++i) {
+ if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(*i)) {
+ CurFn->getBasicBlockList().insertAfter(insn->getParent(), block);
+ inserted = true;
+ break;
+ }
+ }
+
+ if (!inserted)
+ CurFn->getBasicBlockList().push_back(block);
+
+ Builder.SetInsertPoint(block);
+}
+
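A usage sketch, written as if inside another CodeGenFunction member: the block's first user, here a branch emitted into the current block, determines where the block lands in the function's block list.

    llvm::BasicBlock *contBB =
        llvm::BasicBlock::Create(getLLVMContext(), "cont");
    Builder.CreateBr(contBB);    // first use of the block
    EmitBlockAfterUses(contBB);  // placed right after the branching block;
                                 // also becomes the new insertion point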
CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
JumpDest &Dest = LabelMap[D];
@@ -555,10 +567,8 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S) {
RunCleanupsScope ForScope(*this);
CGDebugInfo *DI = getDebugInfo();
- if (DI) {
- DI->setLocation(S.getSourceRange().getBegin());
- DI->EmitRegionStart(Builder);
- }
+ if (DI)
+ DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());
// Evaluate the first part before the loop.
if (S.getInit())
@@ -637,10 +647,8 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S) {
ForScope.ForceCleanup();
- if (DI) {
- DI->setLocation(S.getSourceRange().getEnd());
- DI->EmitRegionEnd(Builder);
- }
+ if (DI)
+ DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());
// Emit the fall-through block.
EmitBlock(LoopExit.getBlock(), true);
@@ -652,10 +660,8 @@ void CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S) {
RunCleanupsScope ForScope(*this);
CGDebugInfo *DI = getDebugInfo();
- if (DI) {
- DI->setLocation(S.getSourceRange().getBegin());
- DI->EmitRegionStart(Builder);
- }
+ if (DI)
+ DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());
// Evaluate the first pieces before the loop.
EmitStmt(S.getRangeStmt());
@@ -711,10 +717,8 @@ void CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S) {
ForScope.ForceCleanup();
- if (DI) {
- DI->setLocation(S.getSourceRange().getEnd());
- DI->EmitRegionEnd(Builder);
- }
+ if (DI)
+ DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());
// Emit the fall-through block.
EmitBlock(LoopExit.getBlock(), true);
@@ -767,7 +771,10 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
} else if (RV->getType()->isAnyComplexType()) {
EmitComplexExprIntoAddr(RV, ReturnValue, false);
} else {
- EmitAggExpr(RV, AggValueSlot::forAddr(ReturnValue, Qualifiers(), true));
+ EmitAggExpr(RV, AggValueSlot::forAddr(ReturnValue, Qualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased));
}
EmitBranchThroughCleanup(ReturnBlock);
@@ -816,8 +823,8 @@ void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
assert(S.getRHS() && "Expected RHS value in CaseStmt");
- llvm::APSInt LHS = S.getLHS()->EvaluateAsInt(getContext());
- llvm::APSInt RHS = S.getRHS()->EvaluateAsInt(getContext());
+ llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
+ llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());
// Emit the code for this case. We do this first to make sure it is
// properly chained from our predecessor before generating the
@@ -856,7 +863,7 @@ void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
// Emit range check.
llvm::Value *Diff =
- Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS), "tmp");
+ Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
llvm::Value *Cond =
Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");
Builder.CreateCondBr(Cond, CaseDest, FalseDest);
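
So a GNU case range lowers to a single unsigned subtract-and-compare rather than one switch entry per value; a source-level sketch:

    int classify(int n) {
      switch (n) {
      case 10 ... 15:   // GNU extension; LHS == 10, Range == 5
        // emitted as: %diff = sub %cond, 10
        //             %inbounds = icmp ule %diff, 5
        //             br i1 %inbounds, label %case, label %next
        return 1;
      default:
        return 0;
      }
    }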
@@ -876,7 +883,7 @@ void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
}
llvm::ConstantInt *CaseVal =
- Builder.getInt(S.getLHS()->EvaluateAsInt(getContext()));
+ Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
// If the body of the case is just a 'break', and if there was no fallthrough,
// try to not emit an empty block.
@@ -917,7 +924,7 @@ void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
while (NextCase && NextCase->getRHS() == 0) {
CurCase = NextCase;
llvm::ConstantInt *CaseVal =
- Builder.getInt(CurCase->getLHS()->EvaluateAsInt(getContext()));
+ Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
SwitchInsn->addCase(CaseVal, CaseDest);
NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
}
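
The loop above folds runs of empty cases into repeated addCase calls on a single destination block; the source pattern it covers:

    void handle();

    void dispatch(int n) {
      switch (n) {
      case 1:           // all three values map to the same CaseDest
      case 2:
      case 3:
        handle();
        break;
      }
    }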
@@ -961,7 +968,7 @@ enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
static CSFC_Result CollectStatementsForCase(const Stmt *S,
const SwitchCase *Case,
bool &FoundCase,
- llvm::SmallVectorImpl<const Stmt*> &ResultStmts) {
+ SmallVectorImpl<const Stmt*> &ResultStmts) {
// If this is a null statement, just succeed.
if (S == 0)
return Case ? CSFC_Success : CSFC_FallThrough;
@@ -1086,7 +1093,7 @@ static CSFC_Result CollectStatementsForCase(const Stmt *S,
/// for more details.
static bool FindCaseStatementsForValue(const SwitchStmt &S,
const llvm::APInt &ConstantCondValue,
- llvm::SmallVectorImpl<const Stmt*> &ResultStmts,
+ SmallVectorImpl<const Stmt*> &ResultStmts,
ASTContext &C) {
// First step, find the switch case that is being branched to. We can do this
// efficiently by scanning the SwitchCase list.
@@ -1107,7 +1114,7 @@ static bool FindCaseStatementsForValue(const SwitchStmt &S,
if (CS->getRHS()) return false;
// If we found our case, remember it as 'case'.
- if (CS->getLHS()->EvaluateAsInt(C) == ConstantCondValue)
+ if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
break;
}
@@ -1147,7 +1154,7 @@ void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
// emit the live case statement (if any) of the switch.
llvm::APInt ConstantCondValue;
if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
- llvm::SmallVector<const Stmt*, 4> CaseStmts;
+ SmallVector<const Stmt*, 4> CaseStmts;
if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
getContext())) {
RunCleanupsScope ExecutedScope(*this);
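
In other words, when the controlling expression folds to a constant, no switch instruction is emitted at all; only the statements of the live case survive. A sketch:

    int pick() {
      const int k = 2;
      switch (k) {          // condition folds to 2
      case 1:  return 10;   // never emitted
      case 2:  return 20;   // the only case body emitted
      default: return 0;    // never emitted
      }
    }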
@@ -1219,7 +1226,7 @@ void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
static std::string
SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
- llvm::SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=0) {
+ SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=0) {
std::string Result;
while (*Constraint) {
@@ -1276,7 +1283,7 @@ AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
if (!Attr)
return Constraint;
- llvm::StringRef Register = Attr->getLabel();
+ StringRef Register = Attr->getLabel();
assert(Target.isValidGCCRegisterName(Register));
// We're using validateOutputConstraint here because we only care if
// this is a register constraint.
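
The source pattern this rewrite serves is a register asm label on the constrained variable; a sketch (x86-specific, for illustration):

    void sample() {
      register int val asm("eax");  // AsmLabelAttr carries "eax"
      asm("" : "=r"(val));          // "=r" is rewritten to "={eax}"
    }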
@@ -1301,7 +1308,7 @@ CodeGenFunction::EmitAsmInputLValue(const AsmStmt &S,
if (!CodeGenFunction::hasAggregateLLVMType(InputType)) {
Arg = EmitLoadOfLValue(InputValue).getScalarVal();
} else {
- const llvm::Type *Ty = ConvertType(InputType);
+ llvm::Type *Ty = ConvertType(InputType);
uint64_t Size = CGM.getTargetData().getTypeSizeInBits(Ty);
if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
Ty = llvm::IntegerType::get(getLLVMContext(), Size);
@@ -1341,11 +1348,11 @@ llvm::Value* CodeGenFunction::EmitAsmInput(const AsmStmt &S,
/// asm.
static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
CodeGenFunction &CGF) {
- llvm::SmallVector<llvm::Value *, 8> Locs;
+ SmallVector<llvm::Value *, 8> Locs;
// Add the location of the first line to the MDNode.
Locs.push_back(llvm::ConstantInt::get(CGF.Int32Ty,
Str->getLocStart().getRawEncoding()));
- llvm::StringRef StrVal = Str->getString();
+ StringRef StrVal = Str->getString();
if (!StrVal.empty()) {
const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
const LangOptions &LangOpts = CGF.CGM.getLangOptions();
@@ -1367,7 +1374,7 @@ static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
// Analyze the asm string to decompose it into its pieces. We know that Sema
// has already done this, so it is guaranteed to be successful.
- llvm::SmallVector<AsmStmt::AsmStringPiece, 4> Pieces;
+ SmallVector<AsmStmt::AsmStringPiece, 4> Pieces;
unsigned DiagOffs;
S.AnalyzeAsmString(Pieces, getContext(), DiagOffs);
@@ -1384,8 +1391,8 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
}
// Get all the output and input constraints together.
- llvm::SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
- llvm::SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
+ SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
+ SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i),
@@ -1530,14 +1537,18 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
// Use ptrtoint as appropriate so that we can do our extension.
if (isa<llvm::PointerType>(Arg->getType()))
Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
- const llvm::Type *OutputTy = ConvertType(OutputType);
+ llvm::Type *OutputTy = ConvertType(OutputType);
if (isa<llvm::IntegerType>(OutputTy))
Arg = Builder.CreateZExt(Arg, OutputTy);
- else
+ else if (isa<llvm::PointerType>(OutputTy))
+ Arg = Builder.CreateZExt(Arg, IntPtrTy);
+ else {
+ assert(OutputTy->isFloatingPointTy() && "Unexpected output type");
Arg = Builder.CreateFPExt(Arg, OutputTy);
+ }
}
}
- if (const llvm::Type* AdjTy =
+ if (llvm::Type* AdjTy =
getTargetHooks().adjustInlineAsmType(*this, InputConstraint,
Arg->getType()))
Arg = Builder.CreateBitCast(Arg, AdjTy);
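
The widening above arises with tied operands whose types differ; a sketch of the integer case (the new branch handles a pointer-typed output analogously, extending the input to IntPtrTy instead):

    long tied(int in) {
      long out;
      // Input "0"(in) is tied to output %0; the i32 input is zero-extended
      // to the output's wider i64 type before the asm call.
      asm("mov %1, %0" : "=r"(out) : "0"(in));
      return out;
    }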
@@ -1556,7 +1567,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
// Clobbers
for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
- llvm::StringRef Clobber = S.getClobber(i)->getString();
+ StringRef Clobber = S.getClobber(i)->getString();
if (Clobber != "memory" && Clobber != "cc")
Clobber = Target.getNormalizedGCCRegisterName(Clobber);
@@ -1577,7 +1588,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
Constraints += MachineClobbers;
}
- const llvm::Type *ResultType;
+ llvm::Type *ResultType;
if (ResultRegTypes.empty())
ResultType = llvm::Type::getVoidTy(getLLVMContext());
else if (ResultRegTypes.size() == 1)
@@ -1585,7 +1596,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
else
ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
llvm::FunctionType::get(ResultType, ArgTypes, false);
llvm::InlineAsm *IA =
@@ -1615,7 +1626,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
// If the result type of the LLVM IR asm doesn't match the result type of
// the expression, do the conversion.
if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
- const llvm::Type *TruncTy = ResultTruncRegTypes[i];
+ llvm::Type *TruncTy = ResultTruncRegTypes[i];
// Truncate the integer result to the right size, note that TruncTy can be
// a pointer.
diff --git a/lib/CodeGen/CGVTT.cpp b/lib/CodeGen/CGVTT.cpp
index cec02cdfc235..ea7b8cb49794 100644
--- a/lib/CodeGen/CGVTT.cpp
+++ b/lib/CodeGen/CGVTT.cpp
@@ -14,383 +14,81 @@
#include "CodeGenModule.h"
#include "CGCXXABI.h"
#include "clang/AST/RecordLayout.h"
+#include "clang/AST/VTTBuilder.h"
using namespace clang;
using namespace CodeGen;
#define D1(x)
-namespace {
-
-/// VTT builder - Class for building VTT layout information.
-class VTTBuilder {
-
- CodeGenModule &CGM;
-
- /// MostDerivedClass - The most derived class for which we're building this
- /// vtable.
- const CXXRecordDecl *MostDerivedClass;
-
- typedef llvm::SmallVector<llvm::Constant *, 64> VTTComponentsVectorTy;
-
- /// VTTComponents - The VTT components.
- VTTComponentsVectorTy VTTComponents;
-
- /// MostDerivedClassLayout - the AST record layout of the most derived class.
- const ASTRecordLayout &MostDerivedClassLayout;
-
- typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
-
- typedef llvm::DenseMap<BaseSubobject, uint64_t> AddressPointsMapTy;
-
- /// SubVTTIndicies - The sub-VTT indices for the bases of the most derived
- /// class.
- llvm::DenseMap<BaseSubobject, uint64_t> SubVTTIndicies;
-
- /// SecondaryVirtualPointerIndices - The secondary virtual pointer indices of
- /// all subobjects of the most derived class.
- llvm::DenseMap<BaseSubobject, uint64_t> SecondaryVirtualPointerIndices;
-
- /// GenerateDefinition - Whether the VTT builder should generate LLVM IR for
- /// the VTT.
- bool GenerateDefinition;
-
- /// The linkage to use for any construction vtables required by this VTT.
- /// Only required if we're building a definition.
- llvm::GlobalVariable::LinkageTypes LinkageForConstructionVTables;
-
- /// GetAddrOfVTable - Returns the address of the vtable for the base class in
- /// the given vtable class.
- ///
- /// \param AddressPoints - If the returned vtable is a construction vtable,
- /// this will hold the address points for it.
- llvm::Constant *GetAddrOfVTable(BaseSubobject Base, bool BaseIsVirtual,
- AddressPointsMapTy& AddressPoints);
-
- /// AddVTablePointer - Add a vtable pointer to the VTT currently being built.
- ///
- /// \param AddressPoints - If the vtable is a construction vtable, this has
- /// the address points for it.
- void AddVTablePointer(BaseSubobject Base, llvm::Constant *VTable,
- const CXXRecordDecl *VTableClass,
- const AddressPointsMapTy& AddressPoints);
-
- /// LayoutSecondaryVTTs - Lay out the secondary VTTs of the given base
- /// subobject.
- void LayoutSecondaryVTTs(BaseSubobject Base);
-
- /// LayoutSecondaryVirtualPointers - Lay out the secondary virtual pointers
- /// for the given base subobject.
- ///
- /// \param BaseIsMorallyVirtual whether the base subobject is a virtual base
- /// or a direct or indirect base of a virtual base.
- ///
- /// \param AddressPoints - If the vtable is a construction vtable, this has
- /// the address points for it.
- void LayoutSecondaryVirtualPointers(BaseSubobject Base,
- bool BaseIsMorallyVirtual,
- llvm::Constant *VTable,
- const CXXRecordDecl *VTableClass,
- const AddressPointsMapTy& AddressPoints,
- VisitedVirtualBasesSetTy &VBases);
-
- /// LayoutSecondaryVirtualPointers - Lay out the secondary virtual pointers
- /// for the given base subobject.
- ///
- /// \param AddressPoints - If the vtable is a construction vtable, this has
- /// the address points for it.
- void LayoutSecondaryVirtualPointers(BaseSubobject Base,
- llvm::Constant *VTable,
- const AddressPointsMapTy& AddressPoints);
-
- /// LayoutVirtualVTTs - Lay out the VTTs for the virtual base classes of the
- /// given record decl.
- void LayoutVirtualVTTs(const CXXRecordDecl *RD,
- VisitedVirtualBasesSetTy &VBases);
-
- /// LayoutVTT - Will lay out the VTT for the given subobject, including any
- /// secondary VTTs, secondary virtual pointers and virtual VTTs.
- void LayoutVTT(BaseSubobject Base, bool BaseIsVirtual);
-
-public:
- VTTBuilder(CodeGenModule &CGM, const CXXRecordDecl *MostDerivedClass,
- bool GenerateDefinition,
- llvm::GlobalVariable::LinkageTypes LinkageForConstructionVTables
- = (llvm::GlobalVariable::LinkageTypes) -1);
-
- // getVTTComponents - Returns a reference to the VTT components.
- const VTTComponentsVectorTy &getVTTComponents() const {
- return VTTComponents;
- }
-
- /// getSubVTTIndicies - Returns a reference to the sub-VTT indices.
- const llvm::DenseMap<BaseSubobject, uint64_t> &getSubVTTIndicies() const {
- return SubVTTIndicies;
- }
-
- /// getSecondaryVirtualPointerIndices - Returns a reference to the secondary
- /// virtual pointer indices.
- const llvm::DenseMap<BaseSubobject, uint64_t> &
- getSecondaryVirtualPointerIndices() const {
- return SecondaryVirtualPointerIndices;
- }
-
-};
-
-VTTBuilder::VTTBuilder(CodeGenModule &CGM,
- const CXXRecordDecl *MostDerivedClass,
- bool GenerateDefinition,
- llvm::GlobalVariable::LinkageTypes LinkageForConstructionVTables)
- : CGM(CGM), MostDerivedClass(MostDerivedClass),
- MostDerivedClassLayout(CGM.getContext().getASTRecordLayout(MostDerivedClass)),
- GenerateDefinition(GenerateDefinition),
- LinkageForConstructionVTables(LinkageForConstructionVTables) {
- assert(!GenerateDefinition ||
- LinkageForConstructionVTables
- != (llvm::GlobalVariable::LinkageTypes) -1);
-
- // Lay out this VTT.
- LayoutVTT(BaseSubobject(MostDerivedClass, CharUnits::Zero()),
- /*BaseIsVirtual=*/false);
-}
-
-llvm::Constant *
-VTTBuilder::GetAddrOfVTable(BaseSubobject Base, bool BaseIsVirtual,
- AddressPointsMapTy& AddressPoints) {
- if (!GenerateDefinition)
- return 0;
-
- if (Base.getBase() == MostDerivedClass) {
- assert(Base.getBaseOffset().isZero() &&
+llvm::Constant *GetAddrOfVTTVTable(CodeGenVTables &CGVT,
+ const CXXRecordDecl *MostDerivedClass,
+ const VTTVTable &VTable,
+ llvm::GlobalVariable::LinkageTypes Linkage,
+ llvm::DenseMap<BaseSubobject, uint64_t> &AddressPoints) {
+ if (VTable.getBase() == MostDerivedClass) {
+ assert(VTable.getBaseOffset().isZero() &&
"Most derived class vtable must have a zero offset!");
// This is a regular vtable.
- return CGM.getVTables().GetAddrOfVTable(MostDerivedClass);
+ return CGVT.GetAddrOfVTable(MostDerivedClass);
}
- return CGM.getVTables().GenerateConstructionVTable(MostDerivedClass,
- Base, BaseIsVirtual,
- LinkageForConstructionVTables,
- AddressPoints);
+ return CGVT.GenerateConstructionVTable(MostDerivedClass,
+ VTable.getBaseSubobject(),
+ VTable.isVirtual(),
+ Linkage,
+ AddressPoints);
}
-void VTTBuilder::AddVTablePointer(BaseSubobject Base, llvm::Constant *VTable,
- const CXXRecordDecl *VTableClass,
- const AddressPointsMapTy& AddressPoints) {
- // Store the vtable pointer index if we're generating the primary VTT.
- if (VTableClass == MostDerivedClass) {
- assert(!SecondaryVirtualPointerIndices.count(Base) &&
- "A virtual pointer index already exists for this base subobject!");
- SecondaryVirtualPointerIndices[Base] = VTTComponents.size();
- }
-
- if (!GenerateDefinition) {
- VTTComponents.push_back(0);
- return;
- }
-
- uint64_t AddressPoint;
- if (VTableClass != MostDerivedClass) {
- // The vtable is a construction vtable, look in the construction vtable
- // address points.
- AddressPoint = AddressPoints.lookup(Base);
- assert(AddressPoint != 0 && "Did not find ctor vtable address point!");
- } else {
- // Just get the address point for the regular vtable.
- AddressPoint = CGM.getVTables().getAddressPoint(Base, VTableClass);
- assert(AddressPoint != 0 && "Did not find vtable address point!");
- }
+void
+CodeGenVTables::EmitVTTDefinition(llvm::GlobalVariable *VTT,
+ llvm::GlobalVariable::LinkageTypes Linkage,
+ const CXXRecordDecl *RD) {
+ VTTBuilder Builder(CGM.getContext(), RD, /*GenerateDefinition=*/true);
- if (!AddressPoint) AddressPoint = 0;
-
- llvm::Value *Idxs[] = {
- llvm::ConstantInt::get(llvm::Type::getInt64Ty(CGM.getLLVMContext()), 0),
- llvm::ConstantInt::get(llvm::Type::getInt64Ty(CGM.getLLVMContext()),
- AddressPoint)
- };
-
- llvm::Constant *Init =
- llvm::ConstantExpr::getInBoundsGetElementPtr(VTable, Idxs, 2);
-
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
- Init = llvm::ConstantExpr::getBitCast(Init, Int8PtrTy);
+ llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext()),
+ *Int64Ty = llvm::Type::getInt64Ty(CGM.getLLVMContext());
+ llvm::ArrayType *ArrayType =
+ llvm::ArrayType::get(Int8PtrTy, Builder.getVTTComponents().size());
- VTTComponents.push_back(Init);
-}
-
-void VTTBuilder::LayoutSecondaryVTTs(BaseSubobject Base) {
- const CXXRecordDecl *RD = Base.getBase();
-
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
-
- // Don't layout virtual bases.
- if (I->isVirtual())
- continue;
-
- const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
-
- const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
- CharUnits BaseOffset = Base.getBaseOffset() +
- Layout.getBaseClassOffset(BaseDecl);
-
- // Layout the VTT for this base.
- LayoutVTT(BaseSubobject(BaseDecl, BaseOffset), /*BaseIsVirtual=*/false);
+ SmallVector<llvm::Constant *, 8> VTables;
+ SmallVector<VTableAddressPointsMapTy, 8> VTableAddressPoints;
+ for (const VTTVTable *i = Builder.getVTTVTables().begin(),
+ *e = Builder.getVTTVTables().end(); i != e; ++i) {
+ VTableAddressPoints.push_back(VTableAddressPointsMapTy());
+ VTables.push_back(GetAddrOfVTTVTable(*this, RD, *i, Linkage,
+ VTableAddressPoints.back()));
}
-}
-
-void
-VTTBuilder::LayoutSecondaryVirtualPointers(BaseSubobject Base,
- bool BaseIsMorallyVirtual,
- llvm::Constant *VTable,
- const CXXRecordDecl *VTableClass,
- const AddressPointsMapTy& AddressPoints,
- VisitedVirtualBasesSetTy &VBases) {
- const CXXRecordDecl *RD = Base.getBase();
-
- // We're not interested in bases that don't have virtual bases, and not
- // morally virtual bases.
- if (!RD->getNumVBases() && !BaseIsMorallyVirtual)
- return;
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
- const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
-
- // Itanium C++ ABI 2.6.2:
- // Secondary virtual pointers are present for all bases with either
- // virtual bases or virtual function declarations overridden along a
- // virtual path.
- //
- // If the base class is not dynamic, we don't want to add it, nor any
- // of its base classes.
- if (!BaseDecl->isDynamicClass())
- continue;
-
- bool BaseDeclIsMorallyVirtual = BaseIsMorallyVirtual;
- bool BaseDeclIsNonVirtualPrimaryBase = false;
- CharUnits BaseOffset;
- if (I->isVirtual()) {
- // Ignore virtual bases that we've already visited.
- if (!VBases.insert(BaseDecl))
- continue;
-
- BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
- BaseDeclIsMorallyVirtual = true;
+ SmallVector<llvm::Constant *, 8> VTTComponents;
+ for (const VTTComponent *i = Builder.getVTTComponents().begin(),
+ *e = Builder.getVTTComponents().end(); i != e; ++i) {
+ const VTTVTable &VTTVT = Builder.getVTTVTables()[i->VTableIndex];
+ llvm::Constant *VTable = VTables[i->VTableIndex];
+ uint64_t AddressPoint;
+ if (VTTVT.getBase() == RD) {
+ // Just get the address point for the regular vtable.
+ AddressPoint = VTContext.getVTableLayout(RD)
+ .getAddressPoint(i->VTableBase);
+ assert(AddressPoint != 0 && "Did not find vtable address point!");
} else {
- const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
-
- BaseOffset = Base.getBaseOffset() +
- Layout.getBaseClassOffset(BaseDecl);
-
- if (!Layout.isPrimaryBaseVirtual() &&
- Layout.getPrimaryBase() == BaseDecl)
- BaseDeclIsNonVirtualPrimaryBase = true;
+ AddressPoint = VTableAddressPoints[i->VTableIndex].lookup(i->VTableBase);
+ assert(AddressPoint != 0 && "Did not find ctor vtable address point!");
}
- // Itanium C++ ABI 2.6.2:
- // Secondary virtual pointers: for each base class X which (a) has virtual
- // bases or is reachable along a virtual path from D, and (b) is not a
- // non-virtual primary base, the address of the virtual table for X-in-D
- // or an appropriate construction virtual table.
- if (!BaseDeclIsNonVirtualPrimaryBase &&
- (BaseDecl->getNumVBases() || BaseDeclIsMorallyVirtual)) {
- // Add the vtable pointer.
- AddVTablePointer(BaseSubobject(BaseDecl, BaseOffset), VTable,
- VTableClass, AddressPoints);
- }
+ llvm::Value *Idxs[] = {
+ llvm::ConstantInt::get(Int64Ty, 0),
+ llvm::ConstantInt::get(Int64Ty, AddressPoint)
+ };
- // And lay out the secondary virtual pointers for the base class.
- LayoutSecondaryVirtualPointers(BaseSubobject(BaseDecl, BaseOffset),
- BaseDeclIsMorallyVirtual, VTable,
- VTableClass, AddressPoints, VBases);
- }
-}
+ llvm::Constant *Init =
+ llvm::ConstantExpr::getInBoundsGetElementPtr(VTable, Idxs);
-void
-VTTBuilder::LayoutSecondaryVirtualPointers(BaseSubobject Base,
- llvm::Constant *VTable,
- const AddressPointsMapTy& AddressPoints) {
- VisitedVirtualBasesSetTy VBases;
- LayoutSecondaryVirtualPointers(Base, /*BaseIsMorallyVirtual=*/false,
- VTable, Base.getBase(), AddressPoints, VBases);
-}
+ Init = llvm::ConstantExpr::getBitCast(Init, Int8PtrTy);
-void VTTBuilder::LayoutVirtualVTTs(const CXXRecordDecl *RD,
- VisitedVirtualBasesSetTy &VBases) {
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
- const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
-
- // Check if this is a virtual base.
- if (I->isVirtual()) {
- // Check if we've seen this base before.
- if (!VBases.insert(BaseDecl))
- continue;
-
- CharUnits BaseOffset =
- MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
-
- LayoutVTT(BaseSubobject(BaseDecl, BaseOffset), /*BaseIsVirtual=*/true);
- }
-
- // We only need to layout virtual VTTs for this base if it actually has
- // virtual bases.
- if (BaseDecl->getNumVBases())
- LayoutVirtualVTTs(BaseDecl, VBases);
+ VTTComponents.push_back(Init);
}
-}
-void VTTBuilder::LayoutVTT(BaseSubobject Base, bool BaseIsVirtual) {
- const CXXRecordDecl *RD = Base.getBase();
-
- // Itanium C++ ABI 2.6.2:
- // An array of virtual table addresses, called the VTT, is declared for
- // each class type that has indirect or direct virtual base classes.
- if (RD->getNumVBases() == 0)
- return;
-
- bool IsPrimaryVTT = Base.getBase() == MostDerivedClass;
-
- if (!IsPrimaryVTT) {
- // Remember the sub-VTT index.
- SubVTTIndicies[Base] = VTTComponents.size();
- }
-
- AddressPointsMapTy AddressPoints;
- llvm::Constant *VTable = GetAddrOfVTable(Base, BaseIsVirtual, AddressPoints);
-
- // Add the primary vtable pointer.
- AddVTablePointer(Base, VTable, RD, AddressPoints);
-
- // Add the secondary VTTs.
- LayoutSecondaryVTTs(Base);
-
- // Add the secondary virtual pointers.
- LayoutSecondaryVirtualPointers(Base, VTable, AddressPoints);
-
- // If this is the primary VTT, we want to lay out virtual VTTs as well.
- if (IsPrimaryVTT) {
- VisitedVirtualBasesSetTy VBases;
- LayoutVirtualVTTs(Base.getBase(), VBases);
- }
-}
-
-}
-
-void
-CodeGenVTables::EmitVTTDefinition(llvm::GlobalVariable *VTT,
- llvm::GlobalVariable::LinkageTypes Linkage,
- const CXXRecordDecl *RD) {
- VTTBuilder Builder(CGM, RD, /*GenerateDefinition=*/true, Linkage);
-
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
- const llvm::ArrayType *ArrayType =
- llvm::ArrayType::get(Int8PtrTy, Builder.getVTTComponents().size());
-
- llvm::Constant *Init =
- llvm::ConstantArray::get(ArrayType, Builder.getVTTComponents());
+ llvm::Constant *Init = llvm::ConstantArray::get(ArrayType, VTTComponents);
VTT->setInitializer(Init);
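
For reference, the hierarchies this emits VTTs for are exactly those with direct or indirect virtual bases (Itanium C++ ABI 2.6.2, quoted in the builder removed above); a minimal example:

    struct A { virtual ~A(); };
    struct B : virtual A { };      // virtual base => B needs a VTT
    struct C : virtual A { };
    struct D : B, C { };           // D's VTT also refers to construction
                                   // vtables for B-in-D and C-in-D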
@@ -408,15 +106,16 @@ llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTT(const CXXRecordDecl *RD) {
llvm::raw_svector_ostream Out(OutName);
CGM.getCXXABI().getMangleContext().mangleCXXVTT(RD, Out);
Out.flush();
- llvm::StringRef Name = OutName.str();
+ StringRef Name = OutName.str();
- ComputeVTableRelatedInformation(RD, /*VTableRequired=*/true);
+ // This will also defer the definition of the VTT.
+ (void) GetAddrOfVTable(RD);
- VTTBuilder Builder(CGM, RD, /*GenerateDefinition=*/false);
+ VTTBuilder Builder(CGM.getContext(), RD, /*GenerateDefinition=*/false);
- const llvm::Type *Int8PtrTy =
+ llvm::Type *Int8PtrTy =
llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
- const llvm::ArrayType *ArrayType =
+ llvm::ArrayType *ArrayType =
llvm::ArrayType::get(Int8PtrTy, Builder.getVTTComponents().size());
llvm::GlobalVariable *GV =
@@ -452,7 +151,7 @@ uint64_t CodeGenVTables::getSubVTTIndex(const CXXRecordDecl *RD,
if (I != SubVTTIndicies.end())
return I->second;
- VTTBuilder Builder(CGM, RD, /*GenerateDefinition=*/false);
+ VTTBuilder Builder(CGM.getContext(), RD, /*GenerateDefinition=*/false);
for (llvm::DenseMap<BaseSubobject, uint64_t>::const_iterator I =
Builder.getSubVTTIndicies().begin(),
@@ -478,7 +177,7 @@ CodeGenVTables::getSecondaryVirtualPointerIndex(const CXXRecordDecl *RD,
if (I != SecondaryVirtualPointerIndices.end())
return I->second;
- VTTBuilder Builder(CGM, RD, /*GenerateDefinition=*/false);
+ VTTBuilder Builder(CGM.getContext(), RD, /*GenerateDefinition=*/false);
// Insert all secondary vpointer indices.
for (llvm::DenseMap<BaseSubobject, uint64_t>::const_iterator I =
diff --git a/lib/CodeGen/CGVTables.cpp b/lib/CodeGen/CGVTables.cpp
index c161b79fd3a0..a306c857e5aa 100644
--- a/lib/CodeGen/CGVTables.cpp
+++ b/lib/CodeGen/CGVTables.cpp
@@ -28,2399 +28,8 @@
using namespace clang;
using namespace CodeGen;
-namespace {
-
-/// BaseOffset - Represents an offset from a derived class to a direct or
-/// indirect base class.
-struct BaseOffset {
- /// DerivedClass - The derived class.
- const CXXRecordDecl *DerivedClass;
-
- /// VirtualBase - If the path from the derived class to the base class
- /// involves a virtual base class, this holds its declaration.
- const CXXRecordDecl *VirtualBase;
-
- /// NonVirtualOffset - The offset from the derived class to the base class.
- /// (Or the offset from the virtual base class to the base class, if the
- /// path from the derived class to the base class involves a virtual base
- /// class.
- CharUnits NonVirtualOffset;
-
- BaseOffset() : DerivedClass(0), VirtualBase(0),
- NonVirtualOffset(CharUnits::Zero()) { }
- BaseOffset(const CXXRecordDecl *DerivedClass,
- const CXXRecordDecl *VirtualBase, CharUnits NonVirtualOffset)
- : DerivedClass(DerivedClass), VirtualBase(VirtualBase),
- NonVirtualOffset(NonVirtualOffset) { }
-
- bool isEmpty() const { return NonVirtualOffset.isZero() && !VirtualBase; }
-};
-
-/// FinalOverriders - Contains the final overrider member functions for all
-/// member functions in the base subobjects of a class.
-class FinalOverriders {
-public:
- /// OverriderInfo - Information about a final overrider.
- struct OverriderInfo {
- /// Method - The method decl of the overrider.
- const CXXMethodDecl *Method;
-
- /// Offset - the base offset of the overrider in the layout class.
- CharUnits Offset;
-
- OverriderInfo() : Method(0), Offset(CharUnits::Zero()) { }
- };
-
-private:
- /// MostDerivedClass - The most derived class for which the final overriders
- /// are stored.
- const CXXRecordDecl *MostDerivedClass;
-
- /// MostDerivedClassOffset - If we're building final overriders for a
- /// construction vtable, this holds the offset from the layout class to the
- /// most derived class.
- const CharUnits MostDerivedClassOffset;
-
- /// LayoutClass - The class we're using for layout information. Will be
- /// different than the most derived class if the final overriders are for a
- /// construction vtable.
- const CXXRecordDecl *LayoutClass;
-
- ASTContext &Context;
-
- /// MostDerivedClassLayout - the AST record layout of the most derived class.
- const ASTRecordLayout &MostDerivedClassLayout;
-
- /// MethodBaseOffsetPairTy - Uniquely identifies a member function
- /// in a base subobject.
- typedef std::pair<const CXXMethodDecl *, CharUnits> MethodBaseOffsetPairTy;
-
- typedef llvm::DenseMap<MethodBaseOffsetPairTy,
- OverriderInfo> OverridersMapTy;
-
- /// OverridersMap - The final overriders for all virtual member functions of
- /// all the base subobjects of the most derived class.
- OverridersMapTy OverridersMap;
-
- /// SubobjectsToOffsetsMapTy - A mapping from a base subobject (represented
- /// as a record decl and a subobject number) and its offsets in the most
- /// derived class as well as the layout class.
- typedef llvm::DenseMap<std::pair<const CXXRecordDecl *, unsigned>,
- CharUnits> SubobjectOffsetMapTy;
-
- typedef llvm::DenseMap<const CXXRecordDecl *, unsigned> SubobjectCountMapTy;
-
- /// ComputeBaseOffsets - Compute the offsets for all base subobjects of the
- /// given base.
- void ComputeBaseOffsets(BaseSubobject Base, bool IsVirtual,
- CharUnits OffsetInLayoutClass,
- SubobjectOffsetMapTy &SubobjectOffsets,
- SubobjectOffsetMapTy &SubobjectLayoutClassOffsets,
- SubobjectCountMapTy &SubobjectCounts);
-
- typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
-
- /// dump - dump the final overriders for a base subobject, and all its direct
- /// and indirect base subobjects.
- void dump(llvm::raw_ostream &Out, BaseSubobject Base,
- VisitedVirtualBasesSetTy& VisitedVirtualBases);
-
-public:
- FinalOverriders(const CXXRecordDecl *MostDerivedClass,
- CharUnits MostDerivedClassOffset,
- const CXXRecordDecl *LayoutClass);
-
- /// getOverrider - Get the final overrider for the given method declaration in
- /// the subobject with the given base offset.
- OverriderInfo getOverrider(const CXXMethodDecl *MD,
- CharUnits BaseOffset) const {
- assert(OverridersMap.count(std::make_pair(MD, BaseOffset)) &&
- "Did not find overrider!");
-
- return OverridersMap.lookup(std::make_pair(MD, BaseOffset));
- }
-
- /// dump - dump the final overriders.
- void dump() {
- VisitedVirtualBasesSetTy VisitedVirtualBases;
- dump(llvm::errs(), BaseSubobject(MostDerivedClass, CharUnits::Zero()),
- VisitedVirtualBases);
- }
-
-};
-
-#define DUMP_OVERRIDERS 0
-
-FinalOverriders::FinalOverriders(const CXXRecordDecl *MostDerivedClass,
- CharUnits MostDerivedClassOffset,
- const CXXRecordDecl *LayoutClass)
- : MostDerivedClass(MostDerivedClass),
- MostDerivedClassOffset(MostDerivedClassOffset), LayoutClass(LayoutClass),
- Context(MostDerivedClass->getASTContext()),
- MostDerivedClassLayout(Context.getASTRecordLayout(MostDerivedClass)) {
-
- // Compute base offsets.
- SubobjectOffsetMapTy SubobjectOffsets;
- SubobjectOffsetMapTy SubobjectLayoutClassOffsets;
- SubobjectCountMapTy SubobjectCounts;
- ComputeBaseOffsets(BaseSubobject(MostDerivedClass, CharUnits::Zero()),
- /*IsVirtual=*/false,
- MostDerivedClassOffset,
- SubobjectOffsets, SubobjectLayoutClassOffsets,
- SubobjectCounts);
-
- // Get the final overriders.
- CXXFinalOverriderMap FinalOverriders;
- MostDerivedClass->getFinalOverriders(FinalOverriders);
-
- for (CXXFinalOverriderMap::const_iterator I = FinalOverriders.begin(),
- E = FinalOverriders.end(); I != E; ++I) {
- const CXXMethodDecl *MD = I->first;
- const OverridingMethods& Methods = I->second;
-
- for (OverridingMethods::const_iterator I = Methods.begin(),
- E = Methods.end(); I != E; ++I) {
- unsigned SubobjectNumber = I->first;
- assert(SubobjectOffsets.count(std::make_pair(MD->getParent(),
- SubobjectNumber)) &&
- "Did not find subobject offset!");
-
- CharUnits BaseOffset = SubobjectOffsets[std::make_pair(MD->getParent(),
- SubobjectNumber)];
-
- assert(I->second.size() == 1 && "Final overrider is not unique!");
- const UniqueVirtualMethod &Method = I->second.front();
-
- const CXXRecordDecl *OverriderRD = Method.Method->getParent();
- assert(SubobjectLayoutClassOffsets.count(
- std::make_pair(OverriderRD, Method.Subobject))
- && "Did not find subobject offset!");
- CharUnits OverriderOffset =
- SubobjectLayoutClassOffsets[std::make_pair(OverriderRD,
- Method.Subobject)];
-
- OverriderInfo& Overrider = OverridersMap[std::make_pair(MD, BaseOffset)];
- assert(!Overrider.Method && "Overrider should not exist yet!");
-
- Overrider.Offset = OverriderOffset;
- Overrider.Method = Method.Method;
- }
- }
-
-#if DUMP_OVERRIDERS
- // And dump them (for now).
- dump();
-#endif
-}
-
-static BaseOffset ComputeBaseOffset(ASTContext &Context,
- const CXXRecordDecl *DerivedRD,
- const CXXBasePath &Path) {
- CharUnits NonVirtualOffset = CharUnits::Zero();
-
- unsigned NonVirtualStart = 0;
- const CXXRecordDecl *VirtualBase = 0;
-
- // First, look for the virtual base class.
- for (unsigned I = 0, E = Path.size(); I != E; ++I) {
- const CXXBasePathElement &Element = Path[I];
-
- if (Element.Base->isVirtual()) {
- // FIXME: Can we break when we find the first virtual base?
- // (If we can't, can't we just iterate over the path in reverse order?)
- NonVirtualStart = I + 1;
- QualType VBaseType = Element.Base->getType();
- VirtualBase =
- cast<CXXRecordDecl>(VBaseType->getAs<RecordType>()->getDecl());
- }
- }
-
- // Now compute the non-virtual offset.
- for (unsigned I = NonVirtualStart, E = Path.size(); I != E; ++I) {
- const CXXBasePathElement &Element = Path[I];
-
- // Check the base class offset.
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(Element.Class);
-
- const RecordType *BaseType = Element.Base->getType()->getAs<RecordType>();
- const CXXRecordDecl *Base = cast<CXXRecordDecl>(BaseType->getDecl());
-
- NonVirtualOffset += Layout.getBaseClassOffset(Base);
- }
-
- // FIXME: This should probably use CharUnits or something. Maybe we should
- // even change the base offsets in ASTRecordLayout to be specified in
- // CharUnits.
- return BaseOffset(DerivedRD, VirtualBase, NonVirtualOffset);
-
-}
-
-static BaseOffset ComputeBaseOffset(ASTContext &Context,
- const CXXRecordDecl *BaseRD,
- const CXXRecordDecl *DerivedRD) {
- CXXBasePaths Paths(/*FindAmbiguities=*/false,
- /*RecordPaths=*/true, /*DetectVirtual=*/false);
-
- if (!const_cast<CXXRecordDecl *>(DerivedRD)->
- isDerivedFrom(const_cast<CXXRecordDecl *>(BaseRD), Paths)) {
- assert(false && "Class must be derived from the passed in base class!");
- return BaseOffset();
- }
-
- return ComputeBaseOffset(Context, DerivedRD, Paths.front());
-}
-
-static BaseOffset
-ComputeReturnAdjustmentBaseOffset(ASTContext &Context,
- const CXXMethodDecl *DerivedMD,
- const CXXMethodDecl *BaseMD) {
- const FunctionType *BaseFT = BaseMD->getType()->getAs<FunctionType>();
- const FunctionType *DerivedFT = DerivedMD->getType()->getAs<FunctionType>();
-
- // Canonicalize the return types.
- CanQualType CanDerivedReturnType =
- Context.getCanonicalType(DerivedFT->getResultType());
- CanQualType CanBaseReturnType =
- Context.getCanonicalType(BaseFT->getResultType());
-
- assert(CanDerivedReturnType->getTypeClass() ==
- CanBaseReturnType->getTypeClass() &&
- "Types must have same type class!");
-
- if (CanDerivedReturnType == CanBaseReturnType) {
- // No adjustment needed.
- return BaseOffset();
- }
-
- if (isa<ReferenceType>(CanDerivedReturnType)) {
- CanDerivedReturnType =
- CanDerivedReturnType->getAs<ReferenceType>()->getPointeeType();
- CanBaseReturnType =
- CanBaseReturnType->getAs<ReferenceType>()->getPointeeType();
- } else if (isa<PointerType>(CanDerivedReturnType)) {
- CanDerivedReturnType =
- CanDerivedReturnType->getAs<PointerType>()->getPointeeType();
- CanBaseReturnType =
- CanBaseReturnType->getAs<PointerType>()->getPointeeType();
- } else {
- assert(false && "Unexpected return type!");
- }
-
- // We need to compare unqualified types here; consider
- // const T *Base::foo();
- // T *Derived::foo();
- if (CanDerivedReturnType.getUnqualifiedType() ==
- CanBaseReturnType.getUnqualifiedType()) {
- // No adjustment needed.
- return BaseOffset();
- }
-
- const CXXRecordDecl *DerivedRD =
- cast<CXXRecordDecl>(cast<RecordType>(CanDerivedReturnType)->getDecl());
-
- const CXXRecordDecl *BaseRD =
- cast<CXXRecordDecl>(cast<RecordType>(CanBaseReturnType)->getDecl());
-
- return ComputeBaseOffset(Context, BaseRD, DerivedRD);
-}
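
The covariant-return pattern that produces a nonzero result here, sketched at the source level:

    struct Base { virtual Base *clone(); };
    struct Other { int pad; };
    struct Derived : Other, Base {
      virtual Derived *clone();    // the thunk must adjust the returned
                                   // Derived* to the Base* expected by
                                   // callers of the Base vtable slot
    };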
-
-void
-FinalOverriders::ComputeBaseOffsets(BaseSubobject Base, bool IsVirtual,
- CharUnits OffsetInLayoutClass,
- SubobjectOffsetMapTy &SubobjectOffsets,
- SubobjectOffsetMapTy &SubobjectLayoutClassOffsets,
- SubobjectCountMapTy &SubobjectCounts) {
- const CXXRecordDecl *RD = Base.getBase();
-
- unsigned SubobjectNumber = 0;
- if (!IsVirtual)
- SubobjectNumber = ++SubobjectCounts[RD];
-
- // Set up the subobject to offset mapping.
- assert(!SubobjectOffsets.count(std::make_pair(RD, SubobjectNumber))
- && "Subobject offset already exists!");
- assert(!SubobjectLayoutClassOffsets.count(std::make_pair(RD, SubobjectNumber))
- && "Subobject offset already exists!");
-
- SubobjectOffsets[std::make_pair(RD, SubobjectNumber)] = Base.getBaseOffset();
- SubobjectLayoutClassOffsets[std::make_pair(RD, SubobjectNumber)] =
- OffsetInLayoutClass;
-
- // Traverse our bases.
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
- const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
-
- CharUnits BaseOffset;
- CharUnits BaseOffsetInLayoutClass;
- if (I->isVirtual()) {
- // Check if we've visited this virtual base before.
- if (SubobjectOffsets.count(std::make_pair(BaseDecl, 0)))
- continue;
-
- const ASTRecordLayout &LayoutClassLayout =
- Context.getASTRecordLayout(LayoutClass);
-
- BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
- BaseOffsetInLayoutClass =
- LayoutClassLayout.getVBaseClassOffset(BaseDecl);
- } else {
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
- CharUnits Offset = Layout.getBaseClassOffset(BaseDecl);
-
- BaseOffset = Base.getBaseOffset() + Offset;
- BaseOffsetInLayoutClass = OffsetInLayoutClass + Offset;
- }
-
- ComputeBaseOffsets(BaseSubobject(BaseDecl, BaseOffset),
- I->isVirtual(), BaseOffsetInLayoutClass,
- SubobjectOffsets, SubobjectLayoutClassOffsets,
- SubobjectCounts);
- }
-}
-
-void FinalOverriders::dump(llvm::raw_ostream &Out, BaseSubobject Base,
- VisitedVirtualBasesSetTy &VisitedVirtualBases) {
- const CXXRecordDecl *RD = Base.getBase();
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
-
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
- const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
-
- // Ignore bases that don't have any virtual member functions.
- if (!BaseDecl->isPolymorphic())
- continue;
-
- CharUnits BaseOffset;
- if (I->isVirtual()) {
- if (!VisitedVirtualBases.insert(BaseDecl)) {
- // We've visited this base before.
- continue;
- }
-
- BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
- } else {
- BaseOffset = Layout.getBaseClassOffset(BaseDecl) + Base.getBaseOffset();
- }
-
- dump(Out, BaseSubobject(BaseDecl, BaseOffset), VisitedVirtualBases);
- }
-
- Out << "Final overriders for (" << RD->getQualifiedNameAsString() << ", ";
- Out << Base.getBaseOffset().getQuantity() << ")\n";
-
- // Now dump the overriders for this base subobject.
- for (CXXRecordDecl::method_iterator I = RD->method_begin(),
- E = RD->method_end(); I != E; ++I) {
- const CXXMethodDecl *MD = *I;
-
- if (!MD->isVirtual())
- continue;
-
- OverriderInfo Overrider = getOverrider(MD, Base.getBaseOffset());
-
- Out << " " << MD->getQualifiedNameAsString() << " - (";
- Out << Overrider.Method->getQualifiedNameAsString();
- Out << ", " << ", " << Overrider.Offset.getQuantity() << ')';
-
- BaseOffset Offset;
- if (!Overrider.Method->isPure())
- Offset = ComputeReturnAdjustmentBaseOffset(Context, Overrider.Method, MD);
-
- if (!Offset.isEmpty()) {
- Out << " [ret-adj: ";
- if (Offset.VirtualBase)
- Out << Offset.VirtualBase->getQualifiedNameAsString() << " vbase, ";
-
- Out << Offset.NonVirtualOffset.getQuantity() << " nv]";
- }
-
- Out << "\n";
- }
-}
-
-/// VTableComponent - Represents a single component in a vtable.
-class VTableComponent {
-public:
- enum Kind {
- CK_VCallOffset,
- CK_VBaseOffset,
- CK_OffsetToTop,
- CK_RTTI,
- CK_FunctionPointer,
-
- /// CK_CompleteDtorPointer - A pointer to the complete destructor.
- CK_CompleteDtorPointer,
-
- /// CK_DeletingDtorPointer - A pointer to the deleting destructor.
- CK_DeletingDtorPointer,
-
- /// CK_UnusedFunctionPointer - In some cases, a vtable function pointer
- /// will end up never being called. Such vtable function pointers are
- /// represented as a CK_UnusedFunctionPointer.
- CK_UnusedFunctionPointer
- };
-
- static VTableComponent MakeVCallOffset(CharUnits Offset) {
- return VTableComponent(CK_VCallOffset, Offset);
- }
-
- static VTableComponent MakeVBaseOffset(CharUnits Offset) {
- return VTableComponent(CK_VBaseOffset, Offset);
- }
-
- static VTableComponent MakeOffsetToTop(CharUnits Offset) {
- return VTableComponent(CK_OffsetToTop, Offset);
- }
-
- static VTableComponent MakeRTTI(const CXXRecordDecl *RD) {
- return VTableComponent(CK_RTTI, reinterpret_cast<uintptr_t>(RD));
- }
-
- static VTableComponent MakeFunction(const CXXMethodDecl *MD) {
- assert(!isa<CXXDestructorDecl>(MD) &&
- "Don't use MakeFunction with destructors!");
-
- return VTableComponent(CK_FunctionPointer,
- reinterpret_cast<uintptr_t>(MD));
- }
-
- static VTableComponent MakeCompleteDtor(const CXXDestructorDecl *DD) {
- return VTableComponent(CK_CompleteDtorPointer,
- reinterpret_cast<uintptr_t>(DD));
- }
-
- static VTableComponent MakeDeletingDtor(const CXXDestructorDecl *DD) {
- return VTableComponent(CK_DeletingDtorPointer,
- reinterpret_cast<uintptr_t>(DD));
- }
-
- static VTableComponent MakeUnusedFunction(const CXXMethodDecl *MD) {
- assert(!isa<CXXDestructorDecl>(MD) &&
- "Don't use MakeUnusedFunction with destructors!");
- return VTableComponent(CK_UnusedFunctionPointer,
- reinterpret_cast<uintptr_t>(MD));
- }
-
- static VTableComponent getFromOpaqueInteger(uint64_t I) {
- return VTableComponent(I);
- }
-
- /// getKind - Get the kind of this vtable component.
- Kind getKind() const {
- return (Kind)(Value & 0x7);
- }
-
- CharUnits getVCallOffset() const {
- assert(getKind() == CK_VCallOffset && "Invalid component kind!");
-
- return getOffset();
- }
-
- CharUnits getVBaseOffset() const {
- assert(getKind() == CK_VBaseOffset && "Invalid component kind!");
-
- return getOffset();
- }
-
- CharUnits getOffsetToTop() const {
- assert(getKind() == CK_OffsetToTop && "Invalid component kind!");
-
- return getOffset();
- }
-
- const CXXRecordDecl *getRTTIDecl() const {
- assert(getKind() == CK_RTTI && "Invalid component kind!");
-
- return reinterpret_cast<CXXRecordDecl *>(getPointer());
- }
-
- const CXXMethodDecl *getFunctionDecl() const {
- assert(getKind() == CK_FunctionPointer);
-
- return reinterpret_cast<CXXMethodDecl *>(getPointer());
- }
-
- const CXXDestructorDecl *getDestructorDecl() const {
- assert((getKind() == CK_CompleteDtorPointer ||
- getKind() == CK_DeletingDtorPointer) && "Invalid component kind!");
-
- return reinterpret_cast<CXXDestructorDecl *>(getPointer());
- }
-
- const CXXMethodDecl *getUnusedFunctionDecl() const {
- assert(getKind() == CK_UnusedFunctionPointer);
-
- return reinterpret_cast<CXXMethodDecl *>(getPointer());
- }
-
-private:
- VTableComponent(Kind ComponentKind, CharUnits Offset) {
- assert((ComponentKind == CK_VCallOffset ||
- ComponentKind == CK_VBaseOffset ||
- ComponentKind == CK_OffsetToTop) && "Invalid component kind!");
- assert(Offset.getQuantity() <= ((1LL << 56) - 1) && "Offset is too big!");
-
- Value = ((Offset.getQuantity() << 3) | ComponentKind);
- }
-
- VTableComponent(Kind ComponentKind, uintptr_t Ptr) {
- assert((ComponentKind == CK_RTTI ||
- ComponentKind == CK_FunctionPointer ||
- ComponentKind == CK_CompleteDtorPointer ||
- ComponentKind == CK_DeletingDtorPointer ||
- ComponentKind == CK_UnusedFunctionPointer) &&
- "Invalid component kind!");
-
- assert((Ptr & 7) == 0 && "Pointer not sufficiently aligned!");
-
- Value = Ptr | ComponentKind;
- }
-
- CharUnits getOffset() const {
- assert((getKind() == CK_VCallOffset || getKind() == CK_VBaseOffset ||
- getKind() == CK_OffsetToTop) && "Invalid component kind!");
-
- return CharUnits::fromQuantity(Value >> 3);
- }
-
- uintptr_t getPointer() const {
- assert((getKind() == CK_RTTI ||
- getKind() == CK_FunctionPointer ||
- getKind() == CK_CompleteDtorPointer ||
- getKind() == CK_DeletingDtorPointer ||
- getKind() == CK_UnusedFunctionPointer) &&
- "Invalid component kind!");
-
- return static_cast<uintptr_t>(Value & ~7ULL);
- }
-
- explicit VTableComponent(uint64_t Value)
- : Value(Value) { }
-
- /// The kind is stored in the lower 3 bits of the value. For offsets, we
- /// make use of the fact that classes can't be larger than 2^55 bytes,
- /// so we store the offset in the lower part of the 61 bits that remain.
- /// (The reason that we're not simply using a PointerIntPair here is that we
- /// need the offsets to be 64-bit, even when on a 32-bit machine).
- int64_t Value;
-};
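
A standalone sketch (hypothetical names) of the tagged-integer encoding VTableComponent uses: the kind lives in the low 3 bits, and a signed byte offset (or an 8-byte-aligned pointer whose low bits are known to be zero) lives in the bits above, matching the shift-and-mask logic in the class:

    #include <cassert>
    #include <cstdint>

    enum MiniKind { MK_VCallOffset = 0, MK_RTTI = 3 };

    static int64_t pack(MiniKind K, int64_t Offset) {
      assert(Offset <= (1LL << 55) - 1 && "Offset is too big!");
      // Shift via uint64_t to sidestep signed-shift UB; the original code
      // shifts the signed quantity directly.
      return (int64_t)(((uint64_t)Offset << 3) | (uint64_t)K);
    }

    static MiniKind kindOf(int64_t V) { return MiniKind(V & 0x7); }

    // Arithmetic right shift on the signed value restores negative offsets,
    // which is why VTableComponent stores Value as int64_t.
    static int64_t offsetOf(int64_t V) { return V >> 3; }

    int main() {
      int64_t V = pack(MK_VCallOffset, -16);
      assert(kindOf(V) == MK_VCallOffset && offsetOf(V) == -16);
      return 0;
    }
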
-
-/// VCallOffsetMap - Keeps track of vcall offsets when building a vtable.
-struct VCallOffsetMap {
-
- typedef std::pair<const CXXMethodDecl *, CharUnits> MethodAndOffsetPairTy;
-
- /// Offsets - Keeps track of methods and their offsets.
- // FIXME: This should be a real map and not a vector.
- llvm::SmallVector<MethodAndOffsetPairTy, 16> Offsets;
-
- /// MethodsCanShareVCallOffset - Returns whether two virtual member functions
- /// can share the same vcall offset.
- static bool MethodsCanShareVCallOffset(const CXXMethodDecl *LHS,
- const CXXMethodDecl *RHS);
-
-public:
- /// AddVCallOffset - Adds a vcall offset to the map. Returns true if the
- /// add was successful, or false if there was already a member function with
- /// the same signature in the map.
- bool AddVCallOffset(const CXXMethodDecl *MD, CharUnits OffsetOffset);
-
- /// getVCallOffsetOffset - Returns the vcall offset offset (relative to the
- /// vtable address point) for the given virtual member function.
- CharUnits getVCallOffsetOffset(const CXXMethodDecl *MD);
-
- // empty - Return whether the offset map is empty or not.
- bool empty() const { return Offsets.empty(); }
-};
-
-static bool HasSameVirtualSignature(const CXXMethodDecl *LHS,
- const CXXMethodDecl *RHS) {
- ASTContext &C = LHS->getASTContext(); // TODO: thread this down
- CanQual<FunctionProtoType>
- LT = C.getCanonicalType(LHS->getType()).getAs<FunctionProtoType>(),
- RT = C.getCanonicalType(RHS->getType()).getAs<FunctionProtoType>();
-
- // Fast-path matches in the canonical types.
- if (LT == RT) return true;
-
- // Force the signatures to match. We can't rely on the overrides
- // list here because there isn't necessarily an inheritance
- // relationship between the two methods.
- if (LT.getQualifiers() != RT.getQualifiers() ||
- LT->getNumArgs() != RT->getNumArgs())
- return false;
- for (unsigned I = 0, E = LT->getNumArgs(); I != E; ++I)
- if (LT->getArgType(I) != RT->getArgType(I))
- return false;
- return true;
-}
-
-bool VCallOffsetMap::MethodsCanShareVCallOffset(const CXXMethodDecl *LHS,
- const CXXMethodDecl *RHS) {
- assert(LHS->isVirtual() && "LHS must be virtual!");
- assert(RHS->isVirtual() && "RHS must be virtual!");
-
- // A destructor can share a vcall offset with another destructor.
- if (isa<CXXDestructorDecl>(LHS))
- return isa<CXXDestructorDecl>(RHS);
-
- // FIXME: We need to check more things here.
-
- // The methods must have the same name.
- DeclarationName LHSName = LHS->getDeclName();
- DeclarationName RHSName = RHS->getDeclName();
- if (LHSName != RHSName)
- return false;
-
- // And the same signatures.
- return HasSameVirtualSignature(LHS, RHS);
-}
-
-bool VCallOffsetMap::AddVCallOffset(const CXXMethodDecl *MD,
- CharUnits OffsetOffset) {
- // Check if we can reuse an offset.
- for (unsigned I = 0, E = Offsets.size(); I != E; ++I) {
- if (MethodsCanShareVCallOffset(Offsets[I].first, MD))
- return false;
- }
-
- // Add the offset.
- Offsets.push_back(MethodAndOffsetPairTy(MD, OffsetOffset));
- return true;
-}
-
-CharUnits VCallOffsetMap::getVCallOffsetOffset(const CXXMethodDecl *MD) {
- // Look for an offset.
- for (unsigned I = 0, E = Offsets.size(); I != E; ++I) {
- if (MethodsCanShareVCallOffset(Offsets[I].first, MD))
- return Offsets[I].second;
- }
-
- assert(false && "Should always find a vcall offset offset!");
- return CharUnits::Zero();
-}
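
The two methods above implement first-fit sharing keyed on signature equivalence. A toy standalone analogue, with signatures reduced to plain strings and all names hypothetical:

    #include <cassert>
    #include <cstdint>
    #include <string>
    #include <utility>
    #include <vector>

    // Toy analogue of VCallOffsetMap: a second method with an equivalent
    // signature is rejected by AddVCallOffset and therefore reuses the
    // slot recorded for the first one.
    struct MiniVCallOffsetMap {
      std::vector<std::pair<std::string, int64_t> > Offsets;

      bool AddVCallOffset(const std::string &Sig, int64_t OffsetOffset) {
        for (size_t I = 0; I != Offsets.size(); ++I)
          if (Offsets[I].first == Sig)
            return false;  // already have an offset for this signature
        Offsets.push_back(std::make_pair(Sig, OffsetOffset));
        return true;
      }

      int64_t getVCallOffsetOffset(const std::string &Sig) const {
        for (size_t I = 0; I != Offsets.size(); ++I)
          if (Offsets[I].first == Sig)
            return Offsets[I].second;
        assert(false && "Should always find a vcall offset offset!");
        return 0;
      }
    };

    int main() {
      MiniVCallOffsetMap M;
      assert(M.AddVCallOffset("f(int)", -24));
      assert(!M.AddVCallOffset("f(int)", -32));  // same signature: shared slot
      assert(M.getVCallOffsetOffset("f(int)") == -24);
      return 0;
    }
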
-
-/// VCallAndVBaseOffsetBuilder - Class for building vcall and vbase offsets.
-class VCallAndVBaseOffsetBuilder {
-public:
- typedef llvm::DenseMap<const CXXRecordDecl *, CharUnits>
- VBaseOffsetOffsetsMapTy;
-
-private:
- /// MostDerivedClass - The most derived class for which we're building vcall
- /// and vbase offsets.
- const CXXRecordDecl *MostDerivedClass;
-
- /// LayoutClass - The class we're using for layout information. Will be
- /// different than the most derived class if we're building a construction
- /// vtable.
- const CXXRecordDecl *LayoutClass;
-
- /// Context - The ASTContext which we will use for layout information.
- ASTContext &Context;
-
- /// Components - vcall and vbase offset components
- typedef llvm::SmallVector<VTableComponent, 64> VTableComponentVectorTy;
- VTableComponentVectorTy Components;
-
- /// VisitedVirtualBases - Visited virtual bases.
- llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBases;
-
- /// VCallOffsets - Keeps track of vcall offsets.
- VCallOffsetMap VCallOffsets;
-
- /// VBaseOffsetOffsets - Contains the offsets of the virtual base offsets,
- /// relative to the address point.
- VBaseOffsetOffsetsMapTy VBaseOffsetOffsets;
-
- /// FinalOverriders - The final overriders of the most derived class.
- /// (Can be null when we're not building a vtable of the most derived class).
- const FinalOverriders *Overriders;
-
- /// AddVCallAndVBaseOffsets - Add vcall offsets and vbase offsets for the
- /// given base subobject.
- void AddVCallAndVBaseOffsets(BaseSubobject Base, bool BaseIsVirtual,
- CharUnits RealBaseOffset);
-
- /// AddVCallOffsets - Add vcall offsets for the given base subobject.
- void AddVCallOffsets(BaseSubobject Base, CharUnits VBaseOffset);
-
- /// AddVBaseOffsets - Add vbase offsets for the given class.
- void AddVBaseOffsets(const CXXRecordDecl *Base,
- CharUnits OffsetInLayoutClass);
-
- /// getCurrentOffsetOffset - Get the current vcall or vbase offset offset in
- /// chars, relative to the vtable address point.
- CharUnits getCurrentOffsetOffset() const;
-
-public:
- VCallAndVBaseOffsetBuilder(const CXXRecordDecl *MostDerivedClass,
- const CXXRecordDecl *LayoutClass,
- const FinalOverriders *Overriders,
- BaseSubobject Base, bool BaseIsVirtual,
- CharUnits OffsetInLayoutClass)
- : MostDerivedClass(MostDerivedClass), LayoutClass(LayoutClass),
- Context(MostDerivedClass->getASTContext()), Overriders(Overriders) {
-
- // Add vcall and vbase offsets.
- AddVCallAndVBaseOffsets(Base, BaseIsVirtual, OffsetInLayoutClass);
- }
-
- /// Methods for iterating over the components.
- typedef VTableComponentVectorTy::const_reverse_iterator const_iterator;
- const_iterator components_begin() const { return Components.rbegin(); }
- const_iterator components_end() const { return Components.rend(); }
-
- const VCallOffsetMap &getVCallOffsets() const { return VCallOffsets; }
- const VBaseOffsetOffsetsMapTy &getVBaseOffsetOffsets() const {
- return VBaseOffsetOffsets;
- }
-};
-
-void
-VCallAndVBaseOffsetBuilder::AddVCallAndVBaseOffsets(BaseSubobject Base,
- bool BaseIsVirtual,
- CharUnits RealBaseOffset) {
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(Base.getBase());
-
- // Itanium C++ ABI 2.5.2:
- // ..in classes sharing a virtual table with a primary base class, the vcall
- // and vbase offsets added by the derived class all come before the vcall
- // and vbase offsets required by the base class, so that the latter may be
- // laid out as required by the base class without regard to additions from
- // the derived class(es).
-
- // (Since we're emitting the vcall and vbase offsets in reverse order, we'll
- // emit them for the primary base first).
- if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase()) {
- bool PrimaryBaseIsVirtual = Layout.isPrimaryBaseVirtual();
-
- CharUnits PrimaryBaseOffset;
-
- // Get the base offset of the primary base.
- if (PrimaryBaseIsVirtual) {
- assert(Layout.getVBaseClassOffsetInBits(PrimaryBase) == 0 &&
- "Primary vbase should have a zero offset!");
-
- const ASTRecordLayout &MostDerivedClassLayout =
- Context.getASTRecordLayout(MostDerivedClass);
-
- PrimaryBaseOffset =
- MostDerivedClassLayout.getVBaseClassOffset(PrimaryBase);
- } else {
- assert(Layout.getBaseClassOffsetInBits(PrimaryBase) == 0 &&
- "Primary base should have a zero offset!");
-
- PrimaryBaseOffset = Base.getBaseOffset();
- }
-
- AddVCallAndVBaseOffsets(
- BaseSubobject(PrimaryBase,PrimaryBaseOffset),
- PrimaryBaseIsVirtual, RealBaseOffset);
- }
-
- AddVBaseOffsets(Base.getBase(), RealBaseOffset);
-
- // We only want to add vcall offsets for virtual bases.
- if (BaseIsVirtual)
- AddVCallOffsets(Base, RealBaseOffset);
-}
-
-CharUnits VCallAndVBaseOffsetBuilder::getCurrentOffsetOffset() const {
- // OffsetIndex is the index of this vcall or vbase offset, relative to the
- // vtable address point. (We subtract 3 to account for the three entries just
- // above the address point: the RTTI info, the offset to top, and the
- // vcall offset itself.)
- int64_t OffsetIndex = -(int64_t)(3 + Components.size());
-
- CharUnits PointerWidth =
- Context.toCharUnitsFromBits(Context.Target.getPointerWidth(0));
- CharUnits OffsetOffset = PointerWidth * OffsetIndex;
- return OffsetOffset;
-}
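
A worked instance of the arithmetic above, assuming a hypothetical 64-bit target with 8-byte pointers: slot -1 holds the RTTI entry and slot -2 the offset-to-top, so the first vcall/vbase offset lands at slot -3 (byte -24) and subsequent ones grow downward:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Mirrors getCurrentOffsetOffset(), with the pointer width fixed at 8.
    static int64_t offsetOffsetBytes(size_t NumComponentsSoFar) {
      const int64_t PointerWidthInChars = 8;  // assumption: 8-byte pointers
      int64_t OffsetIndex = -(int64_t)(3 + NumComponentsSoFar);
      return OffsetIndex * PointerWidthInChars;
    }

    int main() {
      assert(offsetOffsetBytes(0) == -24);  // first offset emitted
      assert(offsetOffsetBytes(1) == -32);  // next one, one slot further down
      return 0;
    }
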
-
-void VCallAndVBaseOffsetBuilder::AddVCallOffsets(BaseSubobject Base,
- CharUnits VBaseOffset) {
- const CXXRecordDecl *RD = Base.getBase();
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
-
- const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
-
- // Handle the primary base first.
- // We only want to add vcall offsets if the base is non-virtual; a virtual
- // primary base will have its vcall and vbase offsets emitted already.
- if (PrimaryBase && !Layout.isPrimaryBaseVirtual()) {
- // Get the base offset of the primary base.
- assert(Layout.getBaseClassOffsetInBits(PrimaryBase) == 0 &&
- "Primary base should have a zero offset!");
-
- AddVCallOffsets(BaseSubobject(PrimaryBase, Base.getBaseOffset()),
- VBaseOffset);
- }
-
- // Add the vcall offsets.
- for (CXXRecordDecl::method_iterator I = RD->method_begin(),
- E = RD->method_end(); I != E; ++I) {
- const CXXMethodDecl *MD = *I;
-
- if (!MD->isVirtual())
- continue;
-
- CharUnits OffsetOffset = getCurrentOffsetOffset();
-
- // Don't add a vcall offset if we already have one for this member function
- // signature.
- if (!VCallOffsets.AddVCallOffset(MD, OffsetOffset))
- continue;
-
- CharUnits Offset = CharUnits::Zero();
-
- if (Overriders) {
- // Get the final overrider.
- FinalOverriders::OverriderInfo Overrider =
- Overriders->getOverrider(MD, Base.getBaseOffset());
-
- /// The vcall offset is the offset from the virtual base to the object
- /// where the function was overridden.
- Offset = Overrider.Offset - VBaseOffset;
- }
-
- Components.push_back(
- VTableComponent::MakeVCallOffset(Offset));
- }
-
- // And iterate over all non-virtual bases (ignoring the primary base).
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
-
- if (I->isVirtual())
- continue;
-
- const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
- if (BaseDecl == PrimaryBase)
- continue;
-
- // Get the base offset of this base.
- CharUnits BaseOffset = Base.getBaseOffset() +
- Layout.getBaseClassOffset(BaseDecl);
-
- AddVCallOffsets(BaseSubobject(BaseDecl, BaseOffset),
- VBaseOffset);
- }
-}
-
-void
-VCallAndVBaseOffsetBuilder::AddVBaseOffsets(const CXXRecordDecl *RD,
- CharUnits OffsetInLayoutClass) {
- const ASTRecordLayout &LayoutClassLayout =
- Context.getASTRecordLayout(LayoutClass);
-
- // Add vbase offsets.
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
- const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
-
- // Check if this is a virtual base that we haven't visited before.
- if (I->isVirtual() && VisitedVirtualBases.insert(BaseDecl)) {
- CharUnits Offset =
- LayoutClassLayout.getVBaseClassOffset(BaseDecl) - OffsetInLayoutClass;
-
- // Add the vbase offset offset.
- assert(!VBaseOffsetOffsets.count(BaseDecl) &&
- "vbase offset offset already exists!");
-
- CharUnits VBaseOffsetOffset = getCurrentOffsetOffset();
- VBaseOffsetOffsets.insert(
- std::make_pair(BaseDecl, VBaseOffsetOffset));
-
- Components.push_back(
- VTableComponent::MakeVBaseOffset(Offset));
- }
-
- // Check the base class looking for more vbase offsets.
- AddVBaseOffsets(BaseDecl, OffsetInLayoutClass);
- }
-}
-
-/// VTableBuilder - Class for building vtable layout information.
-class VTableBuilder {
-public:
- /// PrimaryBasesSetVectorTy - A set vector of direct and indirect
- /// primary bases.
- typedef llvm::SmallSetVector<const CXXRecordDecl *, 8>
- PrimaryBasesSetVectorTy;
-
- typedef llvm::DenseMap<const CXXRecordDecl *, CharUnits>
- VBaseOffsetOffsetsMapTy;
-
- typedef llvm::DenseMap<BaseSubobject, uint64_t>
- AddressPointsMapTy;
-
-private:
- /// VTables - Global vtable information.
- CodeGenVTables &VTables;
-
- /// MostDerivedClass - The most derived class for which we're building this
- /// vtable.
- const CXXRecordDecl *MostDerivedClass;
-
- /// MostDerivedClassOffset - If we're building a construction vtable, this
- /// holds the offset from the layout class to the most derived class.
- const CharUnits MostDerivedClassOffset;
-
- /// MostDerivedClassIsVirtual - Whether the most derived class is a virtual
- /// base. (This only makes sense when building a construction vtable).
- bool MostDerivedClassIsVirtual;
-
- /// LayoutClass - The class we're using for layout information. Will be
- /// different than the most derived class if we're building a construction
- /// vtable.
- const CXXRecordDecl *LayoutClass;
-
- /// Context - The ASTContext which we will use for layout information.
- ASTContext &Context;
-
- /// FinalOverriders - The final overriders of the most derived class.
- const FinalOverriders Overriders;
-
- /// VCallOffsetsForVBases - Keeps track of vcall offsets for the virtual
- /// bases in this vtable.
- llvm::DenseMap<const CXXRecordDecl *, VCallOffsetMap> VCallOffsetsForVBases;
-
- /// VBaseOffsetOffsets - Contains the offsets of the virtual base offsets for
- /// the most derived class.
- VBaseOffsetOffsetsMapTy VBaseOffsetOffsets;
-
- /// Components - The components of the vtable being built.
- llvm::SmallVector<VTableComponent, 64> Components;
-
- /// AddressPoints - Address points for the vtable being built.
- AddressPointsMapTy AddressPoints;
-
- /// MethodInfo - Contains information about a method in a vtable.
- /// (Used for computing 'this' pointer adjustment thunks.)
- struct MethodInfo {
- /// BaseOffset - The base offset of this method.
- const CharUnits BaseOffset;
-
- /// BaseOffsetInLayoutClass - The base offset in the layout class of this
- /// method.
- const CharUnits BaseOffsetInLayoutClass;
-
- /// VTableIndex - The index in the vtable that this method has.
- /// (For destructors, this is the index of the complete destructor).
- const uint64_t VTableIndex;
-
- MethodInfo(CharUnits BaseOffset, CharUnits BaseOffsetInLayoutClass,
- uint64_t VTableIndex)
- : BaseOffset(BaseOffset),
- BaseOffsetInLayoutClass(BaseOffsetInLayoutClass),
- VTableIndex(VTableIndex) { }
-
- MethodInfo()
- : BaseOffset(CharUnits::Zero()),
- BaseOffsetInLayoutClass(CharUnits::Zero()),
- VTableIndex(0) { }
- };
-
- typedef llvm::DenseMap<const CXXMethodDecl *, MethodInfo> MethodInfoMapTy;
-
- /// MethodInfoMap - The information for all methods in the vtable we're
- /// currently building.
- MethodInfoMapTy MethodInfoMap;
-
- typedef llvm::DenseMap<uint64_t, ThunkInfo> VTableThunksMapTy;
-
- /// VTableThunks - The thunks by vtable index in the vtable currently being
- /// built.
- VTableThunksMapTy VTableThunks;
-
- typedef llvm::SmallVector<ThunkInfo, 1> ThunkInfoVectorTy;
- typedef llvm::DenseMap<const CXXMethodDecl *, ThunkInfoVectorTy> ThunksMapTy;
-
- /// Thunks - A map that contains all the thunks needed for all methods in the
- /// most derived class for which the vtable is currently being built.
- ThunksMapTy Thunks;
-
- /// AddThunk - Add a thunk for the given method.
- void AddThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk);
-
- /// ComputeThisAdjustments - Compute the 'this' pointer adjustments for the
- /// part of the vtable we're currently building.
- void ComputeThisAdjustments();
-
- typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
-
- /// PrimaryVirtualBases - All known virtual bases who are a primary base of
- /// some other base.
- VisitedVirtualBasesSetTy PrimaryVirtualBases;
-
- /// ComputeReturnAdjustment - Compute the return adjustment given a return
- /// adjustment base offset.
- ReturnAdjustment ComputeReturnAdjustment(BaseOffset Offset);
-
- /// ComputeThisAdjustmentBaseOffset - Compute the base offset for adjusting
- /// the 'this' pointer from the base subobject to the derived subobject.
- BaseOffset ComputeThisAdjustmentBaseOffset(BaseSubobject Base,
- BaseSubobject Derived) const;
-
- /// ComputeThisAdjustment - Compute the 'this' pointer adjustment for the
- /// given virtual member function, its offset in the layout class and its
- /// final overrider.
- ThisAdjustment
- ComputeThisAdjustment(const CXXMethodDecl *MD,
- CharUnits BaseOffsetInLayoutClass,
- FinalOverriders::OverriderInfo Overrider);
-
- /// AddMethod - Add a single virtual member function to the vtable
- /// components vector.
- void AddMethod(const CXXMethodDecl *MD, ReturnAdjustment ReturnAdjustment);
-
- /// IsOverriderUsed - Returns whether the overrider will ever be used in this
- /// part of the vtable.
- ///
- /// Itanium C++ ABI 2.5.2:
- ///
- /// struct A { virtual void f(); };
- /// struct B : virtual public A { int i; };
- /// struct C : virtual public A { int j; };
- /// struct D : public B, public C {};
- ///
- /// When B and C are declared, A is a primary base in each case, so although
- /// vcall offsets are allocated in the A-in-B and A-in-C vtables, no this
- /// adjustment is required and no thunk is generated. However, inside D
- /// objects, A is no longer a primary base of C, so if we allowed calls to
- /// C::f() to use the copy of A's vtable in the C subobject, we would need
- /// to adjust this from C* to B::A*, which would require a third-party
- /// thunk. Since we require that a call to C::f() first convert to A*,
- /// C-in-D's copy of A's vtable is never referenced, so this is not
- /// necessary.
- bool IsOverriderUsed(const CXXMethodDecl *Overrider,
- CharUnits BaseOffsetInLayoutClass,
- const CXXRecordDecl *FirstBaseInPrimaryBaseChain,
- CharUnits FirstBaseOffsetInLayoutClass) const;
-
- /// AddMethods - Add the methods of this base subobject and all its
- /// primary bases to the vtable components vector.
- void AddMethods(BaseSubobject Base, CharUnits BaseOffsetInLayoutClass,
- const CXXRecordDecl *FirstBaseInPrimaryBaseChain,
- CharUnits FirstBaseOffsetInLayoutClass,
- PrimaryBasesSetVectorTy &PrimaryBases);
-
- // LayoutVTable - Layout the vtable for the given base class, including its
- // secondary vtables and any vtables for virtual bases.
- void LayoutVTable();
-
- /// LayoutPrimaryAndSecondaryVTables - Layout the primary vtable for the
- /// given base subobject, as well as all its secondary vtables.
- ///
- /// \param BaseIsMorallyVirtual whether the base subobject is a virtual base
- /// or a direct or indirect base of a virtual base.
- ///
- /// \param BaseIsVirtualInLayoutClass - Whether the base subobject is virtual
- /// in the layout class.
- void LayoutPrimaryAndSecondaryVTables(BaseSubobject Base,
- bool BaseIsMorallyVirtual,
- bool BaseIsVirtualInLayoutClass,
- CharUnits OffsetInLayoutClass);
-
- /// LayoutSecondaryVTables - Layout the secondary vtables for the given base
- /// subobject.
- ///
- /// \param BaseIsMorallyVirtual whether the base subobject is a virtual base
- /// or a direct or indirect base of a virtual base.
- void LayoutSecondaryVTables(BaseSubobject Base, bool BaseIsMorallyVirtual,
- CharUnits OffsetInLayoutClass);
-
- /// DeterminePrimaryVirtualBases - Determine the primary virtual bases in this
- /// class hierarchy.
- void DeterminePrimaryVirtualBases(const CXXRecordDecl *RD,
- CharUnits OffsetInLayoutClass,
- VisitedVirtualBasesSetTy &VBases);
-
- /// LayoutVTablesForVirtualBases - Layout vtables for all virtual bases of the
- /// given base (excluding any primary bases).
- void LayoutVTablesForVirtualBases(const CXXRecordDecl *RD,
- VisitedVirtualBasesSetTy &VBases);
-
- /// isBuildingConstructorVTable - Return whether this vtable builder is
- /// building a construction vtable.
- bool isBuildingConstructorVTable() const {
- return MostDerivedClass != LayoutClass;
- }
-
-public:
- VTableBuilder(CodeGenVTables &VTables, const CXXRecordDecl *MostDerivedClass,
- CharUnits MostDerivedClassOffset,
- bool MostDerivedClassIsVirtual,
- const CXXRecordDecl *LayoutClass)
- : VTables(VTables), MostDerivedClass(MostDerivedClass),
- MostDerivedClassOffset(MostDerivedClassOffset),
- MostDerivedClassIsVirtual(MostDerivedClassIsVirtual),
- LayoutClass(LayoutClass), Context(MostDerivedClass->getASTContext()),
- Overriders(MostDerivedClass, MostDerivedClassOffset, LayoutClass) {
-
- LayoutVTable();
- }
-
- ThunksMapTy::const_iterator thunks_begin() const {
- return Thunks.begin();
- }
-
- ThunksMapTy::const_iterator thunks_end() const {
- return Thunks.end();
- }
-
- const VBaseOffsetOffsetsMapTy &getVBaseOffsetOffsets() const {
- return VBaseOffsetOffsets;
- }
-
- /// getNumVTableComponents - Return the number of components in the vtable
- /// currently built.
- uint64_t getNumVTableComponents() const {
- return Components.size();
- }
-
- const uint64_t *vtable_components_data_begin() const {
- return reinterpret_cast<const uint64_t *>(Components.begin());
- }
-
- const uint64_t *vtable_components_data_end() const {
- return reinterpret_cast<const uint64_t *>(Components.end());
- }
-
- AddressPointsMapTy::const_iterator address_points_begin() const {
- return AddressPoints.begin();
- }
-
- AddressPointsMapTy::const_iterator address_points_end() const {
- return AddressPoints.end();
- }
-
- VTableThunksMapTy::const_iterator vtable_thunks_begin() const {
- return VTableThunks.begin();
- }
-
- VTableThunksMapTy::const_iterator vtable_thunks_end() const {
- return VTableThunks.end();
- }
-
- /// dumpLayout - Dump the vtable layout.
- void dumpLayout(llvm::raw_ostream&);
-};
-
-void VTableBuilder::AddThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk) {
- assert(!isBuildingConstructorVTable() &&
- "Can't add thunks for construction vtable");
-
- llvm::SmallVector<ThunkInfo, 1> &ThunksVector = Thunks[MD];
-
- // Check if we have this thunk already.
- if (std::find(ThunksVector.begin(), ThunksVector.end(), Thunk) !=
- ThunksVector.end())
- return;
-
- ThunksVector.push_back(Thunk);
-}
-
-typedef llvm::SmallPtrSet<const CXXMethodDecl *, 8> OverriddenMethodsSetTy;
-
-/// ComputeAllOverriddenMethods - Given a method decl, will return a set of all
-/// the overridden methods that the function decl overrides.
-static void
-ComputeAllOverriddenMethods(const CXXMethodDecl *MD,
- OverriddenMethodsSetTy& OverriddenMethods) {
- assert(MD->isVirtual() && "Method is not virtual!");
-
- for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
- E = MD->end_overridden_methods(); I != E; ++I) {
- const CXXMethodDecl *OverriddenMD = *I;
-
- OverriddenMethods.insert(OverriddenMD);
-
- ComputeAllOverriddenMethods(OverriddenMD, OverriddenMethods);
- }
-}
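
A toy version of the transitive walk above (made-up names, strings standing in for CXXMethodDecl), showing that C::f collects both B::f and A::f:

    #include <cassert>
    #include <map>
    #include <set>
    #include <string>
    #include <vector>

    // Walks the "directly overrides" edges transitively, like
    // ComputeAllOverriddenMethods does over the overridden_methods lists.
    typedef std::map<std::string, std::vector<std::string> > OverrideGraph;

    static void collect(const std::string &MD, const OverrideGraph &G,
                        std::set<std::string> &Out) {
      OverrideGraph::const_iterator It = G.find(MD);
      if (It == G.end())
        return;
      for (size_t I = 0; I != It->second.size(); ++I) {
        Out.insert(It->second[I]);
        collect(It->second[I], G, Out);
      }
    }

    int main() {
      OverrideGraph G;
      G["C::f"].push_back("B::f");  // C::f directly overrides B::f
      G["B::f"].push_back("A::f");  // B::f directly overrides A::f
      std::set<std::string> S;
      collect("C::f", G, S);
      assert(S.count("B::f") && S.count("A::f") && S.size() == 2);
      return 0;
    }
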
-
-void VTableBuilder::ComputeThisAdjustments() {
- // Now go through the method info map and see if any of the methods need
- // 'this' pointer adjustments.
- for (MethodInfoMapTy::const_iterator I = MethodInfoMap.begin(),
- E = MethodInfoMap.end(); I != E; ++I) {
- const CXXMethodDecl *MD = I->first;
- const MethodInfo &MethodInfo = I->second;
-
- // Ignore adjustments for unused function pointers.
- uint64_t VTableIndex = MethodInfo.VTableIndex;
- if (Components[VTableIndex].getKind() ==
- VTableComponent::CK_UnusedFunctionPointer)
- continue;
-
- // Get the final overrider for this method.
- FinalOverriders::OverriderInfo Overrider =
- Overriders.getOverrider(MD, MethodInfo.BaseOffset);
-
- // Check if we need an adjustment at all.
- if (MethodInfo.BaseOffsetInLayoutClass == Overrider.Offset) {
- // When a return thunk is needed by a derived class that overrides a
- // virtual base, gcc uses a virtual 'this' adjustment as well.
- // While the thunk itself might be needed by vtables in subclasses or
- // in construction vtables, there doesn't seem to be a reason for using
- // the thunk in this vtable. Still, we do so to match gcc.
- if (VTableThunks.lookup(VTableIndex).Return.isEmpty())
- continue;
- }
-
- ThisAdjustment ThisAdjustment =
- ComputeThisAdjustment(MD, MethodInfo.BaseOffsetInLayoutClass, Overrider);
-
- if (ThisAdjustment.isEmpty())
- continue;
-
- // Add it.
- VTableThunks[VTableIndex].This = ThisAdjustment;
-
- if (isa<CXXDestructorDecl>(MD)) {
- // Add an adjustment for the deleting destructor as well.
- VTableThunks[VTableIndex + 1].This = ThisAdjustment;
- }
- }
-
- /// Clear the method info map.
- MethodInfoMap.clear();
-
- if (isBuildingConstructorVTable()) {
- // We don't need to store thunk information for construction vtables.
- return;
- }
-
- for (VTableThunksMapTy::const_iterator I = VTableThunks.begin(),
- E = VTableThunks.end(); I != E; ++I) {
- const VTableComponent &Component = Components[I->first];
- const ThunkInfo &Thunk = I->second;
- const CXXMethodDecl *MD;
-
- switch (Component.getKind()) {
- default:
- llvm_unreachable("Unexpected vtable component kind!");
- case VTableComponent::CK_FunctionPointer:
- MD = Component.getFunctionDecl();
- break;
- case VTableComponent::CK_CompleteDtorPointer:
- MD = Component.getDestructorDecl();
- break;
- case VTableComponent::CK_DeletingDtorPointer:
- // We've already added the thunk when we saw the complete dtor pointer.
- continue;
- }
-
- if (MD->getParent() == MostDerivedClass)
- AddThunk(MD, Thunk);
- }
-}
-
-ReturnAdjustment VTableBuilder::ComputeReturnAdjustment(BaseOffset Offset) {
- ReturnAdjustment Adjustment;
-
- if (!Offset.isEmpty()) {
- if (Offset.VirtualBase) {
- // Get the virtual base offset offset.
- if (Offset.DerivedClass == MostDerivedClass) {
- // We can get the offset offset directly from our map.
- Adjustment.VBaseOffsetOffset =
- VBaseOffsetOffsets.lookup(Offset.VirtualBase).getQuantity();
- } else {
- Adjustment.VBaseOffsetOffset =
- VTables.getVirtualBaseOffsetOffset(Offset.DerivedClass,
- Offset.VirtualBase).getQuantity();
- }
- }
-
- Adjustment.NonVirtual = Offset.NonVirtualOffset.getQuantity();
- }
-
- return Adjustment;
-}
-
-BaseOffset
-VTableBuilder::ComputeThisAdjustmentBaseOffset(BaseSubobject Base,
- BaseSubobject Derived) const {
- const CXXRecordDecl *BaseRD = Base.getBase();
- const CXXRecordDecl *DerivedRD = Derived.getBase();
-
- CXXBasePaths Paths(/*FindAmbiguities=*/true,
- /*RecordPaths=*/true, /*DetectVirtual=*/true);
-
- if (!const_cast<CXXRecordDecl *>(DerivedRD)->
- isDerivedFrom(const_cast<CXXRecordDecl *>(BaseRD), Paths)) {
- assert(false && "Class must be derived from the passed in base class!");
- return BaseOffset();
- }
-
- // We have to go through all the paths, and see which one leads us to the
- // right base subobject.
- for (CXXBasePaths::const_paths_iterator I = Paths.begin(), E = Paths.end();
- I != E; ++I) {
- BaseOffset Offset = ComputeBaseOffset(Context, DerivedRD, *I);
-
- CharUnits OffsetToBaseSubobject = Offset.NonVirtualOffset;
-
- if (Offset.VirtualBase) {
- // If we have a virtual base class, the non-virtual offset is relative
- // to the virtual base class offset.
- const ASTRecordLayout &LayoutClassLayout =
- Context.getASTRecordLayout(LayoutClass);
-
- /// Get the virtual base offset, relative to the most derived class
- /// layout.
- OffsetToBaseSubobject +=
- LayoutClassLayout.getVBaseClassOffset(Offset.VirtualBase);
- } else {
- // Otherwise, the non-virtual offset is relative to the derived class
- // offset.
- OffsetToBaseSubobject += Derived.getBaseOffset();
- }
-
- // Check if this path gives us the right base subobject.
- if (OffsetToBaseSubobject == Base.getBaseOffset()) {
- // Since we're going from the base class _to_ the derived class, we'll
- // invert the non-virtual offset here.
- Offset.NonVirtualOffset = -Offset.NonVirtualOffset;
- return Offset;
- }
- }
-
- return BaseOffset();
-}
-
-ThisAdjustment
-VTableBuilder::ComputeThisAdjustment(const CXXMethodDecl *MD,
- CharUnits BaseOffsetInLayoutClass,
- FinalOverriders::OverriderInfo Overrider) {
- // Ignore adjustments for pure virtual member functions.
- if (Overrider.Method->isPure())
- return ThisAdjustment();
-
- BaseSubobject OverriddenBaseSubobject(MD->getParent(),
- BaseOffsetInLayoutClass);
-
- BaseSubobject OverriderBaseSubobject(Overrider.Method->getParent(),
- Overrider.Offset);
-
- // Compute the adjustment offset.
- BaseOffset Offset = ComputeThisAdjustmentBaseOffset(OverriddenBaseSubobject,
- OverriderBaseSubobject);
- if (Offset.isEmpty())
- return ThisAdjustment();
-
- ThisAdjustment Adjustment;
-
- if (Offset.VirtualBase) {
- // Get the vcall offset map for this virtual base.
- VCallOffsetMap &VCallOffsets = VCallOffsetsForVBases[Offset.VirtualBase];
-
- if (VCallOffsets.empty()) {
- // We don't have vcall offsets for this virtual base, go ahead and
- // build them.
- VCallAndVBaseOffsetBuilder Builder(MostDerivedClass, MostDerivedClass,
- /*FinalOverriders=*/0,
- BaseSubobject(Offset.VirtualBase,
- CharUnits::Zero()),
- /*BaseIsVirtual=*/true,
- /*OffsetInLayoutClass=*/
- CharUnits::Zero());
-
- VCallOffsets = Builder.getVCallOffsets();
- }
-
- Adjustment.VCallOffsetOffset =
- VCallOffsets.getVCallOffsetOffset(MD).getQuantity();
- }
-
- // Set the non-virtual part of the adjustment.
- Adjustment.NonVirtual = Offset.NonVirtualOffset.getQuantity();
-
- return Adjustment;
-}
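
An editorial, runnable illustration (hypothetical classes) of the adjustment computed above: a call through B2's slot in D's vtable must shift 'this' back to the start of the complete object before D::f runs.

    #include <cstdio>

    // D's copy of B2's vtable entry for f() points at a thunk that
    // subtracts the offset of the B2 subobject from 'this' before
    // transferring to D::f.
    struct B1 { virtual void f() {} int x; };
    struct B2 { virtual void f() {} int y; };
    struct D : B1, B2 {
      virtual void f() { std::printf("D::f  this=%p\n", (void *)this); }
    };

    int main() {
      D d;
      B2 *b = &d;  // points into the middle of d, at the B2 subobject
      std::printf("d=%p  b=%p\n", (void *)&d, (void *)b);
      b->f();      // prints d's address: the thunk adjusted 'this'
      return 0;
    }
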
-
-void
-VTableBuilder::AddMethod(const CXXMethodDecl *MD,
- ReturnAdjustment ReturnAdjustment) {
- if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
- assert(ReturnAdjustment.isEmpty() &&
- "Destructor can't have return adjustment!");
-
- // Add both the complete destructor and the deleting destructor.
- Components.push_back(VTableComponent::MakeCompleteDtor(DD));
- Components.push_back(VTableComponent::MakeDeletingDtor(DD));
- } else {
- // Add the return adjustment if necessary.
- if (!ReturnAdjustment.isEmpty())
- VTableThunks[Components.size()].Return = ReturnAdjustment;
-
- // Add the function.
- Components.push_back(VTableComponent::MakeFunction(MD));
- }
-}
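
A consequence of the two push_backs above, sketched for a hypothetical class:

    // For 'struct S { virtual ~S(); };' the component vector receives,
    // in order:
    //   Components[n]     = S::~S() [complete]   <- MakeCompleteDtor
    //   Components[n + 1] = S::~S() [deleting]   <- MakeDeletingDtor
    // A virtual destructor thus occupies two consecutive slots, which is
    // why ComputeThisAdjustments also copies a destructor's 'this'
    // adjustment to VTableIndex + 1.
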
-
- /// OverridesIndirectMethodInBases - Return whether the given member function
-/// overrides any methods in the set of given bases.
-/// Unlike OverridesMethodInBase, this checks "overriders of overriders".
-/// For example, if we have:
-///
- /// struct A { virtual void f(); };
- /// struct B : A { virtual void f(); };
- /// struct C : B { virtual void f(); };
-///
-/// OverridesIndirectMethodInBase will return true if given C::f as the method
-/// and { A } as the set of bases.
-static bool
-OverridesIndirectMethodInBases(const CXXMethodDecl *MD,
- VTableBuilder::PrimaryBasesSetVectorTy &Bases) {
- if (Bases.count(MD->getParent()))
- return true;
-
- for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
- E = MD->end_overridden_methods(); I != E; ++I) {
- const CXXMethodDecl *OverriddenMD = *I;
-
- // Check "indirect overriders".
- if (OverridesIndirectMethodInBases(OverriddenMD, Bases))
- return true;
- }
-
- return false;
-}
-
-bool
-VTableBuilder::IsOverriderUsed(const CXXMethodDecl *Overrider,
- CharUnits BaseOffsetInLayoutClass,
- const CXXRecordDecl *FirstBaseInPrimaryBaseChain,
- CharUnits FirstBaseOffsetInLayoutClass) const {
- // If the base and the first base in the primary base chain have the same
- // offsets, then this overrider will be used.
- if (BaseOffsetInLayoutClass == FirstBaseOffsetInLayoutClass)
- return true;
-
- // We know now that Base (or a direct or indirect base of it) is a primary
- // base in part of the class hierarchy, but not a primary base in the most
- // derived class.
-
- // If the overrider is the first base in the primary base chain, we know
- // that the overrider will be used.
- if (Overrider->getParent() == FirstBaseInPrimaryBaseChain)
- return true;
-
- VTableBuilder::PrimaryBasesSetVectorTy PrimaryBases;
-
- const CXXRecordDecl *RD = FirstBaseInPrimaryBaseChain;
- PrimaryBases.insert(RD);
-
- // Now traverse the base chain, starting with the first base, until we find
- // the base that is no longer a primary base.
- while (true) {
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
- const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
-
- if (!PrimaryBase)
- break;
-
- if (Layout.isPrimaryBaseVirtual()) {
- assert(Layout.getVBaseClassOffsetInBits(PrimaryBase) == 0 &&
- "Primary base should always be at offset 0!");
-
- const ASTRecordLayout &LayoutClassLayout =
- Context.getASTRecordLayout(LayoutClass);
-
- // Now check if this is the primary base that is not a primary base in the
- // most derived class.
- if (LayoutClassLayout.getVBaseClassOffset(PrimaryBase) !=
- FirstBaseOffsetInLayoutClass) {
- // We found it, stop walking the chain.
- break;
- }
- } else {
- assert(Layout.getBaseClassOffsetInBits(PrimaryBase) == 0 &&
- "Primary base should always be at offset 0!");
- }
-
- if (!PrimaryBases.insert(PrimaryBase))
- assert(false && "Found a duplicate primary base!");
-
- RD = PrimaryBase;
- }
-
- // If the final overrider is an override of one of the primary bases,
- // then we know that it will be used.
- return OverridesIndirectMethodInBases(Overrider, PrimaryBases);
-}
-
-/// FindNearestOverriddenMethod - Given a method, returns the overridden method
-/// from the nearest base. Returns null if no method was found.
-static const CXXMethodDecl *
-FindNearestOverriddenMethod(const CXXMethodDecl *MD,
- VTableBuilder::PrimaryBasesSetVectorTy &Bases) {
- OverriddenMethodsSetTy OverriddenMethods;
- ComputeAllOverriddenMethods(MD, OverriddenMethods);
-
- for (int I = Bases.size(), E = 0; I != E; --I) {
- const CXXRecordDecl *PrimaryBase = Bases[I - 1];
-
- // Now check the overridden methods.
- for (OverriddenMethodsSetTy::const_iterator I = OverriddenMethods.begin(),
- E = OverriddenMethods.end(); I != E; ++I) {
- const CXXMethodDecl *OverriddenMD = *I;
-
- // We found our overridden method.
- if (OverriddenMD->getParent() == PrimaryBase)
- return OverriddenMD;
- }
- }
-
- return 0;
-}
-
-void
-VTableBuilder::AddMethods(BaseSubobject Base, CharUnits BaseOffsetInLayoutClass,
- const CXXRecordDecl *FirstBaseInPrimaryBaseChain,
- CharUnits FirstBaseOffsetInLayoutClass,
- PrimaryBasesSetVectorTy &PrimaryBases) {
- const CXXRecordDecl *RD = Base.getBase();
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
-
- if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase()) {
- CharUnits PrimaryBaseOffset;
- CharUnits PrimaryBaseOffsetInLayoutClass;
- if (Layout.isPrimaryBaseVirtual()) {
- assert(Layout.getVBaseClassOffsetInBits(PrimaryBase) == 0 &&
- "Primary vbase should have a zero offset!");
-
- const ASTRecordLayout &MostDerivedClassLayout =
- Context.getASTRecordLayout(MostDerivedClass);
-
- PrimaryBaseOffset =
- MostDerivedClassLayout.getVBaseClassOffset(PrimaryBase);
-
- const ASTRecordLayout &LayoutClassLayout =
- Context.getASTRecordLayout(LayoutClass);
-
- PrimaryBaseOffsetInLayoutClass =
- LayoutClassLayout.getVBaseClassOffset(PrimaryBase);
- } else {
- assert(Layout.getBaseClassOffsetInBits(PrimaryBase) == 0 &&
- "Primary base should have a zero offset!");
-
- PrimaryBaseOffset = Base.getBaseOffset();
- PrimaryBaseOffsetInLayoutClass = BaseOffsetInLayoutClass;
- }
-
- AddMethods(BaseSubobject(PrimaryBase, PrimaryBaseOffset),
- PrimaryBaseOffsetInLayoutClass, FirstBaseInPrimaryBaseChain,
- FirstBaseOffsetInLayoutClass, PrimaryBases);
-
- if (!PrimaryBases.insert(PrimaryBase))
- assert(false && "Found a duplicate primary base!");
- }
-
- // Now go through all virtual member functions and add them.
- for (CXXRecordDecl::method_iterator I = RD->method_begin(),
- E = RD->method_end(); I != E; ++I) {
- const CXXMethodDecl *MD = *I;
-
- if (!MD->isVirtual())
- continue;
-
- // Get the final overrider.
- FinalOverriders::OverriderInfo Overrider =
- Overriders.getOverrider(MD, Base.getBaseOffset());
-
- // Check if this virtual member function overrides a method in a primary
- // base. If this is the case, and the return type doesn't require adjustment,
- // then we can just use the member function from the primary base.
- if (const CXXMethodDecl *OverriddenMD =
- FindNearestOverriddenMethod(MD, PrimaryBases)) {
- if (ComputeReturnAdjustmentBaseOffset(Context, MD,
- OverriddenMD).isEmpty()) {
- // Replace the method info of the overridden method with our own
- // method.
- assert(MethodInfoMap.count(OverriddenMD) &&
- "Did not find the overridden method!");
- MethodInfo &OverriddenMethodInfo = MethodInfoMap[OverriddenMD];
-
- MethodInfo MethodInfo(Base.getBaseOffset(), BaseOffsetInLayoutClass,
- OverriddenMethodInfo.VTableIndex);
-
- assert(!MethodInfoMap.count(MD) &&
- "Should not have method info for this method yet!");
-
- MethodInfoMap.insert(std::make_pair(MD, MethodInfo));
- MethodInfoMap.erase(OverriddenMD);
-
- // If the overridden method exists in a virtual base class or a direct
- // or indirect base class of a virtual base class, we need to emit a
- // thunk if we ever have a class hierarchy where the base class is not
- // a primary base in the complete object.
- if (!isBuildingConstructorVTable() && OverriddenMD != MD) {
- // Compute the this adjustment.
- ThisAdjustment ThisAdjustment =
- ComputeThisAdjustment(OverriddenMD, BaseOffsetInLayoutClass,
- Overrider);
-
- if (ThisAdjustment.VCallOffsetOffset &&
- Overrider.Method->getParent() == MostDerivedClass) {
-
- // There's no return adjustment from OverriddenMD and MD,
- // but that doesn't mean there isn't one between MD and
- // the final overrider.
- BaseOffset ReturnAdjustmentOffset =
- ComputeReturnAdjustmentBaseOffset(Context, Overrider.Method, MD);
- ReturnAdjustment ReturnAdjustment =
- ComputeReturnAdjustment(ReturnAdjustmentOffset);
-
- // This is a virtual thunk for the most derived class, add it.
- AddThunk(Overrider.Method,
- ThunkInfo(ThisAdjustment, ReturnAdjustment));
- }
- }
-
- continue;
- }
- }
-
- // Insert the method info for this method.
- MethodInfo MethodInfo(Base.getBaseOffset(), BaseOffsetInLayoutClass,
- Components.size());
-
- assert(!MethodInfoMap.count(MD) &&
- "Should not have method info for this method yet!");
- MethodInfoMap.insert(std::make_pair(MD, MethodInfo));
-
- // Check if this overrider is going to be used.
- const CXXMethodDecl *OverriderMD = Overrider.Method;
- if (!IsOverriderUsed(OverriderMD, BaseOffsetInLayoutClass,
- FirstBaseInPrimaryBaseChain,
- FirstBaseOffsetInLayoutClass)) {
- Components.push_back(VTableComponent::MakeUnusedFunction(OverriderMD));
- continue;
- }
-
- // Check if this overrider needs a return adjustment.
- // We don't want to do this for pure virtual member functions.
- BaseOffset ReturnAdjustmentOffset;
- if (!OverriderMD->isPure()) {
- ReturnAdjustmentOffset =
- ComputeReturnAdjustmentBaseOffset(Context, OverriderMD, MD);
- }
-
- ReturnAdjustment ReturnAdjustment =
- ComputeReturnAdjustment(ReturnAdjustmentOffset);
-
- AddMethod(Overrider.Method, ReturnAdjustment);
- }
-}
-
-void VTableBuilder::LayoutVTable() {
- LayoutPrimaryAndSecondaryVTables(BaseSubobject(MostDerivedClass,
- CharUnits::Zero()),
- /*BaseIsMorallyVirtual=*/false,
- MostDerivedClassIsVirtual,
- MostDerivedClassOffset);
-
- VisitedVirtualBasesSetTy VBases;
-
- // Determine the primary virtual bases.
- DeterminePrimaryVirtualBases(MostDerivedClass, MostDerivedClassOffset,
- VBases);
- VBases.clear();
-
- LayoutVTablesForVirtualBases(MostDerivedClass, VBases);
-}
-
-void
-VTableBuilder::LayoutPrimaryAndSecondaryVTables(BaseSubobject Base,
- bool BaseIsMorallyVirtual,
- bool BaseIsVirtualInLayoutClass,
- CharUnits OffsetInLayoutClass) {
- assert(Base.getBase()->isDynamicClass() && "class does not have a vtable!");
-
- // Add vcall and vbase offsets for this vtable.
- VCallAndVBaseOffsetBuilder Builder(MostDerivedClass, LayoutClass, &Overriders,
- Base, BaseIsVirtualInLayoutClass,
- OffsetInLayoutClass);
- Components.append(Builder.components_begin(), Builder.components_end());
-
- // Check if we need to add these vcall offsets.
- if (BaseIsVirtualInLayoutClass && !Builder.getVCallOffsets().empty()) {
- VCallOffsetMap &VCallOffsets = VCallOffsetsForVBases[Base.getBase()];
-
- if (VCallOffsets.empty())
- VCallOffsets = Builder.getVCallOffsets();
- }
-
- // If we're laying out the most derived class we want to keep track of the
- // virtual base class offset offsets.
- if (Base.getBase() == MostDerivedClass)
- VBaseOffsetOffsets = Builder.getVBaseOffsetOffsets();
-
- // Add the offset to top.
- CharUnits OffsetToTop = MostDerivedClassOffset - OffsetInLayoutClass;
- Components.push_back(
- VTableComponent::MakeOffsetToTop(OffsetToTop));
-
- // Next, add the RTTI.
- Components.push_back(VTableComponent::MakeRTTI(MostDerivedClass));
-
- uint64_t AddressPoint = Components.size();
-
- // Now go through all virtual member functions and add them.
- PrimaryBasesSetVectorTy PrimaryBases;
- AddMethods(Base, OffsetInLayoutClass,
- Base.getBase(), OffsetInLayoutClass,
- PrimaryBases);
-
- // Compute 'this' pointer adjustments.
- ComputeThisAdjustments();
-
- // Add all address points.
- const CXXRecordDecl *RD = Base.getBase();
- while (true) {
- AddressPoints.insert(std::make_pair(
- BaseSubobject(RD, OffsetInLayoutClass),
- AddressPoint));
-
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
- const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
-
- if (!PrimaryBase)
- break;
-
- if (Layout.isPrimaryBaseVirtual()) {
- // Check if this virtual primary base is a primary base in the layout
- // class. If it's not, we don't want to add it.
- const ASTRecordLayout &LayoutClassLayout =
- Context.getASTRecordLayout(LayoutClass);
-
- if (LayoutClassLayout.getVBaseClassOffset(PrimaryBase) !=
- OffsetInLayoutClass) {
- // We don't want to add this class (or any of its primary bases).
- break;
- }
- }
-
- RD = PrimaryBase;
- }
-
- // Layout secondary vtables.
- LayoutSecondaryVTables(Base, BaseIsMorallyVirtual, OffsetInLayoutClass);
-}
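
For orientation, the order established above (vcall/vbase offsets, then offset-to-top, then RTTI, then the address point, then methods) yields the following layout for a hypothetical `struct A { virtual void f(); virtual ~A(); };`, hand-written in the format produced by dumpLayout below:

    Vtable for 'A' (5 entries).
       0 | offset_to_top (0)
       1 | A RTTI
               -- (A, 0) vtable address --
       2 | A::f()
       3 | A::~A() [complete]
       4 | A::~A() [deleting]
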
-
-void VTableBuilder::LayoutSecondaryVTables(BaseSubobject Base,
- bool BaseIsMorallyVirtual,
- CharUnits OffsetInLayoutClass) {
- // Itanium C++ ABI 2.5.2:
- // Following the primary virtual table of a derived class are secondary
- // virtual tables for each of its proper base classes, except any primary
- // base(s) with which it shares its primary virtual table.
-
- const CXXRecordDecl *RD = Base.getBase();
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
- const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
-
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
- // Ignore virtual bases, we'll emit them later.
- if (I->isVirtual())
- continue;
-
- const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
-
- // Ignore bases that don't have a vtable.
- if (!BaseDecl->isDynamicClass())
- continue;
-
- if (isBuildingConstructorVTable()) {
- // Itanium C++ ABI 2.6.4:
- // Some of the base class subobjects may not need construction virtual
- // tables, which will therefore not be present in the construction
- // virtual table group, even though the subobject virtual tables are
- // present in the main virtual table group for the complete object.
- if (!BaseIsMorallyVirtual && !BaseDecl->getNumVBases())
- continue;
- }
-
- // Get the base offset of this base.
- CharUnits RelativeBaseOffset = Layout.getBaseClassOffset(BaseDecl);
- CharUnits BaseOffset = Base.getBaseOffset() + RelativeBaseOffset;
-
- CharUnits BaseOffsetInLayoutClass =
- OffsetInLayoutClass + RelativeBaseOffset;
-
- // Don't emit a secondary vtable for a primary base. We might however want
- // to emit secondary vtables for other bases of this base.
- if (BaseDecl == PrimaryBase) {
- LayoutSecondaryVTables(BaseSubobject(BaseDecl, BaseOffset),
- BaseIsMorallyVirtual, BaseOffsetInLayoutClass);
- continue;
- }
-
- // Layout the primary vtable (and any secondary vtables) for this base.
- LayoutPrimaryAndSecondaryVTables(
- BaseSubobject(BaseDecl, BaseOffset),
- BaseIsMorallyVirtual,
- /*BaseIsVirtualInLayoutClass=*/false,
- BaseOffsetInLayoutClass);
- }
-}
-
-void
-VTableBuilder::DeterminePrimaryVirtualBases(const CXXRecordDecl *RD,
- CharUnits OffsetInLayoutClass,
- VisitedVirtualBasesSetTy &VBases) {
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
-
- // Check if this base has a primary base.
- if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase()) {
-
- // Check if it's virtual.
- if (Layout.isPrimaryBaseVirtual()) {
- bool IsPrimaryVirtualBase = true;
-
- if (isBuildingConstructorVTable()) {
- // Check if the base is actually a primary base in the class we use for
- // layout.
- const ASTRecordLayout &LayoutClassLayout =
- Context.getASTRecordLayout(LayoutClass);
-
- CharUnits PrimaryBaseOffsetInLayoutClass =
- LayoutClassLayout.getVBaseClassOffset(PrimaryBase);
-
- // We know that the base is not a primary base in the layout class if
- // the base offsets are different.
- if (PrimaryBaseOffsetInLayoutClass != OffsetInLayoutClass)
- IsPrimaryVirtualBase = false;
- }
-
- if (IsPrimaryVirtualBase)
- PrimaryVirtualBases.insert(PrimaryBase);
- }
- }
-
- // Traverse bases, looking for more primary virtual bases.
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
- const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
-
- CharUnits BaseOffsetInLayoutClass;
-
- if (I->isVirtual()) {
- if (!VBases.insert(BaseDecl))
- continue;
-
- const ASTRecordLayout &LayoutClassLayout =
- Context.getASTRecordLayout(LayoutClass);
-
- BaseOffsetInLayoutClass =
- LayoutClassLayout.getVBaseClassOffset(BaseDecl);
- } else {
- BaseOffsetInLayoutClass =
- OffsetInLayoutClass + Layout.getBaseClassOffset(BaseDecl);
- }
-
- DeterminePrimaryVirtualBases(BaseDecl, BaseOffsetInLayoutClass, VBases);
- }
-}
-
-void
-VTableBuilder::LayoutVTablesForVirtualBases(const CXXRecordDecl *RD,
- VisitedVirtualBasesSetTy &VBases) {
- // Itanium C++ ABI 2.5.2:
- // Then come the virtual base virtual tables, also in inheritance graph
- // order, and again excluding primary bases (which share virtual tables with
- // the classes for which they are primary).
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
- const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
-
- // Check if this base needs a vtable. (If it's virtual, not a primary base
- // of some other class, and we haven't visited it before).
- if (I->isVirtual() && BaseDecl->isDynamicClass() &&
- !PrimaryVirtualBases.count(BaseDecl) && VBases.insert(BaseDecl)) {
- const ASTRecordLayout &MostDerivedClassLayout =
- Context.getASTRecordLayout(MostDerivedClass);
- CharUnits BaseOffset =
- MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
-
- const ASTRecordLayout &LayoutClassLayout =
- Context.getASTRecordLayout(LayoutClass);
- CharUnits BaseOffsetInLayoutClass =
- LayoutClassLayout.getVBaseClassOffset(BaseDecl);
-
- LayoutPrimaryAndSecondaryVTables(
- BaseSubobject(BaseDecl, BaseOffset),
- /*BaseIsMorallyVirtual=*/true,
- /*BaseIsVirtualInLayoutClass=*/true,
- BaseOffsetInLayoutClass);
- }
-
- // We only need to check the base for virtual base vtables if it actually
- // has virtual bases.
- if (BaseDecl->getNumVBases())
- LayoutVTablesForVirtualBases(BaseDecl, VBases);
- }
-}
-
-/// dumpLayout - Dump the vtable layout.
-void VTableBuilder::dumpLayout(llvm::raw_ostream& Out) {
-
- if (isBuildingConstructorVTable()) {
- Out << "Construction vtable for ('";
- Out << MostDerivedClass->getQualifiedNameAsString() << "', ";
- Out << MostDerivedClassOffset.getQuantity() << ") in '";
- Out << LayoutClass->getQualifiedNameAsString();
- } else {
- Out << "Vtable for '";
- Out << MostDerivedClass->getQualifiedNameAsString();
- }
- Out << "' (" << Components.size() << " entries).\n";
-
- // Iterate through the address points and insert them into a new map where
- // they are keyed by the index and not the base object.
- // Since an address point can be shared by multiple subobjects, we use an
- // STL multimap.
- std::multimap<uint64_t, BaseSubobject> AddressPointsByIndex;
- for (AddressPointsMapTy::const_iterator I = AddressPoints.begin(),
- E = AddressPoints.end(); I != E; ++I) {
- const BaseSubobject& Base = I->first;
- uint64_t Index = I->second;
-
- AddressPointsByIndex.insert(std::make_pair(Index, Base));
- }
-
- for (unsigned I = 0, E = Components.size(); I != E; ++I) {
- uint64_t Index = I;
-
- Out << llvm::format("%4d | ", I);
-
- const VTableComponent &Component = Components[I];
-
- // Dump the component.
- switch (Component.getKind()) {
-
- case VTableComponent::CK_VCallOffset:
- Out << "vcall_offset ("
- << Component.getVCallOffset().getQuantity()
- << ")";
- break;
-
- case VTableComponent::CK_VBaseOffset:
- Out << "vbase_offset ("
- << Component.getVBaseOffset().getQuantity()
- << ")";
- break;
-
- case VTableComponent::CK_OffsetToTop:
- Out << "offset_to_top ("
- << Component.getOffsetToTop().getQuantity()
- << ")";
- break;
-
- case VTableComponent::CK_RTTI:
- Out << Component.getRTTIDecl()->getQualifiedNameAsString() << " RTTI";
- break;
-
- case VTableComponent::CK_FunctionPointer: {
- const CXXMethodDecl *MD = Component.getFunctionDecl();
-
- std::string Str =
- PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual,
- MD);
- Out << Str;
- if (MD->isPure())
- Out << " [pure]";
-
- ThunkInfo Thunk = VTableThunks.lookup(I);
- if (!Thunk.isEmpty()) {
- // If this function pointer has a return adjustment, dump it.
- if (!Thunk.Return.isEmpty()) {
- Out << "\n [return adjustment: ";
- Out << Thunk.Return.NonVirtual << " non-virtual";
-
- if (Thunk.Return.VBaseOffsetOffset) {
- Out << ", " << Thunk.Return.VBaseOffsetOffset;
- Out << " vbase offset offset";
- }
-
- Out << ']';
- }
-
- // If this function pointer has a 'this' pointer adjustment, dump it.
- if (!Thunk.This.isEmpty()) {
- Out << "\n [this adjustment: ";
- Out << Thunk.This.NonVirtual << " non-virtual";
-
- if (Thunk.This.VCallOffsetOffset) {
- Out << ", " << Thunk.This.VCallOffsetOffset;
- Out << " vcall offset offset";
- }
-
- Out << ']';
- }
- }
-
- break;
- }
-
- case VTableComponent::CK_CompleteDtorPointer:
- case VTableComponent::CK_DeletingDtorPointer: {
- bool IsComplete =
- Component.getKind() == VTableComponent::CK_CompleteDtorPointer;
-
- const CXXDestructorDecl *DD = Component.getDestructorDecl();
-
- Out << DD->getQualifiedNameAsString();
- if (IsComplete)
- Out << "() [complete]";
- else
- Out << "() [deleting]";
-
- if (DD->isPure())
- Out << " [pure]";
-
- ThunkInfo Thunk = VTableThunks.lookup(I);
- if (!Thunk.isEmpty()) {
- // If this destructor has a 'this' pointer adjustment, dump it.
- if (!Thunk.This.isEmpty()) {
- Out << "\n [this adjustment: ";
- Out << Thunk.This.NonVirtual << " non-virtual";
-
- if (Thunk.This.VCallOffsetOffset) {
- Out << ", " << Thunk.This.VCallOffsetOffset;
- Out << " vcall offset offset";
- }
-
- Out << ']';
- }
- }
-
- break;
- }
-
- case VTableComponent::CK_UnusedFunctionPointer: {
- const CXXMethodDecl *MD = Component.getUnusedFunctionDecl();
-
- std::string Str =
- PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual,
- MD);
- Out << "[unused] " << Str;
- if (MD->isPure())
- Out << " [pure]";
- }
-
- }
-
- Out << '\n';
-
- // Dump the next address point.
- uint64_t NextIndex = Index + 1;
- if (AddressPointsByIndex.count(NextIndex)) {
- if (AddressPointsByIndex.count(NextIndex) == 1) {
- const BaseSubobject &Base =
- AddressPointsByIndex.find(NextIndex)->second;
-
- Out << " -- (" << Base.getBase()->getQualifiedNameAsString();
- Out << ", " << Base.getBaseOffset().getQuantity();
- Out << ") vtable address --\n";
- } else {
- CharUnits BaseOffset =
- AddressPointsByIndex.lower_bound(NextIndex)->second.getBaseOffset();
-
- // We store the class names in a set to get a stable order.
- std::set<std::string> ClassNames;
- for (std::multimap<uint64_t, BaseSubobject>::const_iterator I =
- AddressPointsByIndex.lower_bound(NextIndex), E =
- AddressPointsByIndex.upper_bound(NextIndex); I != E; ++I) {
- assert(I->second.getBaseOffset() == BaseOffset &&
- "Invalid base offset!");
- const CXXRecordDecl *RD = I->second.getBase();
- ClassNames.insert(RD->getQualifiedNameAsString());
- }
-
- for (std::set<std::string>::const_iterator I = ClassNames.begin(),
- E = ClassNames.end(); I != E; ++I) {
- Out << " -- (" << *I;
- Out << ", " << BaseOffset.getQuantity() << ") vtable address --\n";
- }
- }
- }
- }
-
- Out << '\n';
-
- if (isBuildingConstructorVTable())
- return;
-
- if (MostDerivedClass->getNumVBases()) {
- // We store the virtual base class names and their offsets in a map to get
- // a stable order.
-
- std::map<std::string, CharUnits> ClassNamesAndOffsets;
- for (VBaseOffsetOffsetsMapTy::const_iterator I = VBaseOffsetOffsets.begin(),
- E = VBaseOffsetOffsets.end(); I != E; ++I) {
- std::string ClassName = I->first->getQualifiedNameAsString();
- CharUnits OffsetOffset = I->second;
- ClassNamesAndOffsets.insert(
- std::make_pair(ClassName, OffsetOffset));
- }
-
- Out << "Virtual base offset offsets for '";
- Out << MostDerivedClass->getQualifiedNameAsString() << "' (";
- Out << ClassNamesAndOffsets.size();
- Out << (ClassNamesAndOffsets.size() == 1 ? " entry" : " entries") << ").\n";
-
- for (std::map<std::string, CharUnits>::const_iterator I =
- ClassNamesAndOffsets.begin(), E = ClassNamesAndOffsets.end();
- I != E; ++I)
- Out << " " << I->first << " | " << I->second.getQuantity() << '\n';
-
- Out << "\n";
- }
-
- if (!Thunks.empty()) {
- // We store the method names in a map to get a stable order.
- std::map<std::string, const CXXMethodDecl *> MethodNamesAndDecls;
-
- for (ThunksMapTy::const_iterator I = Thunks.begin(), E = Thunks.end();
- I != E; ++I) {
- const CXXMethodDecl *MD = I->first;
- std::string MethodName =
- PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual,
- MD);
-
- MethodNamesAndDecls.insert(std::make_pair(MethodName, MD));
- }
-
- for (std::map<std::string, const CXXMethodDecl *>::const_iterator I =
- MethodNamesAndDecls.begin(), E = MethodNamesAndDecls.end();
- I != E; ++I) {
- const std::string &MethodName = I->first;
- const CXXMethodDecl *MD = I->second;
-
- ThunkInfoVectorTy ThunksVector = Thunks[MD];
- std::sort(ThunksVector.begin(), ThunksVector.end());
-
- Out << "Thunks for '" << MethodName << "' (" << ThunksVector.size();
- Out << (ThunksVector.size() == 1 ? " entry" : " entries") << ").\n";
-
- for (unsigned I = 0, E = ThunksVector.size(); I != E; ++I) {
- const ThunkInfo &Thunk = ThunksVector[I];
-
- Out << llvm::format("%4d | ", I);
-
- // If this function pointer has a return pointer adjustment, dump it.
- if (!Thunk.Return.isEmpty()) {
- Out << "return adjustment: " << Thunk.This.NonVirtual;
- Out << " non-virtual";
- if (Thunk.Return.VBaseOffsetOffset) {
- Out << ", " << Thunk.Return.VBaseOffsetOffset;
- Out << " vbase offset offset";
- }
-
- if (!Thunk.This.isEmpty())
- Out << "\n ";
- }
-
- // If this function pointer has a 'this' pointer adjustment, dump it.
- if (!Thunk.This.isEmpty()) {
- Out << "this adjustment: ";
- Out << Thunk.This.NonVirtual << " non-virtual";
-
- if (Thunk.This.VCallOffsetOffset) {
- Out << ", " << Thunk.This.VCallOffsetOffset;
- Out << " vcall offset offset";
- }
- }
-
- Out << '\n';
- }
-
- Out << '\n';
- }
- }
-
- // Compute the vtable indices for all the member functions.
- // Store them in a map keyed by the index so we'll get a sorted table.
- std::map<uint64_t, std::string> IndicesMap;
-
- for (CXXRecordDecl::method_iterator i = MostDerivedClass->method_begin(),
- e = MostDerivedClass->method_end(); i != e; ++i) {
- const CXXMethodDecl *MD = *i;
-
- // We only want virtual member functions.
- if (!MD->isVirtual())
- continue;
-
- std::string MethodName =
- PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual,
- MD);
-
- if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
- IndicesMap[VTables.getMethodVTableIndex(GlobalDecl(DD, Dtor_Complete))] =
- MethodName + " [complete]";
- IndicesMap[VTables.getMethodVTableIndex(GlobalDecl(DD, Dtor_Deleting))] =
- MethodName + " [deleting]";
- } else {
- IndicesMap[VTables.getMethodVTableIndex(MD)] = MethodName;
- }
- }
-
- // Print the vtable indices for all the member functions.
- if (!IndicesMap.empty()) {
- Out << "VTable indices for '";
- Out << MostDerivedClass->getQualifiedNameAsString();
- Out << "' (" << IndicesMap.size() << " entries).\n";
-
- for (std::map<uint64_t, std::string>::const_iterator I = IndicesMap.begin(),
- E = IndicesMap.end(); I != E; ++I) {
- uint64_t VTableIndex = I->first;
- const std::string &MethodName = I->second;
-
- Out << llvm::format(" %4u | ", VTableIndex) << MethodName << '\n';
- }
- }
-
- Out << '\n';
-}
-
-}
-
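The dumpLayout printer removed above backed -fdump-vtable-layouts. As a rough illustration of the format it produced (exact spacing and entry counts vary by target and class; this sketch assumes the Itanium ABI), the hierarchy below would dump along these lines:

    struct A { virtual void f(); };
    struct B : A { virtual void f(); virtual void g(); };

    // Plausible dump for 'B' (illustrative only):
    //
    //   Vtable for 'B' (4 entries).
    //      0 | offset_to_top (0)
    //      1 | B RTTI
    //          -- (A, 0) vtable address --
    //          -- (B, 0) vtable address --
    //      2 | void B::f()
    //      3 | void B::g()
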
-static void
-CollectPrimaryBases(const CXXRecordDecl *RD, ASTContext &Context,
- VTableBuilder::PrimaryBasesSetVectorTy &PrimaryBases) {
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
- const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
-
- if (!PrimaryBase)
- return;
-
- CollectPrimaryBases(PrimaryBase, Context, PrimaryBases);
-
- if (!PrimaryBases.insert(PrimaryBase))
- assert(false && "Found a duplicate primary base!");
-}
-
-void CodeGenVTables::ComputeMethodVTableIndices(const CXXRecordDecl *RD) {
-
- // Itanium C++ ABI 2.5.2:
- // The order of the virtual function pointers in a virtual table is the
- // order of declaration of the corresponding member functions in the class.
- //
- // There is an entry for any virtual function declared in a class,
- // whether it is a new function or overrides a base class function,
- // unless it overrides a function from the primary base and conversion
- // between their return types does not require an adjustment.
-
- int64_t CurrentIndex = 0;
-
- const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
- const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
-
- if (PrimaryBase) {
- assert(PrimaryBase->isDefinition() &&
- "Should have the definition decl of the primary base!");
-
- // Since the record decl shares its vtable pointer with the primary base
- // we need to start counting at the end of the primary base's vtable.
- CurrentIndex = getNumVirtualFunctionPointers(PrimaryBase);
- }
-
- // Collect all the primary bases, so we can check whether methods override
- // a method from the base.
- VTableBuilder::PrimaryBasesSetVectorTy PrimaryBases;
- CollectPrimaryBases(RD, CGM.getContext(), PrimaryBases);
-
- const CXXDestructorDecl *ImplicitVirtualDtor = 0;
-
- for (CXXRecordDecl::method_iterator i = RD->method_begin(),
- e = RD->method_end(); i != e; ++i) {
- const CXXMethodDecl *MD = *i;
-
- // We only want virtual methods.
- if (!MD->isVirtual())
- continue;
-
- // Check if this method overrides a method in the primary base.
- if (const CXXMethodDecl *OverriddenMD =
- FindNearestOverriddenMethod(MD, PrimaryBases)) {
- // Check whether converting from the return type of the method to the
- // return type of the overridden method requires an adjustment.
- if (ComputeReturnAdjustmentBaseOffset(CGM.getContext(), MD,
- OverriddenMD).isEmpty()) {
- // This method shares a vtable slot with the method it overrides in the
- // primary base class, so reuse that index.
- if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
- const CXXDestructorDecl *OverriddenDD =
- cast<CXXDestructorDecl>(OverriddenMD);
-
- // Add both the complete and deleting entries.
- MethodVTableIndices[GlobalDecl(DD, Dtor_Complete)] =
- getMethodVTableIndex(GlobalDecl(OverriddenDD, Dtor_Complete));
- MethodVTableIndices[GlobalDecl(DD, Dtor_Deleting)] =
- getMethodVTableIndex(GlobalDecl(OverriddenDD, Dtor_Deleting));
- } else {
- MethodVTableIndices[MD] = getMethodVTableIndex(OverriddenMD);
- }
-
- // We don't need to add an entry for this method.
- continue;
- }
- }
-
- if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
- if (MD->isImplicit()) {
- assert(!ImplicitVirtualDtor &&
- "Already saw an implicit virtual dtor!");
- ImplicitVirtualDtor = DD;
- continue;
- }
-
- // Add the complete dtor.
- MethodVTableIndices[GlobalDecl(DD, Dtor_Complete)] = CurrentIndex++;
-
- // Add the deleting dtor.
- MethodVTableIndices[GlobalDecl(DD, Dtor_Deleting)] = CurrentIndex++;
- } else {
- // Add the entry.
- MethodVTableIndices[MD] = CurrentIndex++;
- }
- }
-
- if (ImplicitVirtualDtor) {
- // Itanium C++ ABI 2.5.2:
- // If a class has an implicitly-defined virtual destructor,
- // its entries come after the declared virtual function pointers.
-
- // Add the complete dtor.
- MethodVTableIndices[GlobalDecl(ImplicitVirtualDtor, Dtor_Complete)] =
- CurrentIndex++;
-
- // Add the deleting dtor.
- MethodVTableIndices[GlobalDecl(ImplicitVirtualDtor, Dtor_Deleting)] =
- CurrentIndex++;
- }
-
- NumVirtualFunctionPointers[RD] = CurrentIndex;
-}
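To make the index assignment above concrete, here is a minimal sketch (class names are illustrative) of the indices the removed ComputeMethodVTableIndices would hand out under the Itanium rules it implements:

    struct Base {
      virtual void f();   // index 0
      virtual ~Base();    // complete dtor: index 1, deleting dtor: index 2
    };

    struct Derived : Base {
      virtual void f();   // overrides Base::f with the same return type, so
                          // it reuses index 0 instead of getting a new slot
      virtual void g();   // first genuinely new virtual: index 3, i.e.
                          // counting resumes past the primary base's entries
    };                    // Derived's implicit dtor also reuses indices 1, 2
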
+CodeGenVTables::CodeGenVTables(CodeGenModule &CGM)
+ : CGM(CGM), VTContext(CGM.getContext()) { }
bool CodeGenVTables::ShouldEmitVTableInThisTU(const CXXRecordDecl *RD) {
assert(RD->isDynamicClass() && "Non dynamic classes have no VTable.");
@@ -2449,75 +58,6 @@ bool CodeGenVTables::ShouldEmitVTableInThisTU(const CXXRecordDecl *RD) {
return KeyFunction->hasBody();
}
-uint64_t CodeGenVTables::getNumVirtualFunctionPointers(const CXXRecordDecl *RD) {
- llvm::DenseMap<const CXXRecordDecl *, uint64_t>::iterator I =
- NumVirtualFunctionPointers.find(RD);
- if (I != NumVirtualFunctionPointers.end())
- return I->second;
-
- ComputeMethodVTableIndices(RD);
-
- I = NumVirtualFunctionPointers.find(RD);
- assert(I != NumVirtualFunctionPointers.end() && "Did not find entry!");
- return I->second;
-}
-
-uint64_t CodeGenVTables::getMethodVTableIndex(GlobalDecl GD) {
- MethodVTableIndicesTy::iterator I = MethodVTableIndices.find(GD);
- if (I != MethodVTableIndices.end())
- return I->second;
-
- const CXXRecordDecl *RD = cast<CXXMethodDecl>(GD.getDecl())->getParent();
-
- ComputeMethodVTableIndices(RD);
-
- I = MethodVTableIndices.find(GD);
- assert(I != MethodVTableIndices.end() && "Did not find index!");
- return I->second;
-}
-
-CharUnits
-CodeGenVTables::getVirtualBaseOffsetOffset(const CXXRecordDecl *RD,
- const CXXRecordDecl *VBase) {
- ClassPairTy ClassPair(RD, VBase);
-
- VirtualBaseClassOffsetOffsetsMapTy::iterator I =
- VirtualBaseClassOffsetOffsets.find(ClassPair);
- if (I != VirtualBaseClassOffsetOffsets.end())
- return I->second;
-
- VCallAndVBaseOffsetBuilder Builder(RD, RD, /*FinalOverriders=*/0,
- BaseSubobject(RD, CharUnits::Zero()),
- /*BaseIsVirtual=*/false,
- /*OffsetInLayoutClass=*/CharUnits::Zero());
-
- for (VCallAndVBaseOffsetBuilder::VBaseOffsetOffsetsMapTy::const_iterator I =
- Builder.getVBaseOffsetOffsets().begin(),
- E = Builder.getVBaseOffsetOffsets().end(); I != E; ++I) {
- // Cache the vbase offset offsets for all of this class's virtual bases.
- ClassPairTy ClassPair(RD, I->first);
-
- VirtualBaseClassOffsetOffsets.insert(
- std::make_pair(ClassPair, I->second));
- }
-
- I = VirtualBaseClassOffsetOffsets.find(ClassPair);
- assert(I != VirtualBaseClassOffsetOffsets.end() && "Did not find index!");
-
- return I->second;
-}
-
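For readers tracing getVirtualBaseOffsetOffset above: the "offset offset" is the position, relative to the vtable address point, of the slot holding a virtual base's dynamic offset. A sketch, assuming an LP64 Itanium target (values are illustrative):

    struct A { virtual void f(); };
    struct B : virtual A { virtual void g(); };

    // B's primary vtable, around its address point:
    //
    //   vbase_offset (A)    <- address point minus 3 pointers, so the
    //                          function would return -24 chars here
    //   offset_to_top (0)
    //   B RTTI
    //   ------------------  <- address point
    //   void B::g()
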
-uint64_t
-CodeGenVTables::getAddressPoint(BaseSubobject Base, const CXXRecordDecl *RD) {
- assert(AddressPoints.count(std::make_pair(RD, Base)) &&
- "Did not find address point!");
-
- uint64_t AddressPoint = AddressPoints.lookup(std::make_pair(RD, Base));
- assert(AddressPoint && "Address point must not be zero!");
-
- return AddressPoint;
-}
-
llvm::Constant *CodeGenModule::GetAddrOfThunk(GlobalDecl GD,
const ThunkInfo &Thunk) {
const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
@@ -2532,7 +72,7 @@ llvm::Constant *CodeGenModule::GetAddrOfThunk(GlobalDecl GD,
getCXXABI().getMangleContext().mangleThunk(MD, Thunk, Out);
Out.flush();
- const llvm::Type *Ty = getTypes().GetFunctionTypeForVTable(GD);
+ llvm::Type *Ty = getTypes().GetFunctionTypeForVTable(GD);
return GetOrCreateLLVMFunction(Name, Ty, GD, /*ForVTable=*/true);
}
@@ -2543,7 +83,7 @@ static llvm::Value *PerformTypeAdjustment(CodeGenFunction &CGF,
if (!NonVirtualAdjustment && !VirtualAdjustment)
return Ptr;
- const llvm::Type *Int8PtrTy =
+ llvm::Type *Int8PtrTy =
llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
llvm::Value *V = CGF.Builder.CreateBitCast(Ptr, Int8PtrTy);
@@ -2554,7 +94,7 @@ static llvm::Value *PerformTypeAdjustment(CodeGenFunction &CGF,
}
if (VirtualAdjustment) {
- const llvm::Type *PtrDiffTy =
+ llvm::Type *PtrDiffTy =
CGF.ConvertType(CGF.getContext().getPointerDiffType());
// Do the virtual adjustment.
@@ -2704,7 +244,7 @@ void CodeGenFunction::GenerateVarArgsThunk(
QualType ResultType = FPT->getResultType();
// Get the original function
- const llvm::Type *Ty =
+ llvm::Type *Ty =
CGM.getTypes().GetFunctionType(FnInfo, /*IsVariadic*/true);
llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
llvm::Function *BaseFn = cast<llvm::Function>(Callee);
@@ -2811,7 +351,7 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn,
}
// Get our callee.
- const llvm::Type *Ty =
+ llvm::Type *Ty =
CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(GD),
FPT->isVariadic());
llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
@@ -2881,7 +421,7 @@ void CodeGenVTables::EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk,
"Shouldn't replace non-declaration");
// Remove the name from the old thunk function and get a new thunk.
- OldThunkFn->setName(llvm::StringRef());
+ OldThunkFn->setName(StringRef());
Entry = CGM.GetAddrOfThunk(GD, Thunk);
// If needed, replace the old thunk with a bitcast.
@@ -2953,122 +493,27 @@ void CodeGenVTables::EmitThunks(GlobalDecl GD)
if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
return;
- const CXXRecordDecl *RD = MD->getParent();
-
- // Compute VTable related info for this class.
- ComputeVTableRelatedInformation(RD, false);
-
- ThunksMapTy::const_iterator I = Thunks.find(MD);
- if (I == Thunks.end()) {
- // We did not find a thunk for this method.
+ const VTableContext::ThunkInfoVectorTy *ThunkInfoVector =
+ VTContext.getThunkInfo(MD);
+ if (!ThunkInfoVector)
return;
- }
- const ThunkInfoVectorTy &ThunkInfoVector = I->second;
- for (unsigned I = 0, E = ThunkInfoVector.size(); I != E; ++I)
- EmitThunk(GD, ThunkInfoVector[I], /*UseAvailableExternallyLinkage=*/false);
-}
-
-void CodeGenVTables::ComputeVTableRelatedInformation(const CXXRecordDecl *RD,
- bool RequireVTable) {
- VTableLayoutData &Entry = VTableLayoutMap[RD];
-
- // We may need to generate a definition for this vtable.
- if (RequireVTable && !Entry.getInt()) {
- if (ShouldEmitVTableInThisTU(RD))
- CGM.DeferredVTables.push_back(RD);
-
- Entry.setInt(true);
- }
-
- // Check if we've computed this information before.
- if (Entry.getPointer())
- return;
-
- VTableBuilder Builder(*this, RD, CharUnits::Zero(),
- /*MostDerivedClassIsVirtual=*/0, RD);
-
- // Add the VTable layout.
- uint64_t NumVTableComponents = Builder.getNumVTableComponents();
- // -fapple-kext adds an extra entry at end of vtbl.
- bool IsAppleKext = CGM.getContext().getLangOptions().AppleKext;
- if (IsAppleKext)
- NumVTableComponents += 1;
-
- uint64_t *LayoutData = new uint64_t[NumVTableComponents + 1];
- if (IsAppleKext)
- LayoutData[NumVTableComponents] = 0;
- Entry.setPointer(LayoutData);
-
- // Store the number of components.
- LayoutData[0] = NumVTableComponents;
-
- // Store the components.
- std::copy(Builder.vtable_components_data_begin(),
- Builder.vtable_components_data_end(),
- &LayoutData[1]);
-
- // Add the known thunks.
- Thunks.insert(Builder.thunks_begin(), Builder.thunks_end());
-
- // Add the thunks needed in this vtable.
- assert(!VTableThunksMap.count(RD) &&
- "Thunks already exists for this vtable!");
-
- VTableThunksTy &VTableThunks = VTableThunksMap[RD];
- VTableThunks.append(Builder.vtable_thunks_begin(),
- Builder.vtable_thunks_end());
-
- // Sort them.
- std::sort(VTableThunks.begin(), VTableThunks.end());
-
- // Add the address points.
- for (VTableBuilder::AddressPointsMapTy::const_iterator I =
- Builder.address_points_begin(), E = Builder.address_points_end();
- I != E; ++I) {
-
- uint64_t &AddressPoint = AddressPoints[std::make_pair(RD, I->first)];
-
- // Check if we already have the address points for this base.
- assert(!AddressPoint && "Address point already exists for this base!");
-
- AddressPoint = I->second;
- }
-
- // If we don't have the vbase information for this class, insert it.
- // getVirtualBaseOffsetOffset will compute it separately without computing
- // the rest of the vtable related information.
- if (!RD->getNumVBases())
- return;
-
- const RecordType *VBaseRT =
- RD->vbases_begin()->getType()->getAs<RecordType>();
- const CXXRecordDecl *VBase = cast<CXXRecordDecl>(VBaseRT->getDecl());
-
- if (VirtualBaseClassOffsetOffsets.count(std::make_pair(RD, VBase)))
- return;
-
- for (VTableBuilder::VBaseOffsetOffsetsMapTy::const_iterator I =
- Builder.getVBaseOffsetOffsets().begin(),
- E = Builder.getVBaseOffsetOffsets().end(); I != E; ++I) {
- // Cache the vbase offset offsets for all of this class's virtual bases.
- ClassPairTy ClassPair(RD, I->first);
-
- VirtualBaseClassOffsetOffsets.insert(
- std::make_pair(ClassPair, I->second));
- }
+ for (unsigned I = 0, E = ThunkInfoVector->size(); I != E; ++I)
+ EmitThunk(GD, (*ThunkInfoVector)[I],
+ /*UseAvailableExternallyLinkage=*/false);
}
llvm::Constant *
CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
- const uint64_t *Components,
+ const VTableComponent *Components,
unsigned NumComponents,
- const VTableThunksTy &VTableThunks) {
- llvm::SmallVector<llvm::Constant *, 64> Inits;
+ const VTableLayout::VTableThunkTy *VTableThunks,
+ unsigned NumVTableThunks) {
+ SmallVector<llvm::Constant *, 64> Inits;
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+ llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
- const llvm::Type *PtrDiffTy =
+ llvm::Type *PtrDiffTy =
CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
QualType ClassType = CGM.getContext().getTagDeclType(RD);
@@ -3079,8 +524,7 @@ CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
llvm::Constant* PureVirtualFn = 0;
for (unsigned I = 0; I != NumComponents; ++I) {
- VTableComponent Component =
- VTableComponent::getFromOpaqueInteger(Components[I]);
+ VTableComponent Component = Components[I];
llvm::Constant *Init = 0;
@@ -3126,7 +570,7 @@ CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
if (cast<CXXMethodDecl>(GD.getDecl())->isPure()) {
// We have a pure virtual member function.
if (!PureVirtualFn) {
- const llvm::FunctionType *Ty =
+ llvm::FunctionType *Ty =
llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
/*isVarArg=*/false);
PureVirtualFn =
@@ -3138,7 +582,7 @@ CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
Init = PureVirtualFn;
} else {
// Check if we should use a thunk.
- if (NextVTableThunkIndex < VTableThunks.size() &&
+ if (NextVTableThunkIndex < NumVTableThunks &&
VTableThunks[NextVTableThunkIndex].first == I) {
const ThunkInfo &Thunk = VTableThunks[NextVTableThunkIndex].second;
@@ -3147,7 +591,7 @@ CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
NextVTableThunkIndex++;
} else {
- const llvm::Type *Ty = CGM.getTypes().GetFunctionTypeForVTable(GD);
+ llvm::Type *Ty = CGM.getTypes().GetFunctionTypeForVTable(GD);
Init = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
}
@@ -3170,46 +614,45 @@ CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
}
llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTable(const CXXRecordDecl *RD) {
+ llvm::GlobalVariable *&VTable = VTables[RD];
+ if (VTable)
+ return VTable;
+
+ // We may need to generate a definition for this vtable.
+ if (ShouldEmitVTableInThisTU(RD))
+ CGM.DeferredVTables.push_back(RD);
+
llvm::SmallString<256> OutName;
llvm::raw_svector_ostream Out(OutName);
CGM.getCXXABI().getMangleContext().mangleCXXVTable(RD, Out);
Out.flush();
- llvm::StringRef Name = OutName.str();
+ StringRef Name = OutName.str();
- ComputeVTableRelatedInformation(RD, /*RequireVTable=*/true);
-
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+ llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
llvm::ArrayType *ArrayType =
- llvm::ArrayType::get(Int8PtrTy, getNumVTableComponents(RD));
+ llvm::ArrayType::get(Int8PtrTy,
+ VTContext.getVTableLayout(RD).getNumVTableComponents());
- llvm::GlobalVariable *GV =
+ VTable =
CGM.CreateOrReplaceCXXRuntimeVariable(Name, ArrayType,
llvm::GlobalValue::ExternalLinkage);
- GV->setUnnamedAddr(true);
- return GV;
+ VTable->setUnnamedAddr(true);
+ return VTable;
}
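The new fast path at the top of GetAddrOfVTable uses a reference into the DenseMap as both lookup result and insertion slot. In isolation the idiom looks like this (Key, Value, and computeValue are placeholders):

    #include "llvm/ADT/DenseMap.h"

    struct Key; struct Value;
    Value *computeValue(const Key *K);

    llvm::DenseMap<const Key *, Value *> Cache;

    Value *getOrCreate(const Key *K) {
      Value *&Slot = Cache[K]; // operator[] default-constructs a null entry
      if (Slot)
        return Slot;           // already computed
      // Safe only because computeValue() never inserts into Cache; growth
      // would otherwise invalidate 'Slot'.
      Slot = computeValue(K);
      return Slot;
    }
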
void
CodeGenVTables::EmitVTableDefinition(llvm::GlobalVariable *VTable,
llvm::GlobalVariable::LinkageTypes Linkage,
const CXXRecordDecl *RD) {
- // Dump the vtable layout if necessary.
- if (CGM.getLangOptions().DumpVTableLayouts) {
- VTableBuilder Builder(*this, RD, CharUnits::Zero(),
- /*MostDerivedClassIsVirtual=*/0, RD);
-
- Builder.dumpLayout(llvm::errs());
- }
+ const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
- assert(VTableThunksMap.count(RD) &&
- "No thunk status for this record decl!");
-
- const VTableThunksTy& Thunks = VTableThunksMap[RD];
-
// Create and set the initializer.
llvm::Constant *Init =
- CreateVTableInitializer(RD, getVTableComponentsData(RD),
- getNumVTableComponents(RD), Thunks);
+ CreateVTableInitializer(RD,
+ VTLayout.vtable_component_begin(),
+ VTLayout.getNumVTableComponents(),
+ VTLayout.vtable_thunk_begin(),
+ VTLayout.getNumVTableThunks());
VTable->setInitializer(Init);
// Set the correct linkage.
@@ -3225,17 +668,13 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
bool BaseIsVirtual,
llvm::GlobalVariable::LinkageTypes Linkage,
VTableAddressPointsMapTy& AddressPoints) {
- VTableBuilder Builder(*this, Base.getBase(),
- Base.getBaseOffset(),
- /*MostDerivedClassIsVirtual=*/BaseIsVirtual, RD);
-
- // Dump the vtable layout if necessary.
- if (CGM.getLangOptions().DumpVTableLayouts)
- Builder.dumpLayout(llvm::errs());
+ llvm::OwningPtr<VTableLayout> VTLayout(
+ VTContext.createConstructionVTableLayout(Base.getBase(),
+ Base.getBaseOffset(),
+ BaseIsVirtual, RD));
// Add the address points.
- AddressPoints.insert(Builder.address_points_begin(),
- Builder.address_points_end());
+ AddressPoints = VTLayout->getAddressPoints();
// Get the mangled construction vtable name.
llvm::SmallString<256> OutName;
@@ -3244,11 +683,11 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
mangleCXXCtorVTable(RD, Base.getBaseOffset().getQuantity(), Base.getBase(),
Out);
Out.flush();
- llvm::StringRef Name = OutName.str();
+ StringRef Name = OutName.str();
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+ llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
llvm::ArrayType *ArrayType =
- llvm::ArrayType::get(Int8PtrTy, Builder.getNumVTableComponents());
+ llvm::ArrayType::get(Int8PtrTy, VTLayout->getNumVTableComponents());
// Create the variable that will hold the construction vtable.
llvm::GlobalVariable *VTable =
@@ -3258,19 +697,13 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
// V-tables are always unnamed_addr.
VTable->setUnnamedAddr(true);
- // Add the thunks.
- VTableThunksTy VTableThunks;
- VTableThunks.append(Builder.vtable_thunks_begin(),
- Builder.vtable_thunks_end());
-
- // Sort them.
- std::sort(VTableThunks.begin(), VTableThunks.end());
-
// Create and set the initializer.
llvm::Constant *Init =
CreateVTableInitializer(Base.getBase(),
- Builder.vtable_components_data_begin(),
- Builder.getNumVTableComponents(), VTableThunks);
+ VTLayout->vtable_component_begin(),
+ VTLayout->getNumVTableComponents(),
+ VTLayout->vtable_thunk_begin(),
+ VTLayout->getNumVTableThunks());
VTable->setInitializer(Init);
return VTable;
@@ -3279,13 +712,10 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
void
CodeGenVTables::GenerateClassData(llvm::GlobalVariable::LinkageTypes Linkage,
const CXXRecordDecl *RD) {
- llvm::GlobalVariable *&VTable = VTables[RD];
- if (VTable) {
- assert(VTable->getInitializer() && "VTable doesn't have a definition!");
+ llvm::GlobalVariable *VTable = GetAddrOfVTable(RD);
+ if (VTable->hasInitializer())
return;
- }
- VTable = GetAddrOfVTable(RD);
EmitVTableDefinition(VTable, Linkage, RD);
if (RD->getNumVBases()) {
diff --git a/lib/CodeGen/CGVTables.h b/lib/CodeGen/CGVTables.h
index eff6e56c1f80..828330e5e3c4 100644
--- a/lib/CodeGen/CGVTables.h
+++ b/lib/CodeGen/CGVTables.h
@@ -17,8 +17,10 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/GlobalVariable.h"
#include "clang/Basic/ABI.h"
+#include "clang/AST/BaseSubobject.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/GlobalDecl.h"
+#include "clang/AST/VTableBuilder.h"
namespace clang {
class CXXRecordDecl;
@@ -26,145 +28,18 @@ namespace clang {
namespace CodeGen {
class CodeGenModule;
-// BaseSubobject - Uniquely identifies a direct or indirect base class.
-// Stores both the base class decl and the offset from the most derived class to
-// the base class.
-class BaseSubobject {
- /// Base - The base class declaration.
- const CXXRecordDecl *Base;
-
- /// BaseOffset - The offset from the most derived class to the base class.
- CharUnits BaseOffset;
-
-public:
- BaseSubobject(const CXXRecordDecl *Base, CharUnits BaseOffset)
- : Base(Base), BaseOffset(BaseOffset) { }
-
- /// getBase - Returns the base class declaration.
- const CXXRecordDecl *getBase() const { return Base; }
-
- /// getBaseOffset - Returns the base class offset.
- CharUnits getBaseOffset() const { return BaseOffset; }
-
- friend bool operator==(const BaseSubobject &LHS, const BaseSubobject &RHS) {
- return LHS.Base == RHS.Base && LHS.BaseOffset == RHS.BaseOffset;
- }
-};
-
-} // end namespace CodeGen
-} // end namespace clang
-
-namespace llvm {
-
-template<> struct DenseMapInfo<clang::CodeGen::BaseSubobject> {
- static clang::CodeGen::BaseSubobject getEmptyKey() {
- return clang::CodeGen::BaseSubobject(
- DenseMapInfo<const clang::CXXRecordDecl *>::getEmptyKey(),
- clang::CharUnits::fromQuantity(DenseMapInfo<int64_t>::getEmptyKey()));
- }
-
- static clang::CodeGen::BaseSubobject getTombstoneKey() {
- return clang::CodeGen::BaseSubobject(
- DenseMapInfo<const clang::CXXRecordDecl *>::getTombstoneKey(),
- clang::CharUnits::fromQuantity(DenseMapInfo<int64_t>::getTombstoneKey()));
- }
-
- static unsigned getHashValue(const clang::CodeGen::BaseSubobject &Base) {
- return
- DenseMapInfo<const clang::CXXRecordDecl *>::getHashValue(Base.getBase()) ^
- DenseMapInfo<int64_t>::getHashValue(Base.getBaseOffset().getQuantity());
- }
-
- static bool isEqual(const clang::CodeGen::BaseSubobject &LHS,
- const clang::CodeGen::BaseSubobject &RHS) {
- return LHS == RHS;
- }
-};
-
-// It's OK to treat BaseSubobject as a POD type.
-template <> struct isPodLike<clang::CodeGen::BaseSubobject> {
- static const bool value = true;
-};
-
-}
-
-namespace clang {
-namespace CodeGen {
-
class CodeGenVTables {
CodeGenModule &CGM;
- /// MethodVTableIndices - Contains the index (relative to the vtable address
- /// point) where the function pointer for a virtual function is stored.
- typedef llvm::DenseMap<GlobalDecl, int64_t> MethodVTableIndicesTy;
- MethodVTableIndicesTy MethodVTableIndices;
-
- typedef std::pair<const CXXRecordDecl *,
- const CXXRecordDecl *> ClassPairTy;
-
- /// VirtualBaseClassOffsetOffsets - Contains the vtable offset (relative to
- /// the address point) in chars where the offsets for virtual bases of a class
- /// are stored.
- typedef llvm::DenseMap<ClassPairTy, CharUnits>
- VirtualBaseClassOffsetOffsetsMapTy;
- VirtualBaseClassOffsetOffsetsMapTy VirtualBaseClassOffsetOffsets;
+ VTableContext VTContext;
/// VTables - All the vtables which have been defined.
llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
- /// NumVirtualFunctionPointers - Contains the number of virtual function
- /// pointers in the vtable for a given record decl.
- llvm::DenseMap<const CXXRecordDecl *, uint64_t> NumVirtualFunctionPointers;
-
- typedef llvm::SmallVector<ThunkInfo, 1> ThunkInfoVectorTy;
- typedef llvm::DenseMap<const CXXMethodDecl *, ThunkInfoVectorTy> ThunksMapTy;
-
- /// Thunks - Contains all thunks that a given method decl will need.
- ThunksMapTy Thunks;
-
- // The layout entry and a bool indicating whether we've actually emitted
- // the vtable.
- typedef llvm::PointerIntPair<uint64_t *, 1, bool> VTableLayoutData;
- typedef llvm::DenseMap<const CXXRecordDecl *, VTableLayoutData>
- VTableLayoutMapTy;
-
- /// VTableLayoutMap - Stores the vtable layout for all record decls.
- /// The layout is stored as an array of 64-bit integers, where the first
- /// integer is the number of vtable entries in the layout, and the subsequent
- /// integers are the vtable components.
- VTableLayoutMapTy VTableLayoutMap;
-
- typedef std::pair<const CXXRecordDecl *, BaseSubobject> BaseSubobjectPairTy;
- typedef llvm::DenseMap<BaseSubobjectPairTy, uint64_t> AddressPointsMapTy;
-
- /// Address points - Address points for all vtables.
- AddressPointsMapTy AddressPoints;
-
/// VTableAddressPointsMapTy - Address points for a single vtable.
typedef llvm::DenseMap<BaseSubobject, uint64_t> VTableAddressPointsMapTy;
- typedef llvm::SmallVector<std::pair<uint64_t, ThunkInfo>, 1>
- VTableThunksTy;
-
- typedef llvm::DenseMap<const CXXRecordDecl *, VTableThunksTy>
- VTableThunksMapTy;
-
- /// VTableThunksMap - Contains thunks needed by vtables.
- VTableThunksMapTy VTableThunksMap;
-
- uint64_t getNumVTableComponents(const CXXRecordDecl *RD) const {
- assert(VTableLayoutMap.count(RD) && "No vtable layout for this class!");
-
- return VTableLayoutMap.lookup(RD).getPointer()[0];
- }
-
- const uint64_t *getVTableComponentsData(const CXXRecordDecl *RD) const {
- assert(VTableLayoutMap.count(RD) && "No vtable layout for this class!");
-
- uint64_t *Components = VTableLayoutMap.lookup(RD).getPointer();
- return &Components[1];
- }
-
+ typedef std::pair<const CXXRecordDecl *, BaseSubobject> BaseSubobjectPairTy;
typedef llvm::DenseMap<BaseSubobjectPairTy, uint64_t> SubVTTIndiciesMapTy;
/// SubVTTIndicies - Contains indices into the various sub-VTTs.
@@ -177,12 +52,6 @@ class CodeGenVTables {
/// indices.
SecondaryVirtualPointerIndicesMapTy SecondaryVirtualPointerIndices;
- /// getNumVirtualFunctionPointers - Return the number of virtual function
- /// pointers in the vtable for a given record decl.
- uint64_t getNumVirtualFunctionPointers(const CXXRecordDecl *RD);
-
- void ComputeMethodVTableIndices(const CXXRecordDecl *RD);
-
/// EmitThunk - Emit a single thunk.
void EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk,
bool UseAvailableExternallyLinkage);
@@ -193,24 +62,20 @@ class CodeGenVTables {
/// doesn't contain any incomplete types.
void MaybeEmitThunkAvailableExternally(GlobalDecl GD, const ThunkInfo &Thunk);
- /// ComputeVTableRelatedInformation - Compute and store all vtable related
- /// information (vtable layout, vbase offset offsets, thunks etc) for the
- /// given record decl.
- void ComputeVTableRelatedInformation(const CXXRecordDecl *RD,
- bool VTableRequired);
-
/// CreateVTableInitializer - Create a vtable initializer for the given record
/// decl.
/// \param Components - The vtable components; this is really an array of
/// VTableComponents.
llvm::Constant *CreateVTableInitializer(const CXXRecordDecl *RD,
- const uint64_t *Components,
+ const VTableComponent *Components,
unsigned NumComponents,
- const VTableThunksTy &VTableThunks);
+ const VTableLayout::VTableThunkTy *VTableThunks,
+ unsigned NumVTableThunks);
public:
- CodeGenVTables(CodeGenModule &CGM)
- : CGM(CGM) { }
+ CodeGenVTables(CodeGenModule &CGM);
+
+ VTableContext &getVTableContext() { return VTContext; }
/// \brief True if the VTable of this record must be emitted in the
/// translation unit.
@@ -230,19 +95,6 @@ public:
uint64_t getSecondaryVirtualPointerIndex(const CXXRecordDecl *RD,
BaseSubobject Base);
- /// getMethodVTableIndex - Return the index (relative to the vtable address
- /// point) where the function pointer for the given virtual function is
- /// stored.
- uint64_t getMethodVTableIndex(GlobalDecl GD);
-
- /// getVirtualBaseOffsetOffset - Return the offset in chars (relative to the
- /// vtable address point) where the offset of the virtual base that contains
- /// the given base is stored; if no virtual base contains the given
- /// class, return 0. VBase must be a virtual base class or an unambiguous
- /// base.
- CharUnits getVirtualBaseOffsetOffset(const CXXRecordDecl *RD,
- const CXXRecordDecl *VBase);
-
/// getAddressPoint - Get the address point of the given subobject in the
/// class decl.
uint64_t getAddressPoint(BaseSubobject Base, const CXXRecordDecl *RD);
diff --git a/lib/CodeGen/CGValue.h b/lib/CodeGen/CGValue.h
index 4d0b8410e451..489e600b3ddf 100644
--- a/lib/CodeGen/CGValue.h
+++ b/lib/CodeGen/CGValue.h
@@ -337,65 +337,90 @@ class AggValueSlot {
// Qualifiers
Qualifiers Quals;
+
+ /// DestructedFlag - This is set to true if some external code is
+ /// responsible for setting up a destructor for the slot. Otherwise
+ /// the code which constructs it should push the appropriate cleanup.
+ bool DestructedFlag : 1;
+
+ /// ObjCGCFlag - This is set to true if writing to the memory in the
+ /// slot might require calling an appropriate Objective-C GC
+ /// barrier. The exact interaction here is unnecessarily mysterious.
+ bool ObjCGCFlag : 1;
- // Associated flags.
- bool LifetimeFlag : 1;
- bool RequiresGCollection : 1;
-
- /// IsZeroed - This is set to true if the destination is known to be zero
- /// before the assignment into it. This means that zero fields don't need to
- /// be set.
- bool IsZeroed : 1;
+ /// ZeroedFlag - This is set to true if the memory in the slot is
+ /// known to be zero before the assignment into it. This means that
+ /// zero fields don't need to be set.
+ bool ZeroedFlag : 1;
+
+ /// AliasedFlag - This is set to true if the slot might be aliased
+ /// and it's not undefined behavior to access it through such an
+ /// alias. Note that it's always undefined behavior to access a C++
+ /// object that's under construction through an alias derived from
+ /// outside the construction process.
+ ///
+ /// This flag controls whether calls that produce the aggregate
+ /// value may be evaluated directly into the slot, or whether they
+ /// must be evaluated into an unaliased temporary and then memcpy'ed
+ /// over. Since it's invalid in general to memcpy a non-POD C++
+ /// object, it's important that this flag never be set when
+ /// evaluating an expression which constructs such an object.
+ bool AliasedFlag : 1;
public:
+ enum IsAliased_t { IsNotAliased, IsAliased };
+ enum IsDestructed_t { IsNotDestructed, IsDestructed };
+ enum IsZeroed_t { IsNotZeroed, IsZeroed };
+ enum NeedsGCBarriers_t { DoesNotNeedGCBarriers, NeedsGCBarriers };
+
/// ignored - Returns an aggregate value slot indicating that the
/// aggregate value is being ignored.
static AggValueSlot ignored() {
AggValueSlot AV;
AV.Addr = 0;
AV.Quals = Qualifiers();
- AV.LifetimeFlag = AV.RequiresGCollection = AV.IsZeroed =0;
+ AV.DestructedFlag = AV.ObjCGCFlag = AV.ZeroedFlag = AV.AliasedFlag = false;
return AV;
}
/// forAddr - Make a slot for an aggregate value.
///
- /// \param Volatile - true if the slot should be volatile-initialized
- ///
- /// \param Qualifiers - The qualifiers that dictate how the slot
- /// should be initialized. Only 'volatile' and the Objective-C
- /// lifetime qualifiers matter.
+ /// \param quals - The qualifiers that dictate how the slot should
+ /// be initialized. Only 'volatile' and the Objective-C lifetime
+ /// qualifiers matter.
///
- /// \param LifetimeExternallyManaged - true if the slot's lifetime
- /// is being externally managed; false if a destructor should be
- /// registered for any temporaries evaluated into the slot
- /// \param RequiresGCollection - true if the slot is located
+ /// \param isDestructed - true if something else is responsible
+ /// for calling destructors on this object
+ /// \param needsGC - true if the slot is potentially located
+ /// somewhere for which Objective-C GC write barriers should be emitted
- static AggValueSlot forAddr(llvm::Value *Addr, Qualifiers Quals,
- bool LifetimeExternallyManaged,
- bool RequiresGCollection = false,
- bool IsZeroed = false) {
+ static AggValueSlot forAddr(llvm::Value *addr, Qualifiers quals,
+ IsDestructed_t isDestructed,
+ NeedsGCBarriers_t needsGC,
+ IsAliased_t isAliased,
+ IsZeroed_t isZeroed = IsNotZeroed) {
AggValueSlot AV;
- AV.Addr = Addr;
- AV.Quals = Quals;
- AV.LifetimeFlag = LifetimeExternallyManaged;
- AV.RequiresGCollection = RequiresGCollection;
- AV.IsZeroed = IsZeroed;
+ AV.Addr = addr;
+ AV.Quals = quals;
+ AV.DestructedFlag = isDestructed;
+ AV.ObjCGCFlag = needsGC;
+ AV.ZeroedFlag = isZeroed;
+ AV.AliasedFlag = isAliased;
return AV;
}
- static AggValueSlot forLValue(LValue LV, bool LifetimeExternallyManaged,
- bool RequiresGCollection = false,
- bool IsZeroed = false) {
+ static AggValueSlot forLValue(LValue LV, IsDestructed_t isDestructed,
+ NeedsGCBarriers_t needsGC,
+ IsAliased_t isAliased,
+ IsZeroed_t isZeroed = IsNotZeroed) {
return forAddr(LV.getAddress(), LV.getQuals(),
- LifetimeExternallyManaged, RequiresGCollection, IsZeroed);
+ isDestructed, needsGC, isAliased, isZeroed);
}
- bool isLifetimeExternallyManaged() const {
- return LifetimeFlag;
+ IsDestructed_t isExternallyDestructed() const {
+ return IsDestructed_t(DestructedFlag);
}
- void setLifetimeExternallyManaged(bool Managed = true) {
- LifetimeFlag = Managed;
+ void setExternallyDestructed(bool destructed = true) {
+ DestructedFlag = destructed;
}
Qualifiers getQualifiers() const { return Quals; }
@@ -408,8 +433,8 @@ public:
return Quals.getObjCLifetime();
}
- bool requiresGCollection() const {
- return RequiresGCollection;
+ NeedsGCBarriers_t requiresGCollection() const {
+ return NeedsGCBarriers_t(ObjCGCFlag);
}
llvm::Value *getAddr() const {
@@ -420,13 +445,17 @@ public:
return Addr == 0;
}
+ IsAliased_t isPotentiallyAliased() const {
+ return IsAliased_t(AliasedFlag);
+ }
+
RValue asRValue() const {
return RValue::getAggregate(getAddr(), isVolatile());
}
- void setZeroed(bool V = true) { IsZeroed = V; }
- bool isZeroed() const {
- return IsZeroed;
+ void setZeroed(bool V = true) { ZeroedFlag = V; }
+ IsZeroed_t isZeroed() const {
+ return IsZeroed_t(ZeroedFlag);
}
};
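A hypothetical call site after this change; 'addr' and 'quals' stand in for values already in scope, and pushDestructorCleanup is an assumed helper. The point of the named enumerators is that a caller can no longer silently transpose two bools:

    AggValueSlot Slot =
        AggValueSlot::forAddr(addr, quals,
                              AggValueSlot::IsDestructed,  // dtors run elsewhere
                              AggValueSlot::DoesNotNeedGCBarriers,
                              AggValueSlot::IsNotAliased); // evaluate in place

    // Only push a destructor cleanup if nobody else took responsibility.
    if (!Slot.isExternallyDestructed())
      pushDestructorCleanup(Slot);   // hypothetical helper
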
diff --git a/lib/CodeGen/CMakeLists.txt b/lib/CodeGen/CMakeLists.txt
index 80e46d2be704..5e674a8d0110 100644
--- a/lib/CodeGen/CMakeLists.txt
+++ b/lib/CodeGen/CMakeLists.txt
@@ -14,6 +14,8 @@ add_clang_library(clangCodeGen
CGBuiltin.cpp
CGCall.cpp
CGClass.cpp
+ CGCUDANV.cpp
+ CGCUDARuntime.cpp
CGCXX.cpp
CGCXXABI.cpp
CGCleanup.cpp
@@ -31,6 +33,7 @@ add_clang_library(clangCodeGen
CGObjCGNU.cpp
CGObjCMac.cpp
CGObjCRuntime.cpp
+ CGOpenCLRuntime.cpp
CGRecordLayoutBuilder.cpp
CGRTTI.cpp
CGStmt.cpp
diff --git a/lib/CodeGen/CodeGenAction.cpp b/lib/CodeGen/CodeGenAction.cpp
index 263e01e4f18a..68dd5c94dc01 100644
--- a/lib/CodeGen/CodeGenAction.cpp
+++ b/lib/CodeGen/CodeGenAction.cpp
@@ -30,12 +30,12 @@ using namespace llvm;
namespace clang {
class BackendConsumer : public ASTConsumer {
- Diagnostic &Diags;
+ DiagnosticsEngine &Diags;
BackendAction Action;
const CodeGenOptions &CodeGenOpts;
const TargetOptions &TargetOpts;
const LangOptions &LangOpts;
- llvm::raw_ostream *AsmOutStream;
+ raw_ostream *AsmOutStream;
ASTContext *Context;
Timer LLVMIRGeneration;
@@ -45,12 +45,12 @@ namespace clang {
llvm::OwningPtr<llvm::Module> TheModule;
public:
- BackendConsumer(BackendAction action, Diagnostic &_Diags,
+ BackendConsumer(BackendAction action, DiagnosticsEngine &_Diags,
const CodeGenOptions &compopts,
const TargetOptions &targetopts,
const LangOptions &langopts,
bool TimePasses,
- const std::string &infile, llvm::raw_ostream *OS,
+ const std::string &infile, raw_ostream *OS,
LLVMContext &C) :
Diags(_Diags),
Action(action),
@@ -185,7 +185,7 @@ static FullSourceLoc ConvertBackendLocation(const llvm::SMDiagnostic &D,
// Translate the offset into the file.
unsigned Offset = D.getLoc().getPointer() - LBuf->getBufferStart();
SourceLocation NewLoc =
- CSM.getLocForStartOfFile(FID).getFileLocWithOffset(Offset);
+ CSM.getLocForStartOfFile(FID).getLocWithOffset(Offset);
return FullSourceLoc(NewLoc, CSM);
}
@@ -199,7 +199,7 @@ void BackendConsumer::InlineAsmDiagHandler2(const llvm::SMDiagnostic &D,
// we re-format the SMDiagnostic in terms of a clang diagnostic.
// Strip "error: " off the start of the message string.
- llvm::StringRef Message = D.getMessage();
+ StringRef Message = D.getMessage();
if (Message.startswith("error: "))
Message = Message.substr(7);
@@ -259,7 +259,7 @@ llvm::LLVMContext *CodeGenAction::takeLLVMContext() {
}
static raw_ostream *GetOutputStream(CompilerInstance &CI,
- llvm::StringRef InFile,
+ StringRef InFile,
BackendAction Action) {
switch (Action) {
case Backend_EmitAssembly:
@@ -275,14 +275,13 @@ static raw_ostream *GetOutputStream(CompilerInstance &CI,
return CI.createDefaultOutputFile(true, InFile, "o");
}
- assert(0 && "Invalid action!");
- return 0;
+ llvm_unreachable("Invalid action!");
}
ASTConsumer *CodeGenAction::CreateASTConsumer(CompilerInstance &CI,
- llvm::StringRef InFile) {
+ StringRef InFile) {
BackendAction BA = static_cast<BackendAction>(Act);
- llvm::OwningPtr<llvm::raw_ostream> OS(GetOutputStream(CI, InFile, BA));
+ llvm::OwningPtr<raw_ostream> OS(GetOutputStream(CI, InFile, BA));
if (BA != Backend_EmitNothing && !OS)
return 0;
@@ -320,17 +319,17 @@ void CodeGenAction::ExecuteAction() {
TheModule.reset(ParseIR(MainFileCopy, Err, *VMContext));
if (!TheModule) {
// Translate from the diagnostic info to the SourceManager location.
- SourceLocation Loc = SM.getLocation(
+ SourceLocation Loc = SM.translateFileLineCol(
SM.getFileEntryForID(SM.getMainFileID()), Err.getLineNo(),
Err.getColumnNo() + 1);
// Get a custom diagnostic for the error. We strip off a leading
// diagnostic code if there is one.
- llvm::StringRef Msg = Err.getMessage();
+ StringRef Msg = Err.getMessage();
if (Msg.startswith("error: "))
Msg = Msg.substr(7);
- unsigned DiagID = CI.getDiagnostics().getCustomDiagID(Diagnostic::Error,
- Msg);
+ unsigned DiagID = CI.getDiagnostics().getCustomDiagID(
+ DiagnosticsEngine::Error, Msg);
CI.getDiagnostics().Report(Loc, DiagID);
return;
diff --git a/lib/CodeGen/CodeGenFunction.cpp b/lib/CodeGen/CodeGenFunction.cpp
index 702897a7c448..8191f021da4a 100644
--- a/lib/CodeGen/CodeGenFunction.cpp
+++ b/lib/CodeGen/CodeGenFunction.cpp
@@ -13,6 +13,7 @@
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
+#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGException.h"
@@ -30,10 +31,10 @@ using namespace CodeGen;
CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
: CodeGenTypeCache(cgm), CGM(cgm),
- Target(CGM.getContext().Target), Builder(cgm.getModule().getContext()),
+ Target(CGM.getContext().getTargetInfo()), Builder(cgm.getModule().getContext()),
AutoreleaseResult(false), BlockInfo(0), BlockPointer(0),
- NormalCleanupDest(0), EHCleanupDest(0), NextCleanupDestIndex(1),
- ExceptionSlot(0), EHSelectorSlot(0),
+ NormalCleanupDest(0), NextCleanupDestIndex(1),
+ EHResumeBlock(0), ExceptionSlot(0), EHSelectorSlot(0),
DebugInfo(0), DisableDebugInfo(false), DidCallStackSave(false),
IndirectBranch(0), SwitchInsn(0), CaseRangeBlock(0), UnreachableBlock(0),
CXXThisDecl(0), CXXThisValue(0), CXXVTTDecl(0), CXXVTTValue(0),
@@ -86,6 +87,10 @@ bool CodeGenFunction::hasAggregateLLVMType(QualType type) {
case Type::ObjCObject:
case Type::ObjCInterface:
return true;
+
+ // In IRGen, atomic types are just the underlying type
+ case Type::Atomic:
+ return hasAggregateLLVMType(type->getAs<AtomicType>()->getValueType());
}
llvm_unreachable("unknown type kind!");
}
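A sketch of what the new Type::Atomic case means for inputs (C11 _Atomic, which clang also accepts in C++ as an extension):

    struct Pair { int a, b; };

    _Atomic(struct Pair) p;  // classified like plain 'struct Pair': aggregate
    _Atomic(int) i;          // classified like plain 'int': scalar
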
@@ -116,7 +121,8 @@ void CodeGenFunction::EmitReturnBlock() {
dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->use_begin());
if (BI && BI->isUnconditional() &&
BI->getSuccessor(0) == ReturnBlock.getBlock()) {
- // Reset insertion point and delete the branch.
+ // Reset insertion point, including debug location, and delete the branch.
+ Builder.SetCurrentDebugLocation(BI->getDebugLoc());
Builder.SetInsertPoint(BI->getParent());
BI->eraseFromParent();
delete ReturnBlock.getBlock();
@@ -189,7 +195,7 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
}
}
- EmitIfUsed(*this, RethrowBlock.getBlock());
+ EmitIfUsed(*this, EHResumeBlock);
EmitIfUsed(*this, TerminateLandingPad);
EmitIfUsed(*this, TerminateHandler);
EmitIfUsed(*this, UnreachableBlock);
@@ -215,7 +221,7 @@ void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
// void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site);
llvm::PointerType *PointerTy = Int8PtrTy;
llvm::Type *ProfileFuncArgs[] = { PointerTy, PointerTy };
- const llvm::FunctionType *FunctionTy =
+ llvm::FunctionType *FunctionTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
ProfileFuncArgs, false);
@@ -345,6 +351,9 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
if (Ty->isVariablyModifiedType())
EmitVariablyModifiedType(Ty);
}
+ // Emit a location at the end of the prologue.
+ if (CGDebugInfo *DI = getDebugInfo())
+ DI->EmitLocation(Builder, StartLoc);
}
void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args) {
@@ -364,9 +373,12 @@ static void TryMarkNoThrow(llvm::Function *F) {
for (llvm::Function::iterator FI = F->begin(), FE = F->end(); FI != FE; ++FI)
for (llvm::BasicBlock::iterator
BI = FI->begin(), BE = FI->end(); BI != BE; ++BI)
- if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(&*BI))
+ if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(&*BI)) {
if (!Call->doesNotThrow())
return;
+ } else if (isa<llvm::ResumeInst>(&*BI)) {
+ return;
+ }
F->setDoesNotThrow(true);
}
@@ -400,6 +412,10 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
EmitDestructorBody(Args);
else if (isa<CXXConstructorDecl>(FD))
EmitConstructorBody(Args);
+ else if (getContext().getLangOptions().CUDA &&
+ !CGM.getCodeGenOpts().CUDAIsDevice &&
+ FD->hasAttr<CUDAGlobalAttr>())
+ CGM.getCUDARuntime().EmitDeviceStubBody(*this, Args);
else
EmitFunctionBody(Args);
@@ -645,7 +661,7 @@ static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
llvm::Value *baseSizeInChars
= llvm::ConstantInt::get(CGF.IntPtrTy, baseSizeAndAlign.first.getQuantity());
- const llvm::Type *i8p = Builder.getInt8PtrTy();
+ llvm::Type *i8p = Builder.getInt8PtrTy();
llvm::Value *begin = Builder.CreateBitCast(dest, i8p, "vla.begin");
llvm::Value *end = Builder.CreateInBoundsGEP(dest, sizeInChars, "vla.end");
@@ -690,9 +706,9 @@ CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
// Cast the dest ptr to the appropriate i8 pointer type.
unsigned DestAS =
cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
- const llvm::Type *BP = Builder.getInt8PtrTy(DestAS);
+ llvm::Type *BP = Builder.getInt8PtrTy(DestAS);
if (DestPtr->getType() != BP)
- DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");
+ DestPtr = Builder.CreateBitCast(DestPtr, BP);
// Get size and alignment info for this aggregate.
std::pair<CharUnits, CharUnits> TypeInfo =
@@ -740,7 +756,7 @@ CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
/*isConstant=*/true,
llvm::GlobalVariable::PrivateLinkage,
- NullConstant, llvm::Twine());
+ NullConstant, Twine());
llvm::Value *SrcPtr =
Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy());
@@ -818,7 +834,7 @@ llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
// We have some number of constant-length arrays, so addr should
// have LLVM type [M x [N x [...]]]*. Build a GEP that walks
// down to the first element of addr.
- llvm::SmallVector<llvm::Value*, 8> gepIndices;
+ SmallVector<llvm::Value*, 8> gepIndices;
// GEP down to the array type.
llvm::ConstantInt *zero = Builder.getInt32(0);
@@ -828,7 +844,7 @@ llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
// constant-length arrays than to re-evaluate the array bounds.
uint64_t countFromCLAs = 1;
- const llvm::ArrayType *llvmArrayType =
+ llvm::ArrayType *llvmArrayType =
cast<llvm::ArrayType>(
cast<llvm::PointerType>(addr->getType())->getElementType());
while (true) {
@@ -850,8 +866,7 @@ llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
baseType = arrayType->getElementType();
// Create the actual GEP.
- addr = Builder.CreateInBoundsGEP(addr, gepIndices.begin(),
- gepIndices.end(), "array.begin");
+ addr = Builder.CreateInBoundsGEP(addr, gepIndices, "array.begin");
llvm::Value *numElements
= llvm::ConstantInt::get(SizeTy, countFromCLAs);
@@ -975,6 +990,10 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
case Type::FunctionNoProto:
type = cast<FunctionType>(ty)->getResultType();
break;
+
+ case Type::Atomic:
+ type = cast<AtomicType>(ty)->getValueType();
+ break;
}
} while (type->isVariablyModifiedType());
}
@@ -1018,3 +1037,50 @@ void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
// In theory, we could try to duplicate the peepholes now, but whatever.
protection.Inst->eraseFromParent();
}
+
+llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Value *AnnotationFn,
+ llvm::Value *AnnotatedVal,
+ llvm::StringRef AnnotationStr,
+ SourceLocation Location) {
+ llvm::Value *Args[4] = {
+ AnnotatedVal,
+ Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy),
+ Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy),
+ CGM.EmitAnnotationLineNo(Location)
+ };
+ return Builder.CreateCall(AnnotationFn, Args);
+}
+
+void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
+ assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
+ // FIXME We create a new bitcast for every annotation because that's what
+ // llvm-gcc was doing.
+ for (specific_attr_iterator<AnnotateAttr>
+ ai = D->specific_attr_begin<AnnotateAttr>(),
+ ae = D->specific_attr_end<AnnotateAttr>(); ai != ae; ++ai)
+ EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
+ Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
+ (*ai)->getAnnotation(), D->getLocation());
+}
+
+llvm::Value *CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
+ llvm::Value *V) {
+ assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
+ llvm::Type *VTy = V->getType();
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
+ CGM.Int8PtrTy);
+
+ for (specific_attr_iterator<AnnotateAttr>
+ ai = D->specific_attr_begin<AnnotateAttr>(),
+ ae = D->specific_attr_end<AnnotateAttr>(); ai != ae; ++ai) {
+ // FIXME Always emit the cast inst so we can differentiate between
+ // annotation on the first field of a struct and annotation on the struct
+ // itself.
+ if (VTy != CGM.Int8PtrTy)
+ V = Builder.Insert(new llvm::BitCastInst(V, CGM.Int8PtrTy));
+ V = EmitAnnotationCall(F, V, (*ai)->getAnnotation(), D->getLocation());
+ V = Builder.CreateBitCast(V, VTy);
+ }
+
+ return V;
+}
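The two hooks above lower the GCC-style annotate attribute. A minimal input they would act on (the function body is only for illustration):

    struct S {
      int f __attribute__((annotate("field_tag")));
    };

    int use(S *s) {
      int x __attribute__((annotate("var_tag"))) = 0; // EmitVarAnnotations:
                                                      // llvm.var.annotation
      return s->f + x;   // EmitFieldAnnotations wraps the field pointer in
                         // llvm.ptr.annotation so passes can recover the tag
    }
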
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index f27ed947b8d8..157623da8fdd 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -325,16 +325,8 @@ private:
/// The innermost normal cleanup on the stack.
stable_iterator InnermostNormalCleanup;
- /// The innermost EH cleanup on the stack.
- stable_iterator InnermostEHCleanup;
-
- /// The number of catches on the stack.
- unsigned CatchDepth;
-
- /// The current EH destination index. Reset to FirstCatchIndex
- /// whenever the last EH cleanup is popped.
- unsigned NextEHDestIndex;
- enum { FirstEHDestIndex = 1 };
+ /// The innermost EH scope on the stack.
+ stable_iterator InnermostEHScope;
/// The current set of branch fixups. A branch fixup is a jump to
/// an as-yet unemitted label, i.e. a label for which we don't yet
@@ -353,7 +345,7 @@ private:
/// A a;
/// foo:
/// bar();
- llvm::SmallVector<BranchFixup, 8> BranchFixups;
+ SmallVector<BranchFixup, 8> BranchFixups;
char *allocate(size_t Size);
@@ -362,8 +354,7 @@ private:
public:
EHScopeStack() : StartOfBuffer(0), EndOfBuffer(0), StartOfData(0),
InnermostNormalCleanup(stable_end()),
- InnermostEHCleanup(stable_end()),
- CatchDepth(0), NextEHDestIndex(FirstEHDestIndex) {}
+ InnermostEHScope(stable_end()) {}
~EHScopeStack() { delete[] StartOfBuffer; }
// Variadic templates would make this not terrible.
@@ -435,8 +426,7 @@ public:
return new (Buffer) T(N, a0, a1, a2);
}
- /// Pops a cleanup scope off the stack. This should only be called
- /// by CodeGenFunction::PopCleanupBlock.
+ /// Pops a cleanup scope off the stack. This is private to CGCleanup.cpp.
void popCleanup();
/// Push a set of catch handlers on the stack. The catch is
@@ -444,7 +434,7 @@ public:
/// set on it.
class EHCatchScope *pushCatch(unsigned NumHandlers);
- /// Pops a catch scope off the stack.
+ /// Pops a catch scope off the stack. This is private to CGException.cpp.
void popCatch();
/// Push an exceptions filter on the stack.
@@ -463,7 +453,7 @@ public:
bool empty() const { return StartOfData == EndOfBuffer; }
bool requiresLandingPad() const {
- return (CatchDepth || hasEHCleanups());
+ return InnermostEHScope != stable_end();
}
/// Determines whether there are any normal cleanups on the stack.
@@ -476,19 +466,13 @@ public:
stable_iterator getInnermostNormalCleanup() const {
return InnermostNormalCleanup;
}
- stable_iterator getInnermostActiveNormalCleanup() const; // CGException.h
+ stable_iterator getInnermostActiveNormalCleanup() const;
- /// Determines whether there are any EH cleanups on the stack.
- bool hasEHCleanups() const {
- return InnermostEHCleanup != stable_end();
+ stable_iterator getInnermostEHScope() const {
+ return InnermostEHScope;
}
- /// Returns the innermost EH cleanup on the stack, or stable_end()
- /// if there are no EH cleanups.
- stable_iterator getInnermostEHCleanup() const {
- return InnermostEHCleanup;
- }
- stable_iterator getInnermostActiveEHCleanup() const; // CGException.h
+ stable_iterator getInnermostActiveEHScope() const;
/// An unstable reference to a scope-stack depth. Invalidated by
/// pushes but not pops.
@@ -515,10 +499,6 @@ public:
/// Translates an iterator into a stable_iterator.
stable_iterator stabilize(iterator it) const;
- /// Finds the nearest cleanup enclosing the given iterator.
- /// Returns stable_iterator::invalid() if there are no such cleanups.
- stable_iterator getEnclosingEHCleanup(iterator it) const;
-
/// Turn a stable reference to a scope depth into a unstable pointer
/// to the EH stack.
iterator find(stable_iterator save) const;
@@ -547,9 +527,6 @@ public:
/// Clears the branch-fixups list. This should only be called by
/// ResolveAllBranchFixups.
void clearFixups() { BranchFixups.clear(); }
-
- /// Gets the next EH destination index.
- unsigned getNextEHDestIndex() { return NextEHDestIndex++; }
};
/// CodeGenFunction - This class organizes the per-function state that is used
@@ -580,26 +557,6 @@ public:
unsigned Index;
};
- /// An unwind destination is an abstract label, branching to which
- /// may require a jump out through EH cleanups.
- struct UnwindDest {
- UnwindDest() : Block(0), ScopeDepth(), Index(0) {}
- UnwindDest(llvm::BasicBlock *Block,
- EHScopeStack::stable_iterator Depth,
- unsigned Index)
- : Block(Block), ScopeDepth(Depth), Index(Index) {}
-
- bool isValid() const { return Block != 0; }
- llvm::BasicBlock *getBlock() const { return Block; }
- EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
- unsigned getDestIndex() const { return Index; }
-
- private:
- llvm::BasicBlock *Block;
- EHScopeStack::stable_iterator ScopeDepth;
- unsigned Index;
- };
-
CodeGenModule &CGM; // Per-module state.
const TargetInfo &Target;
@@ -629,9 +586,6 @@ public:
/// iff the function has no return value.
llvm::Value *ReturnValue;
- /// RethrowBlock - Unified rethrow block.
- UnwindDest RethrowBlock;
-
/// AllocaInsertPoint - This is an instruction in the entry block before which
/// we prefer to insert allocas.
llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
@@ -652,16 +606,18 @@ public:
/// i32s containing the indexes of the cleanup destinations.
llvm::AllocaInst *NormalCleanupDest;
- llvm::AllocaInst *EHCleanupDest;
unsigned NextCleanupDestIndex;
- /// The exception slot. All landing pads write the current
- /// exception pointer into this alloca.
+ /// EHResumeBlock - Unified block containing a call to llvm.eh.resume.
+ llvm::BasicBlock *EHResumeBlock;
+
+ /// The exception slot. All landing pads write the current exception pointer
+ /// into this alloca.
llvm::Value *ExceptionSlot;
- /// The selector slot. Under the MandatoryCleanup model, all
- /// landing pads write the current selector value into this alloca.
+ /// The selector slot. Under the MandatoryCleanup model, all landing pads
+ /// write the current selector value into this alloca.
llvm::AllocaInst *EHSelectorSlot;
/// Emits a landing pad for the current EH stack.
@@ -681,7 +637,7 @@ public:
public:
/// ObjCEHValueStack - Stack of Objective-C exception values, used for
/// rethrows.
- llvm::SmallVector<llvm::Value*, 8> ObjCEHValueStack;
+ SmallVector<llvm::Value*, 8> ObjCEHValueStack;
/// A class controlling the emission of a finally block.
class FinallyInfo {
@@ -872,7 +828,7 @@ public:
/// The given basic block lies in the current EH scope, but may be a
/// target of a potentially scope-crossing jump; get a stable handle
/// to which we can perform this jump later.
- JumpDest getJumpDestInCurrentScope(llvm::StringRef Name = llvm::StringRef()) {
+ JumpDest getJumpDestInCurrentScope(StringRef Name = StringRef()) {
return getJumpDestInCurrentScope(createBasicBlock(Name));
}
@@ -886,14 +842,13 @@ public:
/// a conservatively correct answer for this method.
bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const;
- /// EmitBranchThroughEHCleanup - Emit a branch from the current
- /// insert block through the EH cleanup handling code (if any) and
- /// then on to \arg Dest.
- void EmitBranchThroughEHCleanup(UnwindDest Dest);
+ /// popCatchScope - Pops the catch scope at the top of the EHScope
+ /// stack, emitting any required code (other than the catch handlers
+ /// themselves).
+ void popCatchScope();
- /// getRethrowDest - Returns the unified outermost-scope rethrow
- /// destination.
- UnwindDest getRethrowDest();
+ llvm::BasicBlock *getEHResumeBlock();
+ llvm::BasicBlock *getEHDispatchBlock(EHScopeStack::stable_iterator scope);
/// An object to manage conditionally-evaluated expressions.
class ConditionalEvaluation {
@@ -1089,7 +1044,7 @@ private:
JumpDest BreakBlock;
JumpDest ContinueBlock;
};
- llvm::SmallVector<BreakContinue, 8> BreakContinueStack;
+ SmallVector<BreakContinue, 8> BreakContinueStack;
/// SwitchInsn - This is the nearest enclosing switch instruction. It is null
/// if the current context is not in a switch.
@@ -1135,7 +1090,7 @@ private:
/// ByrefValueInfoMap - For each __block variable, contains a pair of the LLVM
/// type as well as the field number that contains the actual data.
- llvm::DenseMap<const ValueDecl *, std::pair<const llvm::Type *,
+ llvm::DenseMap<const ValueDecl *, std::pair<llvm::Type *,
unsigned> > ByRefValueInfo;
llvm::BasicBlock *TerminateLandingPad;
@@ -1161,13 +1116,17 @@ public:
const LangOptions &getLangOptions() const { return CGM.getLangOptions(); }
- /// Returns a pointer to the function's exception object slot, which
- /// is assigned in every landing pad.
+ /// Returns a pointer to the function's exception object and selector slot,
+ /// which is assigned in every landing pad.
llvm::Value *getExceptionSlot();
llvm::Value *getEHSelectorSlot();
+ /// Returns the contents of the function's exception object and selector
+ /// slots.
+ llvm::Value *getExceptionFromSlot();
+ llvm::Value *getSelectorFromSlot();
+
llvm::Value *getNormalCleanupDestSlot();
- llvm::Value *getEHCleanupDestSlot();
llvm::BasicBlock *getUnreachableBlock() {
if (!UnreachableBlock) {
@@ -1248,9 +1207,8 @@ public:
/// GenerateObjCGetter - Synthesize an Objective-C property getter function.
void GenerateObjCGetter(ObjCImplementationDecl *IMP,
const ObjCPropertyImplDecl *PID);
- void GenerateObjCGetterBody(ObjCIvarDecl *Ivar, bool IsAtomic, bool IsStrong);
- void GenerateObjCAtomicSetterBody(ObjCMethodDecl *OMD,
- ObjCIvarDecl *Ivar);
+ void generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
+ const ObjCPropertyImplDecl *propImpl);
void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
ObjCMethodDecl *MD, bool ctor);
@@ -1259,6 +1217,8 @@ public:
/// for the given property.
void GenerateObjCSetter(ObjCImplementationDecl *IMP,
const ObjCPropertyImplDecl *PID);
+ void generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
+ const ObjCPropertyImplDecl *propImpl);
bool IndirectObjCSetterArg(const CGFunctionInfo &FI);
bool IvarTypeWithAggrGCObjects(QualType Ty);
@@ -1269,7 +1229,7 @@ public:
llvm::Value *EmitBlockLiteral(const BlockExpr *);
llvm::Constant *BuildDescriptorBlockDecl(const BlockExpr *,
const CGBlockInfo &Info,
- const llvm::StructType *,
+ llvm::StructType *,
llvm::Constant *BlockVarLayout);
llvm::Function *GenerateBlockFunction(GlobalDecl GD,
@@ -1298,7 +1258,7 @@ public:
return GetAddrOfBlockDecl(E->getDecl(), E->isByRef());
}
llvm::Value *GetAddrOfBlockDecl(const VarDecl *var, bool ByRef);
- const llvm::Type *BuildByRefType(const VarDecl *var);
+ llvm::Type *BuildByRefType(const VarDecl *var);
void GenerateCode(GlobalDecl GD, llvm::Function *Fn,
const CGFunctionInfo &FnInfo);
@@ -1352,7 +1312,7 @@ public:
/// GetVTablePtr - Return the Value of the vtable pointer member pointed
/// to by This.
- llvm::Value *GetVTablePtr(llvm::Value *This, const llvm::Type *Ty);
+ llvm::Value *GetVTablePtr(llvm::Value *This, llvm::Type *Ty);
/// EnterDtorCleanups - Enter the cleanups necessary to complete the
/// given phase of destruction for a destructor. The end result
@@ -1415,7 +1375,7 @@ public:
static bool hasAggregateLLVMType(QualType T);
/// createBasicBlock - Create an LLVM basic block.
- llvm::BasicBlock *createBasicBlock(llvm::StringRef name = "",
+ llvm::BasicBlock *createBasicBlock(StringRef name = "",
llvm::Function *parent = 0,
llvm::BasicBlock *before = 0) {
#ifdef NDEBUG
@@ -1444,6 +1404,10 @@ public:
/// means the block can be ignored if it is unreachable.
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false);
+ /// EmitBlockAfterUses - Emit the given block somewhere hopefully
+ /// near its uses, and leave the insertion point in it.
+ void EmitBlockAfterUses(llvm::BasicBlock *BB);
+
/// EmitBranch - Emit a branch to the specified basic block from the current
/// insert block, taking care to avoid creation of branches from dummy
/// blocks. It is legal to call this function even if there is no current
@@ -1486,8 +1450,8 @@ public:
/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block. The caller is responsible for setting an appropriate alignment on
/// the alloca.
- llvm::AllocaInst *CreateTempAlloca(const llvm::Type *Ty,
- const llvm::Twine &Name = "tmp");
+ llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty,
+ const Twine &Name = "tmp");
/// InitTempAlloca - Provide an initial value for the given alloca.
void InitTempAlloca(llvm::AllocaInst *Alloca, llvm::Value *Value);
@@ -1497,17 +1461,19 @@ public:
/// value needs to be stored into an alloca (for example, to avoid explicit
/// PHI construction), but the type is the IR type, not the type appropriate
/// for storing in memory.
- llvm::AllocaInst *CreateIRTemp(QualType T, const llvm::Twine &Name = "tmp");
+ llvm::AllocaInst *CreateIRTemp(QualType T, const Twine &Name = "tmp");
/// CreateMemTemp - Create a temporary memory object of the given type, with
/// appropriate alignment.
- llvm::AllocaInst *CreateMemTemp(QualType T, const llvm::Twine &Name = "tmp");
+ llvm::AllocaInst *CreateMemTemp(QualType T, const Twine &Name = "tmp");
/// CreateAggTemp - Create a temporary memory object for the given
/// aggregate type.
- AggValueSlot CreateAggTemp(QualType T, const llvm::Twine &Name = "tmp") {
+ AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp") {
return AggValueSlot::forAddr(CreateMemTemp(T, Name), T.getQualifiers(),
- false);
+ AggValueSlot::IsNotDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
}
/// Emit a cast to void* in the appropriate address space.
@@ -1708,8 +1674,8 @@ public:
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
bool ForVirtualBase, llvm::Value *This);
- void EmitNewArrayInitializer(const CXXNewExpr *E, llvm::Value *NewPtr,
- llvm::Value *NumElements);
+ void EmitNewArrayInitializer(const CXXNewExpr *E, QualType elementType,
+ llvm::Value *NewPtr, llvm::Value *NumElements);
void EmitCXXTemporary(const CXXTemporary *Temporary, llvm::Value *Ptr);
@@ -2074,18 +2040,18 @@ public:
ReturnValueSlot ReturnValue = ReturnValueSlot());
llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
- llvm::ArrayRef<llvm::Value *> Args,
- const llvm::Twine &Name = "");
+ ArrayRef<llvm::Value *> Args,
+ const Twine &Name = "");
llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
- const llvm::Twine &Name = "");
+ const Twine &Name = "");
llvm::Value *BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This,
- const llvm::Type *Ty);
+ llvm::Type *Ty);
llvm::Value *BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
- llvm::Value *This, const llvm::Type *Ty);
+ llvm::Value *This, llvm::Type *Ty);
llvm::Value *BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
NestedNameSpecifier *Qual,
- const llvm::Type *Ty);
+ llvm::Type *Ty);
llvm::Value *BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl *DD,
CXXDtorType Type,
@@ -2110,6 +2076,9 @@ public:
const CXXMethodDecl *MD,
ReturnValueSlot ReturnValue);
+ RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
+ ReturnValueSlot ReturnValue);
+
RValue EmitBuiltinExpr(const FunctionDecl *FD,
unsigned BuiltinID, const CallExpr *E);
@@ -2122,14 +2091,14 @@ public:
llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitNeonCall(llvm::Function *F,
- llvm::SmallVectorImpl<llvm::Value*> &O,
+ SmallVectorImpl<llvm::Value*> &O,
const char *name,
unsigned shift = 0, bool rightshift = false);
llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx);
- llvm::Value *EmitNeonShiftVector(llvm::Value *V, const llvm::Type *Ty,
+ llvm::Value *EmitNeonShiftVector(llvm::Value *V, llvm::Type *Ty,
bool negateForRightShift);
- llvm::Value *BuildVector(const llvm::SmallVectorImpl<llvm::Value*> &Ops);
+ llvm::Value *BuildVector(const SmallVectorImpl<llvm::Value*> &Ops);
llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
@@ -2163,7 +2132,7 @@ public:
bool ignored);
llvm::Value *EmitARCRetain(QualType type, llvm::Value *value);
llvm::Value *EmitARCRetainNonBlock(llvm::Value *value);
- llvm::Value *EmitARCRetainBlock(llvm::Value *value);
+ llvm::Value *EmitARCRetainBlock(llvm::Value *value, bool mandatory);
void EmitARCRelease(llvm::Value *value, bool precise);
llvm::Value *EmitARCAutorelease(llvm::Value *value);
llvm::Value *EmitARCAutoreleaseReturnValue(llvm::Value *value);
@@ -2175,10 +2144,13 @@ public:
std::pair<LValue,llvm::Value*>
EmitARCStoreStrong(const BinaryOperator *e, bool ignored);
+ llvm::Value *EmitObjCThrowOperand(const Expr *expr);
+
llvm::Value *EmitObjCProduceObject(QualType T, llvm::Value *Ptr);
llvm::Value *EmitObjCConsumeObject(QualType T, llvm::Value *Ptr);
llvm::Value *EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr);
+ llvm::Value *EmitARCExtendBlockObject(const Expr *expr);
llvm::Value *EmitARCRetainScalarExpr(const Expr *expr);
llvm::Value *EmitARCRetainAutoreleaseScalarExpr(const Expr *expr);
@@ -2311,6 +2283,25 @@ public:
void EmitCXXThrowExpr(const CXXThrowExpr *E);
+ RValue EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest = 0);
+
+ //===--------------------------------------------------------------------===//
+ // Annotations Emission
+ //===--------------------------------------------------------------------===//
+
+ /// Emit an annotation call (intrinsic or builtin).
+ llvm::Value *EmitAnnotationCall(llvm::Value *AnnotationFn,
+ llvm::Value *AnnotatedVal,
+ llvm::StringRef AnnotationStr,
+ SourceLocation Location);
+
+ /// Emit local annotations for the local variable V, declared by D.
+ void EmitVarAnnotations(const VarDecl *D, llvm::Value *V);
+
+ /// Emit field annotations for the given field & value. Returns the
+ /// annotation result.
+ llvm::Value *EmitFieldAnnotations(const FieldDecl *D, llvm::Value *V);
+
//===--------------------------------------------------------------------===//
// Internal Helpers
//===--------------------------------------------------------------------===//
@@ -2370,7 +2361,7 @@ private:
/// Ty, into individual arguments on the provided vector \arg Args. See
/// ABIArgInfo::Expand.
void ExpandTypeToArgs(QualType Ty, RValue Src,
- llvm::SmallVector<llvm::Value*, 16> &Args,
+ SmallVector<llvm::Value*, 16> &Args,
llvm::FunctionType *IRFuncTy);
llvm::Value* EmitAsmInput(const AsmStmt &S,
@@ -2439,7 +2430,7 @@ private:
void EmitDeclMetadata();
CodeGenModule::ByrefHelpers *
- buildByrefHelpers(const llvm::StructType &byrefType,
+ buildByrefHelpers(llvm::StructType &byrefType,
const AutoVarEmission &emission);
};
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
index 0668039a892c..924ec8448e86 100644
--- a/lib/CodeGen/CodeGenModule.cpp
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -16,8 +16,10 @@
#include "CodeGenFunction.h"
#include "CodeGenTBAA.h"
#include "CGCall.h"
+#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
+#include "CGOpenCLRuntime.h"
#include "TargetInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "clang/AST/ASTContext.h"
@@ -27,7 +29,6 @@
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/RecordLayout.h"
-#include "clang/Basic/Builtins.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
@@ -44,8 +45,10 @@
using namespace clang;
using namespace CodeGen;
+static const char AnnotationSection[] = "llvm.metadata";
+
static CGCXXABI &createCXXABI(CodeGenModule &CGM) {
- switch (CGM.getContext().Target.getCXXABI()) {
+ switch (CGM.getContext().getTargetInfo().getCXXABI()) {
case CXXABI_ARM: return *CreateARMCXXABI(CGM);
case CXXABI_Itanium: return *CreateItaniumCXXABI(CGM);
case CXXABI_Microsoft: return *CreateMicrosoftCXXABI(CGM);
@@ -58,22 +61,25 @@ static CGCXXABI &createCXXABI(CodeGenModule &CGM) {
CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
llvm::Module &M, const llvm::TargetData &TD,
- Diagnostic &diags)
+ DiagnosticsEngine &diags)
: Context(C), Features(C.getLangOptions()), CodeGenOpts(CGO), TheModule(M),
TheTargetData(TD), TheTargetCodeGenInfo(0), Diags(diags),
ABI(createCXXABI(*this)),
Types(C, M, TD, getTargetCodeGenInfo().getABIInfo(), ABI, CGO),
TBAA(0),
- VTables(*this), Runtime(0), DebugInfo(0), ARCData(0), RRData(0),
- CFConstantStringClassRef(0), ConstantStringClassRef(0),
+ VTables(*this), ObjCRuntime(0), OpenCLRuntime(0), CUDARuntime(0),
+ DebugInfo(0), ARCData(0), RRData(0), CFConstantStringClassRef(0),
+ ConstantStringClassRef(0), NSConstantStringType(0),
VMContext(M.getContext()),
- NSConcreteGlobalBlockDecl(0), NSConcreteStackBlockDecl(0),
NSConcreteGlobalBlock(0), NSConcreteStackBlock(0),
- BlockObjectAssignDecl(0), BlockObjectDisposeDecl(0),
BlockObjectAssign(0), BlockObjectDispose(0),
BlockDescriptorType(0), GenericBlockLiteralType(0) {
if (Features.ObjC1)
- createObjCRuntime();
+ createObjCRuntime();
+ if (Features.OpenCL)
+ createOpenCLRuntime();
+ if (Features.CUDA)
+ createCUDARuntime();
// Enable TBAA unless it's suppressed.
if (!CodeGenOpts.RelaxedAliasing && CodeGenOpts.OptimizationLevel > 0)
@@ -98,17 +104,20 @@ CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
Int8Ty = llvm::Type::getInt8Ty(LLVMContext);
Int32Ty = llvm::Type::getInt32Ty(LLVMContext);
Int64Ty = llvm::Type::getInt64Ty(LLVMContext);
- PointerWidthInBits = C.Target.getPointerWidth(0);
+ PointerWidthInBits = C.getTargetInfo().getPointerWidth(0);
PointerAlignInBytes =
- C.toCharUnitsFromBits(C.Target.getPointerAlign(0)).getQuantity();
- IntTy = llvm::IntegerType::get(LLVMContext, C.Target.getIntWidth());
+ C.toCharUnitsFromBits(C.getTargetInfo().getPointerAlign(0)).getQuantity();
+ IntTy = llvm::IntegerType::get(LLVMContext, C.getTargetInfo().getIntWidth());
IntPtrTy = llvm::IntegerType::get(LLVMContext, PointerWidthInBits);
Int8PtrTy = Int8Ty->getPointerTo(0);
Int8PtrPtrTy = Int8PtrTy->getPointerTo(0);
}
CodeGenModule::~CodeGenModule() {
- delete Runtime;
+ delete ObjCRuntime;
+ delete OpenCLRuntime;
+ delete CUDARuntime;
+ delete TheTargetCodeGenInfo;
delete &ABI;
delete TBAA;
delete DebugInfo;
@@ -118,21 +127,29 @@ CodeGenModule::~CodeGenModule() {
void CodeGenModule::createObjCRuntime() {
if (!Features.NeXTRuntime)
- Runtime = CreateGNUObjCRuntime(*this);
+ ObjCRuntime = CreateGNUObjCRuntime(*this);
else
- Runtime = CreateMacObjCRuntime(*this);
+ ObjCRuntime = CreateMacObjCRuntime(*this);
+}
+
+void CodeGenModule::createOpenCLRuntime() {
+ OpenCLRuntime = new CGOpenCLRuntime(*this);
+}
+
+void CodeGenModule::createCUDARuntime() {
+ CUDARuntime = CreateNVCUDARuntime(*this);
}
void CodeGenModule::Release() {
EmitDeferred();
EmitCXXGlobalInitFunc();
EmitCXXGlobalDtorFunc();
- if (Runtime)
- if (llvm::Function *ObjCInitFunction = Runtime->ModuleInitFunction())
+ if (ObjCRuntime)
+ if (llvm::Function *ObjCInitFunction = ObjCRuntime->ModuleInitFunction())
AddGlobalCtor(ObjCInitFunction);
EmitCtorList(GlobalCtors, "llvm.global_ctors");
EmitCtorList(GlobalDtors, "llvm.global_dtors");
- EmitAnnotations();
+ EmitGlobalAnnotations();
EmitLLVMUsed();
SimplifyPersonality();
@@ -142,6 +159,9 @@ void CodeGenModule::Release() {
if (getCodeGenOpts().EmitGcovArcs || getCodeGenOpts().EmitGcovNotes)
EmitCoverageFile();
+
+ if (DebugInfo)
+ DebugInfo->finalize();
}
void CodeGenModule::UpdateCompletedType(const TagDecl *TD) {
@@ -163,11 +183,11 @@ void CodeGenModule::DecorateInstruction(llvm::Instruction *Inst,
}
bool CodeGenModule::isTargetDarwin() const {
- return getContext().Target.getTriple().isOSDarwin();
+ return getContext().getTargetInfo().getTriple().isOSDarwin();
}
-void CodeGenModule::Error(SourceLocation loc, llvm::StringRef error) {
- unsigned diagID = getDiags().getCustomDiagID(Diagnostic::Error, error);
+void CodeGenModule::Error(SourceLocation loc, StringRef error) {
+ unsigned diagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error, error);
getDiags().Report(Context.getFullLoc(loc), diagID);
}
@@ -177,7 +197,7 @@ void CodeGenModule::ErrorUnsupported(const Stmt *S, const char *Type,
bool OmitOnError) {
if (OmitOnError && getDiags().hasErrorOccurred())
return;
- unsigned DiagID = getDiags().getCustomDiagID(Diagnostic::Error,
+ unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error,
"cannot compile this %0 yet");
std::string Msg = Type;
getDiags().Report(Context.getFullLoc(S->getLocStart()), DiagID)
@@ -190,7 +210,7 @@ void CodeGenModule::ErrorUnsupported(const Decl *D, const char *Type,
bool OmitOnError) {
if (OmitOnError && getDiags().hasErrorOccurred())
return;
- unsigned DiagID = getDiags().getCustomDiagID(Diagnostic::Error,
+ unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error,
"cannot compile this %0 yet");
std::string Msg = Type;
getDiags().Report(Context.getFullLoc(D->getLocation()), DiagID) << Msg;
@@ -281,10 +301,10 @@ void CodeGenModule::setTypeVisibility(llvm::GlobalValue *GV,
GV->setUnnamedAddr(true);
}
-llvm::StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
+StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
const NamedDecl *ND = cast<NamedDecl>(GD.getDecl());
- llvm::StringRef &Str = MangledDeclNames[GD.getCanonicalDecl()];
+ StringRef &Str = MangledDeclNames[GD.getCanonicalDecl()];
if (!Str.empty())
return Str;
@@ -313,7 +333,7 @@ llvm::StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
char *Name = MangledNamesAllocator.Allocate<char>(Length);
std::copy(Buffer.begin(), Buffer.end(), Name);
- Str = llvm::StringRef(Name, Length);
+ Str = StringRef(Name, Length);
return Str;
}
@@ -333,7 +353,7 @@ void CodeGenModule::getBlockMangledName(GlobalDecl GD, MangleBuffer &Buffer,
MangleCtx.mangleBlock(cast<DeclContext>(D), BD, Out);
}
-llvm::GlobalValue *CodeGenModule::GetGlobalValue(llvm::StringRef Name) {
+llvm::GlobalValue *CodeGenModule::GetGlobalValue(StringRef Name) {
return getModule().getNamedValue(Name);
}
@@ -380,22 +400,6 @@ void CodeGenModule::EmitCtorList(const CtorList &Fns, const char *GlobalName) {
}
}
-void CodeGenModule::EmitAnnotations() {
- if (Annotations.empty())
- return;
-
- // Create a new global variable for the ConstantStruct in the Module.
- llvm::Constant *Array =
- llvm::ConstantArray::get(llvm::ArrayType::get(Annotations[0]->getType(),
- Annotations.size()),
- Annotations);
- llvm::GlobalValue *gv =
- new llvm::GlobalVariable(TheModule, Array->getType(), false,
- llvm::GlobalValue::AppendingLinkage, Array,
- "llvm.global.annotations");
- gv->setSection("llvm.metadata");
-}
-
llvm::GlobalValue::LinkageTypes
CodeGenModule::getFunctionLinkage(const FunctionDecl *D) {
GVALinkage Linkage = getContext().GetGVALinkageForFunction(D);
@@ -413,7 +417,12 @@ CodeGenModule::getFunctionLinkage(const FunctionDecl *D) {
// definition somewhere else, so we can use available_externally linkage.
if (Linkage == GVA_C99Inline)
return llvm::Function::AvailableExternallyLinkage;
-
+
+ // Note that Apple's kernel linker doesn't support symbol
+ // coalescing, so we need to avoid linkonce and weak linkages there.
+ // Normally, this means we just map to internal, but for explicit
+ // instantiations we'll map to external.
+
// In C++, the compiler has to emit a definition in every translation unit
// that references the function. We should use linkonce_odr because
// a) if all references in this translation unit are optimized away, we
@@ -432,7 +441,7 @@ CodeGenModule::getFunctionLinkage(const FunctionDecl *D) {
if (Linkage == GVA_ExplicitTemplateInstantiation)
return !Context.getLangOptions().AppleKext
? llvm::Function::WeakODRLinkage
- : llvm::Function::InternalLinkage;
+ : llvm::Function::ExternalLinkage;
// Otherwise, we have strong external linkage.
assert(Linkage == GVA_StrongExternal);
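// For instance, under -fapple-kext an explicit instantiation such as
//
//   template class std::vector<int>;
//
// now gets external rather than internal linkage, so other translation
// units can still link against the instantiation.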
@@ -460,29 +469,54 @@ void CodeGenModule::SetLLVMFunctionAttributes(const Decl *D,
F->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
}
+/// Determines whether the language options require us to model
+/// unwind exceptions. We treat -fexceptions as mandating this
+/// except under the fragile ObjC ABI with only ObjC exceptions
+/// enabled. This means, for example, that C with -fexceptions
+/// enables this.
+static bool hasUnwindExceptions(const LangOptions &Features) {
+ // If exceptions are completely disabled, obviously this is false.
+ if (!Features.Exceptions) return false;
+
+ // If C++ exceptions are enabled, this is true.
+ if (Features.CXXExceptions) return true;
+
+ // If ObjC exceptions are enabled, this depends on the ABI.
+ if (Features.ObjCExceptions) {
+ if (!Features.ObjCNonFragileABI) return false;
+ }
+
+ return true;
+}
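// Worked example (each outcome follows directly from the branches above):
//
//   -fno-exceptions                                 -> false
//   -fexceptions -fcxx-exceptions                   -> true
//   -fexceptions -fobjc-exceptions, fragile ABI     -> false
//   -fexceptions -fobjc-exceptions, non-fragile ABI -> true
//   -fexceptions alone (e.g. plain C)               -> true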
+
void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
llvm::Function *F) {
if (CodeGenOpts.UnwindTables)
F->setHasUWTable();
- if (!Features.Exceptions && !Features.ObjCNonFragileABI)
+ if (!hasUnwindExceptions(Features))
F->addFnAttr(llvm::Attribute::NoUnwind);
- if (D->hasAttr<AlwaysInlineAttr>())
- F->addFnAttr(llvm::Attribute::AlwaysInline);
-
- if (D->hasAttr<NakedAttr>())
+ if (D->hasAttr<NakedAttr>()) {
+ // Naked implies noinline: we should not be inlining such functions.
F->addFnAttr(llvm::Attribute::Naked);
+ F->addFnAttr(llvm::Attribute::NoInline);
+ }
if (D->hasAttr<NoInlineAttr>())
F->addFnAttr(llvm::Attribute::NoInline);
+ // (noinline wins over always_inline, and we can't specify both in IR)
+ if (D->hasAttr<AlwaysInlineAttr>() &&
+ !F->hasFnAttr(llvm::Attribute::NoInline))
+ F->addFnAttr(llvm::Attribute::AlwaysInline);
+
if (isa<CXXConstructorDecl>(D) || isa<CXXDestructorDecl>(D))
F->setUnnamedAddr(true);
- if (Features.getStackProtectorMode() == LangOptions::SSPOn)
+ if (Features.getStackProtector() == LangOptions::SSPOn)
F->addFnAttr(llvm::Attribute::StackProtect);
- else if (Features.getStackProtectorMode() == LangOptions::SSPReq)
+ else if (Features.getStackProtector() == LangOptions::SSPReq)
F->addFnAttr(llvm::Attribute::StackProtectReq);
unsigned alignment = D->getMaxAlignment() / Context.getCharWidth();
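// How the inlining-related rules above combine on a hypothetical function
// (GNU attribute spelling assumed):
//
//   __attribute__((naked, always_inline)) void f(void);
//
// The naked case adds Naked and NoInline first, so the later always_inline
// check sees NoInline already present and is skipped: noinline wins, as the
// parenthetical above says.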
@@ -570,7 +604,7 @@ void CodeGenModule::EmitLLVMUsed() {
if (LLVMUsed.empty())
return;
- const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
+ llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
// Convert LLVMUsed to what ConstantArray needs.
std::vector<llvm::Constant*> UsedArray;
@@ -597,7 +631,7 @@ void CodeGenModule::EmitLLVMUsed() {
void CodeGenModule::EmitDeferred() {
// Emit code for any potentially referenced deferred decls. Since a
// previously unused static decl may become used during the generation of code
- // for a static function, iterate until no changes are made.
+ // for a static function, iterate until no changes are made.
while (!DeferredDeclsToEmit.empty() || !DeferredVTables.empty()) {
if (!DeferredVTables.empty()) {
@@ -618,7 +652,7 @@ void CodeGenModule::EmitDeferred() {
// ignore these cases.
//
// TODO: That said, looking this up multiple times is very wasteful.
- llvm::StringRef Name = getMangledName(D);
+ StringRef Name = getMangledName(D);
llvm::GlobalValue *CGRef = GetGlobalValue(Name);
assert(CGRef && "Deferred decl wasn't referenced?");
@@ -635,54 +669,78 @@ void CodeGenModule::EmitDeferred() {
}
}
-/// EmitAnnotateAttr - Generate the llvm::ConstantStruct which contains the
-/// annotation information for a given GlobalValue. The annotation struct is
-/// {i8 *, i8 *, i8 *, i32}. The first field is a constant expression, the
-/// GlobalValue being annotated. The second field is the constant string
-/// created from the AnnotateAttr's annotation. The third field is a constant
-/// string containing the name of the translation unit. The fourth field is
-/// the line number in the file of the annotated value declaration.
-///
-/// FIXME: this does not unique the annotation string constants, as llvm-gcc
-/// appears to.
-///
+void CodeGenModule::EmitGlobalAnnotations() {
+ if (Annotations.empty())
+ return;
+
+ // Create a new global variable for the ConstantStruct in the Module.
+ llvm::Constant *Array = llvm::ConstantArray::get(llvm::ArrayType::get(
+ Annotations[0]->getType(), Annotations.size()), Annotations);
+ llvm::GlobalValue *gv = new llvm::GlobalVariable(getModule(),
+ Array->getType(), false, llvm::GlobalValue::AppendingLinkage, Array,
+ "llvm.global.annotations");
+ gv->setSection(AnnotationSection);
+}
+
+llvm::Constant *CodeGenModule::EmitAnnotationString(llvm::StringRef Str) {
+ llvm::StringMap<llvm::Constant*>::iterator i = AnnotationStrings.find(Str);
+ if (i != AnnotationStrings.end())
+ return i->second;
+
+ // Not found yet, create a new global.
+ llvm::Constant *s = llvm::ConstantArray::get(getLLVMContext(), Str, true);
+ llvm::GlobalValue *gv = new llvm::GlobalVariable(getModule(), s->getType(),
+ true, llvm::GlobalValue::PrivateLinkage, s, ".str");
+ gv->setSection(AnnotationSection);
+ gv->setUnnamedAddr(true);
+ AnnotationStrings[Str] = gv;
+ return gv;
+}
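// Usage sketch (illustrative, not real call sites): the StringMap lookup
// above uniques repeated annotation strings onto one global.
//
//   llvm::Constant *a = EmitAnnotationString("hot");
//   llvm::Constant *b = EmitAnnotationString("hot");
//   assert(a == b && "identical annotation strings share one global");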
+
+llvm::Constant *CodeGenModule::EmitAnnotationUnit(SourceLocation Loc) {
+ SourceManager &SM = getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc);
+ if (PLoc.isValid())
+ return EmitAnnotationString(PLoc.getFilename());
+ return EmitAnnotationString(SM.getBufferName(Loc));
+}
+
+llvm::Constant *CodeGenModule::EmitAnnotationLineNo(SourceLocation L) {
+ SourceManager &SM = getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(L);
+ unsigned LineNo = PLoc.isValid() ? PLoc.getLine() :
+ SM.getExpansionLineNumber(L);
+ return llvm::ConstantInt::get(Int32Ty, LineNo);
+}
+
llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV,
const AnnotateAttr *AA,
- unsigned LineNo) {
- llvm::Module *M = &getModule();
-
- // get [N x i8] constants for the annotation string, and the filename string
- // which are the 2nd and 3rd elements of the global annotation structure.
- const llvm::Type *SBP = llvm::Type::getInt8PtrTy(VMContext);
- llvm::Constant *anno = llvm::ConstantArray::get(VMContext,
- AA->getAnnotation(), true);
- llvm::Constant *unit = llvm::ConstantArray::get(VMContext,
- M->getModuleIdentifier(),
- true);
-
- // Get the two global values corresponding to the ConstantArrays we just
- // created to hold the bytes of the strings.
- llvm::GlobalValue *annoGV =
- new llvm::GlobalVariable(*M, anno->getType(), false,
- llvm::GlobalValue::PrivateLinkage, anno,
- GV->getName());
- // translation unit name string, emitted into the llvm.metadata section.
- llvm::GlobalValue *unitGV =
- new llvm::GlobalVariable(*M, unit->getType(), false,
- llvm::GlobalValue::PrivateLinkage, unit,
- ".str");
- unitGV->setUnnamedAddr(true);
+ SourceLocation L) {
+ // Get the globals for file name, annotation, and the line number.
+ llvm::Constant *AnnoGV = EmitAnnotationString(AA->getAnnotation()),
+ *UnitGV = EmitAnnotationUnit(L),
+ *LineNoCst = EmitAnnotationLineNo(L);
// Create the ConstantStruct for the global annotation.
llvm::Constant *Fields[4] = {
- llvm::ConstantExpr::getBitCast(GV, SBP),
- llvm::ConstantExpr::getBitCast(annoGV, SBP),
- llvm::ConstantExpr::getBitCast(unitGV, SBP),
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), LineNo)
+ llvm::ConstantExpr::getBitCast(GV, Int8PtrTy),
+ llvm::ConstantExpr::getBitCast(AnnoGV, Int8PtrTy),
+ llvm::ConstantExpr::getBitCast(UnitGV, Int8PtrTy),
+ LineNoCst
};
return llvm::ConstantStruct::getAnon(Fields);
}
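// The struct assembled above keeps the historical { i8*, i8*, i8*, i32 }
// layout: the annotated value, the annotation string, the source file name
// (previously the translation unit name), and the line number.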
+void CodeGenModule::AddGlobalAnnotations(const ValueDecl *D,
+ llvm::GlobalValue *GV) {
+ assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
+ // Get the struct elements for these annotations.
+ for (specific_attr_iterator<AnnotateAttr>
+ ai = D->specific_attr_begin<AnnotateAttr>(),
+ ae = D->specific_attr_end<AnnotateAttr>(); ai != ae; ++ai)
+ Annotations.push_back(EmitAnnotateAttr(GV, *ai, D->getLocation()));
+}
+
bool CodeGenModule::MayDeferGeneration(const ValueDecl *Global) {
// Never defer when EmitAllDecls is specified.
if (Features.EmitAllDecls)
@@ -695,7 +753,7 @@ llvm::Constant *CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
const AliasAttr *AA = VD->getAttr<AliasAttr>();
assert(AA && "No alias?");
- const llvm::Type *DeclTy = getTypes().ConvertTypeForMem(VD->getType());
+ llvm::Type *DeclTy = getTypes().ConvertTypeForMem(VD->getType());
// See if there is already something with the target's name in the module.
llvm::GlobalValue *Entry = GetGlobalValue(AA->getAliasee());
@@ -728,34 +786,45 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
if (Global->hasAttr<AliasAttr>())
return EmitAliasDefinition(GD);
- // Ignore declarations, they will be emitted on their first use.
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Global)) {
- if (FD->getIdentifier()) {
- llvm::StringRef Name = FD->getName();
- if (Name == "_Block_object_assign") {
- BlockObjectAssignDecl = FD;
- } else if (Name == "_Block_object_dispose") {
- BlockObjectDisposeDecl = FD;
- }
+ // If this is CUDA, be selective about which declarations we emit.
+ if (Features.CUDA) {
+ if (CodeGenOpts.CUDAIsDevice) {
+ if (!Global->hasAttr<CUDADeviceAttr>() &&
+ !Global->hasAttr<CUDAGlobalAttr>() &&
+ !Global->hasAttr<CUDAConstantAttr>() &&
+ !Global->hasAttr<CUDASharedAttr>())
+ return;
+ } else {
+ if (!Global->hasAttr<CUDAHostAttr>() && (
+ Global->hasAttr<CUDADeviceAttr>() ||
+ Global->hasAttr<CUDAConstantAttr>() ||
+ Global->hasAttr<CUDASharedAttr>()))
+ return;
}
+ }
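// A sketch of what this filtering means in source terms, assuming the usual
// mapping of the CUDA keywords onto CUDA*Attr:
//
//   __device__ int devVar;           // emitted only when CUDAIsDevice
//   __host__ __device__ void f();    // emitted in both compilations
//   void hostOnly();                 // unattributed: skipped on the device side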
+ // Ignore declarations, they will be emitted on their first use.
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Global)) {
// Forward declarations are emitted lazily on first use.
- if (!FD->doesThisDeclarationHaveABody())
+ if (!FD->doesThisDeclarationHaveABody()) {
+ if (!FD->doesDeclarationForceExternallyVisibleDefinition())
+ return;
+
+ const FunctionDecl *InlineDefinition = 0;
+ FD->getBody(InlineDefinition);
+
+ StringRef MangledName = getMangledName(GD);
+ llvm::StringMap<GlobalDecl>::iterator DDI =
+ DeferredDecls.find(MangledName);
+ if (DDI != DeferredDecls.end())
+ DeferredDecls.erase(DDI);
+ EmitGlobalDefinition(InlineDefinition);
return;
+ }
} else {
const VarDecl *VD = cast<VarDecl>(Global);
assert(VD->isFileVarDecl() && "Cannot emit local var decl as global.");
- if (VD->getIdentifier()) {
- llvm::StringRef Name = VD->getName();
- if (Name == "_NSConcreteGlobalBlock") {
- NSConcreteGlobalBlockDecl = VD;
- } else if (Name == "_NSConcreteStackBlock") {
- NSConcreteStackBlockDecl = VD;
- }
- }
-
-
if (VD->isThisDeclarationADefinition() != VarDecl::Definition)
return;
}
@@ -778,7 +847,7 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
// If the value has already been used, add it directly to the
// DeferredDeclsToEmit list.
- llvm::StringRef MangledName = getMangledName(GD);
+ StringRef MangledName = getMangledName(GD);
if (GetGlobalValue(MangledName))
DeferredDeclsToEmit.push_back(GD);
else {
@@ -827,7 +896,7 @@ void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD) {
if (const VarDecl *VD = dyn_cast<VarDecl>(D))
return EmitGlobalVarDefinition(VD);
- assert(0 && "Invalid argument to EmitGlobalDefinition()");
+ llvm_unreachable("Invalid argument to EmitGlobalDefinition()");
}
/// GetOrCreateLLVMFunction - If the specified mangled name is not in the
@@ -838,8 +907,8 @@ void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD) {
/// If D is non-null, it specifies a decl that corresponds to this. This is used
/// to set the attributes on the function when it is first created.
llvm::Constant *
-CodeGenModule::GetOrCreateLLVMFunction(llvm::StringRef MangledName,
- const llvm::Type *Ty,
+CodeGenModule::GetOrCreateLLVMFunction(StringRef MangledName,
+ llvm::Type *Ty,
GlobalDecl D, bool ForVTable,
llvm::Attributes ExtraAttrs) {
// Lookup the entry, lazily creating it if necessary.
@@ -865,7 +934,7 @@ CodeGenModule::GetOrCreateLLVMFunction(llvm::StringRef MangledName,
// sure not to try to set attributes.
bool IsIncompleteFunction = false;
- const llvm::FunctionType *FTy;
+ llvm::FunctionType *FTy;
if (isa<llvm::FunctionType>(Ty)) {
FTy = cast<llvm::FunctionType>(Ty);
} else {
@@ -935,21 +1004,21 @@ CodeGenModule::GetOrCreateLLVMFunction(llvm::StringRef MangledName,
/// non-null, then this function will use the specified type if it has to
/// create it (this occurs when we see a definition of the function).
llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
- const llvm::Type *Ty,
+ llvm::Type *Ty,
bool ForVTable) {
// If there was no specific requested type, just convert it now.
if (!Ty)
Ty = getTypes().ConvertType(cast<ValueDecl>(GD.getDecl())->getType());
- llvm::StringRef MangledName = getMangledName(GD);
+ StringRef MangledName = getMangledName(GD);
return GetOrCreateLLVMFunction(MangledName, Ty, GD, ForVTable);
}
/// CreateRuntimeFunction - Create a new runtime function with the specified
/// type and name.
llvm::Constant *
-CodeGenModule::CreateRuntimeFunction(const llvm::FunctionType *FTy,
- llvm::StringRef Name,
+CodeGenModule::CreateRuntimeFunction(llvm::FunctionType *FTy,
+ StringRef Name,
llvm::Attributes ExtraAttrs) {
return GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(), /*ForVTable=*/false,
ExtraAttrs);
@@ -979,8 +1048,8 @@ static bool DeclIsConstantGlobal(ASTContext &Context, const VarDecl *D,
/// If D is non-null, it specifies a decl that corresponds to this. This is used
/// to set the attributes on the global when it is first created.
llvm::Constant *
-CodeGenModule::GetOrCreateLLVMGlobal(llvm::StringRef MangledName,
- const llvm::PointerType *Ty,
+CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
+ llvm::PointerType *Ty,
const VarDecl *D,
bool UnnamedAddr) {
// Lookup the entry, lazily creating it if necessary.
@@ -1049,8 +1118,8 @@ CodeGenModule::GetOrCreateLLVMGlobal(llvm::StringRef MangledName,
llvm::GlobalVariable *
-CodeGenModule::CreateOrReplaceCXXRuntimeVariable(llvm::StringRef Name,
- const llvm::Type *Ty,
+CodeGenModule::CreateOrReplaceCXXRuntimeVariable(StringRef Name,
+ llvm::Type *Ty,
llvm::GlobalValue::LinkageTypes Linkage) {
llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name);
llvm::GlobalVariable *OldGV = 0;
@@ -1092,24 +1161,24 @@ CodeGenModule::CreateOrReplaceCXXRuntimeVariable(llvm::StringRef Name,
/// then it will be created with the specified type instead of whatever the
/// normal requested type would be.
llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D,
- const llvm::Type *Ty) {
+ llvm::Type *Ty) {
assert(D->hasGlobalStorage() && "Not a global variable");
QualType ASTTy = D->getType();
if (Ty == 0)
Ty = getTypes().ConvertTypeForMem(ASTTy);
- const llvm::PointerType *PTy =
+ llvm::PointerType *PTy =
llvm::PointerType::get(Ty, getContext().getTargetAddressSpace(ASTTy));
- llvm::StringRef MangledName = getMangledName(D);
+ StringRef MangledName = getMangledName(D);
return GetOrCreateLLVMGlobal(MangledName, PTy, D);
}
/// CreateRuntimeVariable - Create a new runtime global variable with the
/// specified type and name.
llvm::Constant *
-CodeGenModule::CreateRuntimeVariable(const llvm::Type *Ty,
- llvm::StringRef Name) {
+CodeGenModule::CreateRuntimeVariable(llvm::Type *Ty,
+ StringRef Name) {
return GetOrCreateLLVMGlobal(Name, llvm::PointerType::getUnqual(Ty), 0,
true);
}
@@ -1121,7 +1190,7 @@ void CodeGenModule::EmitTentativeDefinition(const VarDecl *D) {
// If we have not seen a reference to this variable yet, place it
// into the deferred declarations table to be emitted if needed
// later.
- llvm::StringRef MangledName = getMangledName(D);
+ StringRef MangledName = getMangledName(D);
if (!GetGlobalValue(MangledName)) {
DeferredDecls[MangledName] = D;
return;
@@ -1207,7 +1276,7 @@ CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
return llvm::GlobalVariable::LinkOnceODRLinkage;
}
-CharUnits CodeGenModule::GetTargetTypeStoreSize(const llvm::Type *Ty) const {
+CharUnits CodeGenModule::GetTargetTypeStoreSize(llvm::Type *Ty) const {
return Context.toCharUnitsFromBits(
TheTargetData.getTypeStoreSizeInBits(Ty));
}
@@ -1253,7 +1322,7 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
}
}
- const llvm::Type* InitType = Init->getType();
+ llvm::Type* InitType = Init->getType();
llvm::Constant *Entry = GetAddrOfGlobalVar(D, InitType);
// Strip off a bitcast if we got one back.
@@ -1282,7 +1351,7 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
getContext().getTargetAddressSpace(ASTTy)) {
// Move the old entry aside so that we'll create a new one.
- Entry->setName(llvm::StringRef());
+ Entry->setName(StringRef());
// Make a new global with the correct type, this is now guaranteed to work.
GV = cast<llvm::GlobalVariable>(GetAddrOfGlobalVar(D, InitType));
@@ -1296,11 +1365,8 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
cast<llvm::GlobalValue>(Entry)->eraseFromParent();
}
- if (const AnnotateAttr *AA = D->getAttr<AnnotateAttr>()) {
- SourceManager &SM = Context.getSourceManager();
- AddAnnotation(EmitAnnotateAttr(GV, AA,
- SM.getInstantiationLineNumber(D->getLocation())));
- }
+ if (D->hasAttr<AnnotateAttr>())
+ AddGlobalAnnotations(D, GV);
GV->setInitializer(Init);
@@ -1326,10 +1392,8 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
EmitCXXGlobalVarDeclInitFunc(D, GV);
// Emit global variable debug information.
- if (CGDebugInfo *DI = getModuleDebugInfo()) {
- DI->setLocation(D->getLocation());
+ if (CGDebugInfo *DI = getModuleDebugInfo())
DI->EmitGlobalVariable(GV, D);
- }
}
llvm::GlobalValue::LinkageTypes
@@ -1377,8 +1441,8 @@ static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
llvm::Function *OldFn = dyn_cast<llvm::Function>(Old);
if (OldFn == 0) return;
- const llvm::Type *NewRetTy = NewFn->getReturnType();
- llvm::SmallVector<llvm::Value*, 4> ArgList;
+ llvm::Type *NewRetTy = NewFn->getReturnType();
+ SmallVector<llvm::Value*, 4> ArgList;
for (llvm::Value::use_iterator UI = OldFn->use_begin(), E = OldFn->use_end();
UI != E; ) {
@@ -1394,6 +1458,17 @@ static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
if (CI->getType() != NewRetTy && !CI->use_empty())
continue;
+ // Get the attribute list.
+ llvm::SmallVector<llvm::AttributeWithIndex, 8> AttrVec;
+ llvm::AttrListPtr AttrList = CI->getAttributes();
+
+ // Get any return attributes.
+ llvm::Attributes RAttrs = AttrList.getRetAttributes();
+
+ // Add the return attributes.
+ if (RAttrs)
+ AttrVec.push_back(llvm::AttributeWithIndex::get(0, RAttrs));
+
// If the function was passed too few arguments, don't transform. If extra
// arguments were passed, we silently drop them. If any of the types
// mismatch, we don't transform.
@@ -1406,10 +1481,17 @@ static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
DontTransform = true;
break;
}
+
+ // Add any parameter attributes.
+ if (llvm::Attributes PAttrs = AttrList.getParamAttributes(ArgNo + 1))
+ AttrVec.push_back(llvm::AttributeWithIndex::get(ArgNo + 1, PAttrs));
}
if (DontTransform)
continue;
+ if (llvm::Attributes FnAttrs = AttrList.getFnAttributes())
+ AttrVec.push_back(llvm::AttributeWithIndex::get(~0, FnAttrs));
+
// Okay, we can transform this. Create the new call instruction and copy
// over the required information.
ArgList.append(CS.arg_begin(), CS.arg_begin() + ArgNo);
@@ -1417,7 +1499,8 @@ static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
ArgList.clear();
if (!NewCall->getType()->isVoidTy())
NewCall->takeName(CI);
- NewCall->setAttributes(CI->getAttributes());
+ NewCall->setAttributes(llvm::AttrListPtr::get(AttrVec.begin(),
+ AttrVec.end()));
NewCall->setCallingConv(CI->getCallingConv());
// Finally, remove the old call, replacing any uses with the new one.
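// The situation being patched up, in source form (mirroring the "int f()" /
// "int f(int x)" comment in EmitGlobalFunctionDefinition below):
//
//   int f();                       /* unprototyped declaration */
//   int g(void) { return f(42); }  /* call emitted against it */
//   int f(int x) { return x; }     /* definition supplies the prototype */
//
// The call in g() is rebuilt against the real prototype and now also carries
// over the return, parameter, and function attributes collected above.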
@@ -1440,7 +1523,7 @@ void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD) {
bool variadic = false;
if (const FunctionProtoType *fpt = D->getType()->getAs<FunctionProtoType>())
variadic = fpt->isVariadic();
- const llvm::FunctionType *Ty = getTypes().GetFunctionType(FI, variadic);
+ llvm::FunctionType *Ty = getTypes().GetFunctionType(FI, variadic);
// Get or create the prototype for the function.
llvm::Constant *Entry = GetAddrOfFunction(GD, Ty);
@@ -1467,7 +1550,7 @@ void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD) {
// (e.g. "int f()") and then a definition of a different type
// (e.g. "int f(int x)"). Move the old function aside so that it
// doesn't interfere with GetAddrOfFunction.
- OldFn->setName(llvm::StringRef());
+ OldFn->setName(StringRef());
llvm::Function *NewFn = cast<llvm::Function>(GetAddrOfFunction(GD, Ty));
// If this is an implementation of a function without a prototype, try to
@@ -1510,6 +1593,8 @@ void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD) {
AddGlobalCtor(Fn, CA->getPriority());
if (const DestructorAttr *DA = D->getAttr<DestructorAttr>())
AddGlobalDtor(Fn, DA->getPriority());
+ if (D->hasAttr<AnnotateAttr>())
+ AddGlobalAnnotations(D, Fn);
}
void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
@@ -1517,7 +1602,7 @@ void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
const AliasAttr *AA = D->getAttr<AliasAttr>();
assert(AA && "Not an alias?");
- llvm::StringRef MangledName = getMangledName(GD);
+ StringRef MangledName = getMangledName(GD);
// If there is a definition in the module, then it wins over the alias.
// This is dubious, but allow it to be safe. Just ignore the alias.
@@ -1525,7 +1610,7 @@ void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
if (Entry && !Entry->isDeclaration())
return;
- const llvm::Type *DeclTy = getTypes().ConvertTypeForMem(D->getType());
+ llvm::Type *DeclTy = getTypes().ConvertTypeForMem(D->getType());
// Create a reference to the named value. This ensures that it is emitted
// if a deferred decl.
@@ -1582,37 +1667,8 @@ void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
SetCommonAttributes(D, GA);
}
-/// getBuiltinLibFunction - Given a builtin id for a function like
-/// "__builtin_fabsf", return a Function* for "fabsf".
-llvm::Value *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
- unsigned BuiltinID) {
- assert((Context.BuiltinInfo.isLibFunction(BuiltinID) ||
- Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID)) &&
- "isn't a lib fn");
-
- // Get the name, skip over the __builtin_ prefix (if necessary).
- llvm::StringRef Name;
- GlobalDecl D(FD);
-
- // If the builtin has been declared explicitly with an assembler label,
- // use the mangled name. This differs from the plain label on platforms
- // that prefix labels.
- if (FD->hasAttr<AsmLabelAttr>())
- Name = getMangledName(D);
- else if (Context.BuiltinInfo.isLibFunction(BuiltinID))
- Name = Context.BuiltinInfo.GetName(BuiltinID) + 10;
- else
- Name = Context.BuiltinInfo.GetName(BuiltinID);
-
-
- const llvm::FunctionType *Ty =
- cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));
-
- return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
-}
-
llvm::Function *CodeGenModule::getIntrinsic(unsigned IID,
- llvm::ArrayRef<llvm::Type*> Tys) {
+ ArrayRef<llvm::Type*> Tys) {
return llvm::Intrinsic::getDeclaration(&getModule(), (llvm::Intrinsic::ID)IID,
Tys);
}
@@ -1623,7 +1679,7 @@ GetConstantCFStringEntry(llvm::StringMap<llvm::Constant*> &Map,
bool TargetIsLSB,
bool &IsUTF16,
unsigned &StringLength) {
- llvm::StringRef String = Literal->getString();
+ StringRef String = Literal->getString();
unsigned NumBytes = String.size();
// Check for simple case.
@@ -1633,7 +1689,7 @@ GetConstantCFStringEntry(llvm::StringMap<llvm::Constant*> &Map,
}
// Otherwise, convert the UTF8 literals into a byte string.
- llvm::SmallVector<UTF16, 128> ToBuf(NumBytes);
+ SmallVector<UTF16, 128> ToBuf(NumBytes);
const UTF8 *FromPtr = (UTF8 *)String.data();
UTF16 *ToPtr = &ToBuf[0];
@@ -1665,7 +1721,7 @@ GetConstantCFStringEntry(llvm::StringMap<llvm::Constant*> &Map,
AsBytes.push_back(0);
IsUTF16 = true;
- return Map.GetOrCreateValue(llvm::StringRef(AsBytes.data(), AsBytes.size()));
+ return Map.GetOrCreateValue(StringRef(AsBytes.data(), AsBytes.size()));
}
static llvm::StringMapEntry<llvm::Constant*> &
@@ -1673,7 +1729,7 @@ GetConstantStringEntry(llvm::StringMap<llvm::Constant*> &Map,
const StringLiteral *Literal,
unsigned &StringLength)
{
- llvm::StringRef String = Literal->getString();
+ StringRef String = Literal->getString();
StringLength = String.size();
return Map.GetOrCreateValue(String);
}
@@ -1696,18 +1752,18 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
// If we don't already have it, get __CFConstantStringClassReference.
if (!CFConstantStringClassRef) {
- const llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
+ llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
Ty = llvm::ArrayType::get(Ty, 0);
llvm::Constant *GV = CreateRuntimeVariable(Ty,
"__CFConstantStringClassReference");
// Decay array -> ptr
CFConstantStringClassRef =
- llvm::ConstantExpr::getGetElementPtr(GV, Zeros, 2);
+ llvm::ConstantExpr::getGetElementPtr(GV, Zeros);
}
QualType CFTy = getContext().getCFConstantStringType();
- const llvm::StructType *STy =
+ llvm::StructType *STy =
cast<llvm::StructType>(getTypes().ConvertType(CFTy));
std::vector<llvm::Constant*> Fields(4);
@@ -1716,7 +1772,7 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
Fields[0] = CFConstantStringClassRef;
// Flags.
- const llvm::Type *Ty = getTypes().ConvertType(getContext().UnsignedIntTy);
+ llvm::Type *Ty = getTypes().ConvertType(getContext().UnsignedIntTy);
Fields[1] = isUTF16 ? llvm::ConstantInt::get(Ty, 0x07d0) :
llvm::ConstantInt::get(Ty, 0x07C8);
@@ -1750,7 +1806,7 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
CharUnits Align = getContext().getTypeAlignInChars(getContext().CharTy);
GV->setAlignment(Align.getQuantity());
}
- Fields[2] = llvm::ConstantExpr::getGetElementPtr(GV, Zeros, 2);
+ Fields[2] = llvm::ConstantExpr::getGetElementPtr(GV, Zeros);
// String length.
Ty = getTypes().ConvertType(getContext().LongTy);
@@ -1761,13 +1817,23 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
GV = new llvm::GlobalVariable(getModule(), C->getType(), true,
llvm::GlobalVariable::PrivateLinkage, C,
"_unnamed_cfstring_");
- if (const char *Sect = getContext().Target.getCFStringSection())
+ if (const char *Sect = getContext().getTargetInfo().getCFStringSection())
GV->setSection(Sect);
Entry.setValue(GV);
return GV;
}
+static RecordDecl *
+CreateRecordDecl(const ASTContext &Ctx, RecordDecl::TagKind TK,
+ DeclContext *DC, IdentifierInfo *Id) {
+ SourceLocation Loc;
+ if (Ctx.getLangOptions().CPlusPlus)
+ return CXXRecordDecl::Create(Ctx, TK, DC, Loc, Loc, Id);
+ else
+ return RecordDecl::Create(Ctx, TK, DC, Loc, Loc, Id);
+}
+
llvm::Constant *
CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
unsigned StringLength = 0;
@@ -1784,7 +1850,7 @@ CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
// If we don't already have it, get _NSConstantStringClassReference.
if (!ConstantStringClassRef) {
std::string StringClass(getLangOptions().ObjCConstantStringClass);
- const llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
+ llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
llvm::Constant *GV;
if (Features.ObjCNonFragileABI) {
std::string str =
@@ -1792,25 +1858,54 @@ CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
: "OBJC_CLASS_$_" + StringClass;
GV = getObjCRuntime().GetClassGlobal(str);
// Make sure the result is of the correct type.
- const llvm::Type *PTy = llvm::PointerType::getUnqual(Ty);
+ llvm::Type *PTy = llvm::PointerType::getUnqual(Ty);
ConstantStringClassRef =
llvm::ConstantExpr::getBitCast(GV, PTy);
} else {
std::string str =
StringClass.empty() ? "_NSConstantStringClassReference"
: "_" + StringClass + "ClassReference";
- const llvm::Type *PTy = llvm::ArrayType::get(Ty, 0);
+ llvm::Type *PTy = llvm::ArrayType::get(Ty, 0);
GV = CreateRuntimeVariable(PTy, str);
// Decay array -> ptr
ConstantStringClassRef =
- llvm::ConstantExpr::getGetElementPtr(GV, Zeros, 2);
+ llvm::ConstantExpr::getGetElementPtr(GV, Zeros);
}
}
-
- QualType NSTy = getContext().getNSConstantStringType();
-
- const llvm::StructType *STy =
- cast<llvm::StructType>(getTypes().ConvertType(NSTy));
+
+ if (!NSConstantStringType) {
+ // Construct the type for a constant NSString.
+ RecordDecl *D = CreateRecordDecl(Context, TTK_Struct,
+ Context.getTranslationUnitDecl(),
+ &Context.Idents.get("__builtin_NSString"));
+ D->startDefinition();
+
+ QualType FieldTypes[3];
+
+ // const int *isa;
+ FieldTypes[0] = Context.getPointerType(Context.IntTy.withConst());
+ // const char *str;
+ FieldTypes[1] = Context.getPointerType(Context.CharTy.withConst());
+ // unsigned int length;
+ FieldTypes[2] = Context.UnsignedIntTy;
+
+ // Create fields
+ for (unsigned i = 0; i < 3; ++i) {
+ FieldDecl *Field = FieldDecl::Create(Context, D,
+ SourceLocation(),
+ SourceLocation(), 0,
+ FieldTypes[i], /*TInfo=*/0,
+ /*BitWidth=*/0,
+ /*Mutable=*/false,
+ /*HasInit=*/false);
+ Field->setAccess(AS_public);
+ D->addDecl(Field);
+ }
+
+ D->completeDefinition();
+ QualType NSTy = Context.getTagDeclType(D);
+ NSConstantStringType = cast<llvm::StructType>(getTypes().ConvertType(NSTy));
+ }
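// In C terms the record built above is simply (a sketch; the field names
// come from the comments in the builder code):
//
//   struct __builtin_NSString {
//     const int *isa;
//     const char *str;
//     unsigned int length;
//   };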
std::vector<llvm::Constant*> Fields(3);
@@ -1831,28 +1926,63 @@ CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
GV->setUnnamedAddr(true);
CharUnits Align = getContext().getTypeAlignInChars(getContext().CharTy);
GV->setAlignment(Align.getQuantity());
- Fields[1] = llvm::ConstantExpr::getGetElementPtr(GV, Zeros, 2);
+ Fields[1] = llvm::ConstantExpr::getGetElementPtr(GV, Zeros);
// String length.
- const llvm::Type *Ty = getTypes().ConvertType(getContext().UnsignedIntTy);
+ llvm::Type *Ty = getTypes().ConvertType(getContext().UnsignedIntTy);
Fields[2] = llvm::ConstantInt::get(Ty, StringLength);
// The struct.
- C = llvm::ConstantStruct::get(STy, Fields);
+ C = llvm::ConstantStruct::get(NSConstantStringType, Fields);
GV = new llvm::GlobalVariable(getModule(), C->getType(), true,
llvm::GlobalVariable::PrivateLinkage, C,
"_unnamed_nsstring_");
// FIXME. Fix section.
if (const char *Sect =
Features.ObjCNonFragileABI
- ? getContext().Target.getNSStringNonFragileABISection()
- : getContext().Target.getNSStringSection())
+ ? getContext().getTargetInfo().getNSStringNonFragileABISection()
+ : getContext().getTargetInfo().getNSStringSection())
GV->setSection(Sect);
Entry.setValue(GV);
return GV;
}
+QualType CodeGenModule::getObjCFastEnumerationStateType() {
+ if (ObjCFastEnumerationStateType.isNull()) {
+ RecordDecl *D = CreateRecordDecl(Context, TTK_Struct,
+ Context.getTranslationUnitDecl(),
+ &Context.Idents.get("__objcFastEnumerationState"));
+ D->startDefinition();
+
+ QualType FieldTypes[] = {
+ Context.UnsignedLongTy,
+ Context.getPointerType(Context.getObjCIdType()),
+ Context.getPointerType(Context.UnsignedLongTy),
+ Context.getConstantArrayType(Context.UnsignedLongTy,
+ llvm::APInt(32, 5), ArrayType::Normal, 0)
+ };
+
+ for (size_t i = 0; i < 4; ++i) {
+ FieldDecl *Field = FieldDecl::Create(Context,
+ D,
+ SourceLocation(),
+ SourceLocation(), 0,
+ FieldTypes[i], /*TInfo=*/0,
+ /*BitWidth=*/0,
+ /*Mutable=*/false,
+ /*HasInit=*/false);
+ Field->setAccess(AS_public);
+ D->addDecl(Field);
+ }
+
+ D->completeDefinition();
+ ObjCFastEnumerationStateType = Context.getTagDeclType(D);
+ }
+
+ return ObjCFastEnumerationStateType;
+}
+
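// Equivalent C view of the state record assembled above (only the field
// types are mandated by the builder; the names here are illustrative):
//
//   struct __objcFastEnumerationState {
//     unsigned long state;
//     id *itemsPtr;
//     unsigned long *mutationsPtr;
//     unsigned long extra[5];
//   };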
/// GetStringForStringLiteral - Return the appropriate bytes for a
/// string literal, properly padded to match the literal type.
std::string CodeGenModule::GetStringForStringLiteral(const StringLiteral *E) {
@@ -1864,8 +1994,20 @@ std::string CodeGenModule::GetStringForStringLiteral(const StringLiteral *E) {
// Resize the string to the right size.
uint64_t RealLen = CAT->getSize().getZExtValue();
- if (E->isWide())
- RealLen *= Context.Target.getWCharWidth() / Context.getCharWidth();
+ switch (E->getKind()) {
+ case StringLiteral::Ascii:
+ case StringLiteral::UTF8:
+ break;
+ case StringLiteral::Wide:
+ RealLen *= Context.getTargetInfo().getWCharWidth() / Context.getCharWidth();
+ break;
+ case StringLiteral::UTF16:
+ RealLen *= Context.getTargetInfo().getChar16Width() / Context.getCharWidth();
+ break;
+ case StringLiteral::UTF32:
+ RealLen *= Context.getTargetInfo().getChar32Width() / Context.getCharWidth();
+ break;
+ }
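// Worked example, assuming an 8-bit char and a 16-bit char16_t: for u"abc"
// the array type is char16_t[4] (terminator included), so
// RealLen = 4 * (16 / 8) = 8 bytes of storage.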
std::string Str = E->getString().str();
Str.resize(RealLen, '\0');
@@ -1879,8 +2021,11 @@ llvm::Constant *
CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S) {
// FIXME: This can be more efficient.
// FIXME: We shouldn't need to bitcast the constant in the wide string case.
- llvm::Constant *C = GetAddrOfConstantString(GetStringForStringLiteral(S));
- if (S->isWide()) {
+ CharUnits Align = getContext().getTypeAlignInChars(S->getType());
+ llvm::Constant *C = GetAddrOfConstantString(GetStringForStringLiteral(S),
+ /* GlobalName */ 0,
+ Align.getQuantity());
+ if (S->isWide() || S->isUTF16() || S->isUTF32()) {
llvm::Type *DestTy =
llvm::PointerType::getUnqual(getTypes().ConvertType(S->getType()));
C = llvm::ConstantExpr::getBitCast(C, DestTy);
@@ -1900,10 +2045,11 @@ CodeGenModule::GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *E) {
/// GenerateStringLiteral -- Creates storage for a string literal.
-static llvm::Constant *GenerateStringLiteral(llvm::StringRef str,
+static llvm::GlobalVariable *GenerateStringLiteral(StringRef str,
bool constant,
CodeGenModule &CGM,
- const char *GlobalName) {
+ const char *GlobalName,
+ unsigned Alignment) {
// Create Constant for this string literal. Don't add a '\0'.
llvm::Constant *C =
llvm::ConstantArray::get(CGM.getLLVMContext(), str, false);
@@ -1913,7 +2059,7 @@ static llvm::Constant *GenerateStringLiteral(llvm::StringRef str,
new llvm::GlobalVariable(CGM.getModule(), C->getType(), constant,
llvm::GlobalValue::PrivateLinkage,
C, GlobalName);
- GV->setAlignment(1);
+ GV->setAlignment(Alignment);
GV->setUnnamedAddr(true);
return GV;
}
@@ -1926,8 +2072,9 @@ static llvm::Constant *GenerateStringLiteral(llvm::StringRef str,
/// Feature.WriteableStrings.
///
/// The result has pointer to array type.
-llvm::Constant *CodeGenModule::GetAddrOfConstantString(llvm::StringRef Str,
- const char *GlobalName) {
+llvm::Constant *CodeGenModule::GetAddrOfConstantString(StringRef Str,
+ const char *GlobalName,
+ unsigned Alignment) {
bool IsConstant = !Features.WritableStrings;
// Get the default prefix if a name wasn't specified.
@@ -1936,27 +2083,32 @@ llvm::Constant *CodeGenModule::GetAddrOfConstantString(llvm::StringRef Str,
// Don't share any string literals if strings aren't constant.
if (!IsConstant)
- return GenerateStringLiteral(Str, false, *this, GlobalName);
+ return GenerateStringLiteral(Str, false, *this, GlobalName, Alignment);
- llvm::StringMapEntry<llvm::Constant *> &Entry =
+ llvm::StringMapEntry<llvm::GlobalVariable *> &Entry =
ConstantStringMap.GetOrCreateValue(Str);
- if (Entry.getValue())
- return Entry.getValue();
+ if (llvm::GlobalVariable *GV = Entry.getValue()) {
+ if (Alignment > GV->getAlignment()) {
+ GV->setAlignment(Alignment);
+ }
+ return GV;
+ }
// Create a global variable for this.
- llvm::Constant *C = GenerateStringLiteral(Str, true, *this, GlobalName);
- Entry.setValue(C);
- return C;
+ llvm::GlobalVariable *GV = GenerateStringLiteral(Str, true, *this, GlobalName, Alignment);
+ Entry.setValue(GV);
+ return GV;
}
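A minimal sketch of the caching rule introduced above, in plain C++ with hypothetical names: a cached literal is reused, and its alignment is only ever raised to the strictest request seen so far, never lowered.

#include <algorithm>
#include <map>
#include <string>

struct GlobalStr { unsigned Alignment = 1; };
static std::map<std::string, GlobalStr> ConstantStrings;

GlobalStr &getOrCreateString(const std::string &Str, unsigned Alignment) {
  GlobalStr &G = ConstantStrings[Str];
  // Bump, never shrink: an earlier user may rely on the larger alignment.
  G.Alignment = std::max(G.Alignment, Alignment);
  return G;
}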
/// GetAddrOfConstantCString - Returns a pointer to a character
/// array containing the literal and a terminating '\0'
/// character. The result has pointer to array type.
llvm::Constant *CodeGenModule::GetAddrOfConstantCString(const std::string &Str,
- const char *GlobalName){
- llvm::StringRef StrWithNull(Str.c_str(), Str.size() + 1);
- return GetAddrOfConstantString(StrWithNull, GlobalName);
+ const char *GlobalName,
+ unsigned Alignment) {
+ StringRef StrWithNull(Str.c_str(), Str.size() + 1);
+ return GetAddrOfConstantString(StrWithNull, GlobalName, Alignment);
}
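The StringRef construction above deliberately spans size() + 1 bytes so that the terminating NUL std::string guarantees after its data becomes part of the emitted constant. A hedged standalone check of that guarantee:

#include <cassert>
#include <string>

int main() {
  std::string Str = "abc";
  const char *Data = Str.c_str();    // c_str() exposes a trailing '\0'
  assert(Data[Str.size()] == '\0');  // the extra byte the ref captures
  return 0;
}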
/// EmitObjCPropertyImplementations - Emit information for synthesized
@@ -1988,9 +2140,8 @@ void CodeGenModule::EmitObjCPropertyImplementations(const
}
static bool needsDestructMethod(ObjCImplementationDecl *impl) {
- ObjCInterfaceDecl *iface
- = const_cast<ObjCInterfaceDecl*>(impl->getClassInterface());
- for (ObjCIvarDecl *ivar = iface->all_declared_ivar_begin();
+ const ObjCInterfaceDecl *iface = impl->getClassInterface();
+ for (const ObjCIvarDecl *ivar = iface->all_declared_ivar_begin();
ivar; ivar = ivar->getNextIvar())
if (ivar->getType().isDestructedType())
return true;
@@ -2007,8 +2158,10 @@ void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) {
Selector cxxSelector = getContext().Selectors.getSelector(0, &II);
ObjCMethodDecl *DTORMethod =
ObjCMethodDecl::Create(getContext(), D->getLocation(), D->getLocation(),
- cxxSelector, getContext().VoidTy, 0, D, true,
- false, true, false, ObjCMethodDecl::Required);
+ cxxSelector, getContext().VoidTy, 0, D,
+ /*isInstance=*/true, /*isVariadic=*/false,
+ /*isSynthesized=*/true, /*isImplicitlyDeclared=*/true,
+ /*isDefined=*/false, ObjCMethodDecl::Required);
D->addInstanceMethod(DTORMethod);
CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, DTORMethod, false);
D->setHasCXXStructors(true);
@@ -2024,9 +2177,14 @@ void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) {
// The constructor returns 'self'.
ObjCMethodDecl *CTORMethod = ObjCMethodDecl::Create(getContext(),
D->getLocation(),
- D->getLocation(), cxxSelector,
+ D->getLocation(),
+ cxxSelector,
getContext().getObjCIdType(), 0,
- D, true, false, true, false,
+ D, /*isInstance=*/true,
+ /*isVariadic=*/false,
+ /*isSynthesized=*/true,
+ /*isImplicitlyDeclared=*/true,
+ /*isDefined=*/false,
ObjCMethodDecl::Required);
D->addInstanceMethod(CTORMethod);
CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, CTORMethod, true);
@@ -2134,13 +2292,13 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
}
case Decl::ObjCProtocol:
- Runtime->GenerateProtocol(cast<ObjCProtocolDecl>(D));
+ ObjCRuntime->GenerateProtocol(cast<ObjCProtocolDecl>(D));
break;
case Decl::ObjCCategoryImpl:
// Categories have properties but don't support synthesize so we
// can ignore them here.
- Runtime->GenerateCategory(cast<ObjCCategoryImplDecl>(D));
+ ObjCRuntime->GenerateCategory(cast<ObjCCategoryImplDecl>(D));
break;
case Decl::ObjCImplementation: {
@@ -2149,7 +2307,7 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
Context.ResetObjCLayout(OMD->getClassInterface());
EmitObjCPropertyImplementations(OMD);
EmitObjCIvarInitializations(OMD);
- Runtime->GenerateClass(OMD);
+ ObjCRuntime->GenerateClass(OMD);
break;
}
case Decl::ObjCMethod: {
@@ -2169,11 +2327,13 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
case Decl::FileScopeAsm: {
FileScopeAsmDecl *AD = cast<FileScopeAsmDecl>(D);
- llvm::StringRef AsmString = AD->getAsmString()->getString();
+ StringRef AsmString = AD->getAsmString()->getString();
const std::string &S = getModule().getModuleInlineAsm();
if (S.empty())
getModule().setModuleInlineAsm(AsmString);
+ else if (*--S.end() == '\n')
+ getModule().setModuleInlineAsm(S + AsmString.str());
else
getModule().setModuleInlineAsm(S + '\n' + AsmString.str());
break;
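Restated as a standalone helper (hypothetical name), the new branch above joins module-level asm blobs with exactly one '\n': if the accumulated string already ends in a newline, no separator is added.

#include <string>

static std::string appendModuleAsm(const std::string &S,
                                   const std::string &Asm) {
  if (S.empty())
    return Asm;              // first blob: store as-is
  if (S.back() == '\n')      // the same test as *--S.end() above
    return S + Asm;          // already separated
  return S + '\n' + Asm;     // insert the missing separator
}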
@@ -2191,7 +2351,7 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
static llvm::Constant *GetPointerConstant(llvm::LLVMContext &Context,
const void *Ptr) {
uintptr_t PtrInt = reinterpret_cast<uintptr_t>(Ptr);
- const llvm::Type *i64 = llvm::Type::getInt64Ty(Context);
+ llvm::Type *i64 = llvm::Type::getInt64Ty(Context);
return llvm::ConstantInt::get(i64, PtrInt);
}
@@ -2222,7 +2382,7 @@ void CodeGenModule::EmitDeclMetadata() {
llvm::NamedMDNode *GlobalMetadata = 0;
// StaticLocalDeclMap
- for (llvm::DenseMap<GlobalDecl,llvm::StringRef>::iterator
+ for (llvm::DenseMap<GlobalDecl,StringRef>::iterator
I = MangledDeclNames.begin(), E = MangledDeclNames.end();
I != E; ++I) {
llvm::GlobalValue *Addr = getModule().getNamedValue(I->second);
@@ -2273,81 +2433,3 @@ void CodeGenModule::EmitCoverageFile() {
}
}
}
-
-///@name Custom Runtime Function Interfaces
-///@{
-//
-// FIXME: These can be eliminated once we can have clients just get the required
-// AST nodes from the builtin tables.
-
-llvm::Constant *CodeGenModule::getBlockObjectDispose() {
- if (BlockObjectDispose)
- return BlockObjectDispose;
-
- // If we saw an explicit decl, use that.
- if (BlockObjectDisposeDecl) {
- return BlockObjectDispose = GetAddrOfFunction(
- BlockObjectDisposeDecl,
- getTypes().GetFunctionType(BlockObjectDisposeDecl));
- }
-
- // Otherwise construct the function by hand.
- llvm::Type *args[] = { Int8PtrTy, Int32Ty };
- const llvm::FunctionType *fty
- = llvm::FunctionType::get(VoidTy, args, false);
- return BlockObjectDispose =
- CreateRuntimeFunction(fty, "_Block_object_dispose");
-}
-
-llvm::Constant *CodeGenModule::getBlockObjectAssign() {
- if (BlockObjectAssign)
- return BlockObjectAssign;
-
- // If we saw an explicit decl, use that.
- if (BlockObjectAssignDecl) {
- return BlockObjectAssign = GetAddrOfFunction(
- BlockObjectAssignDecl,
- getTypes().GetFunctionType(BlockObjectAssignDecl));
- }
-
- // Otherwise construct the function by hand.
- llvm::Type *args[] = { Int8PtrTy, Int8PtrTy, Int32Ty };
- const llvm::FunctionType *fty
- = llvm::FunctionType::get(VoidTy, args, false);
- return BlockObjectAssign =
- CreateRuntimeFunction(fty, "_Block_object_assign");
-}
-
-llvm::Constant *CodeGenModule::getNSConcreteGlobalBlock() {
- if (NSConcreteGlobalBlock)
- return NSConcreteGlobalBlock;
-
- // If we saw an explicit decl, use that.
- if (NSConcreteGlobalBlockDecl) {
- return NSConcreteGlobalBlock = GetAddrOfGlobalVar(
- NSConcreteGlobalBlockDecl,
- getTypes().ConvertType(NSConcreteGlobalBlockDecl->getType()));
- }
-
- // Otherwise construct the variable by hand.
- return NSConcreteGlobalBlock =
- CreateRuntimeVariable(Int8PtrTy, "_NSConcreteGlobalBlock");
-}
-
-llvm::Constant *CodeGenModule::getNSConcreteStackBlock() {
- if (NSConcreteStackBlock)
- return NSConcreteStackBlock;
-
- // If we saw an explicit decl, use that.
- if (NSConcreteStackBlockDecl) {
- return NSConcreteStackBlock = GetAddrOfGlobalVar(
- NSConcreteStackBlockDecl,
- getTypes().ConvertType(NSConcreteStackBlockDecl->getType()));
- }
-
- // Otherwise construct the variable by hand.
- return NSConcreteStackBlock =
- CreateRuntimeVariable(Int8PtrTy, "_NSConcreteStackBlock");
-}
-
-///@}
diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h
index 86fb6d4d030e..8e38a8999013 100644
--- a/lib/CodeGen/CodeGenModule.h
+++ b/lib/CodeGen/CodeGenModule.h
@@ -62,7 +62,7 @@ namespace clang {
class VarDecl;
class LangOptions;
class CodeGenOptions;
- class Diagnostic;
+ class DiagnosticsEngine;
class AnnotateAttr;
class CXXDestructorDecl;
class MangleBuffer;
@@ -75,6 +75,8 @@ namespace CodeGen {
class CGCXXABI;
class CGDebugInfo;
class CGObjCRuntime;
+ class CGOpenCLRuntime;
+ class CGCUDARuntime;
class BlockFieldFlags;
class FunctionArgList;
@@ -129,8 +131,12 @@ namespace CodeGen {
/// The width of a pointer into the generic address space.
unsigned char PointerWidthInBits;
- /// The alignment of a pointer into the generic address space.
- unsigned char PointerAlignInBytes;
+ /// The size and alignment of a pointer into the generic address
+ /// space.
+ union {
+ unsigned char PointerAlignInBytes;
+ unsigned char PointerSizeInBytes;
+ };
};
struct RREntrypoints {
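The union above stores a single byte for both quantities, which only works if pointer size and pointer alignment agree in the generic address space. A hedged compile-time restatement of that assumption for the host:

static_assert(sizeof(void *) == alignof(void *),
              "one byte in CodeGenTypeCache stands in for both the size "
              "and the alignment of a generic pointer");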
@@ -212,7 +218,7 @@ class CodeGenModule : public CodeGenTypeCache {
llvm::Module &TheModule;
const llvm::TargetData &TheTargetData;
mutable const TargetCodeGenInfo *TheTargetCodeGenInfo;
- Diagnostic &Diags;
+ DiagnosticsEngine &Diags;
CGCXXABI &ABI;
CodeGenTypes Types;
CodeGenTBAA *TBAA;
@@ -221,7 +227,9 @@ class CodeGenModule : public CodeGenTypeCache {
CodeGenVTables VTables;
friend class CodeGenVTables;
- CGObjCRuntime* Runtime;
+ CGObjCRuntime* ObjCRuntime;
+ CGOpenCLRuntime* OpenCLRuntime;
+ CGCUDARuntime* CUDARuntime;
CGDebugInfo* DebugInfo;
ARCEntrypoints *ARCData;
RREntrypoints *RRData;
@@ -257,13 +265,17 @@ class CodeGenModule : public CodeGenTypeCache {
CtorList GlobalDtors;
/// MangledDeclNames - A map of canonical GlobalDecls to their mangled names.
- llvm::DenseMap<GlobalDecl, llvm::StringRef> MangledDeclNames;
+ llvm::DenseMap<GlobalDecl, StringRef> MangledDeclNames;
llvm::BumpPtrAllocator MangledNamesAllocator;
+ /// Global annotations.
std::vector<llvm::Constant*> Annotations;
+ /// Map used to get unique annotation strings.
+ llvm::StringMap<llvm::Constant*> AnnotationStrings;
+
llvm::StringMap<llvm::Constant*> CFConstantStringMap;
- llvm::StringMap<llvm::Constant*> ConstantStringMap;
+ llvm::StringMap<llvm::GlobalVariable*> ConstantStringMap;
llvm::DenseMap<const Decl*, llvm::Value*> StaticLocalDeclMap;
/// CXXGlobalInits - Global variables with initializers that need to run
@@ -279,13 +291,16 @@ class CodeGenModule : public CodeGenTypeCache {
/// - Global variables with initializers whose order of initialization
/// is set by init_priority attribute.
- llvm::SmallVector<std::pair<OrderGlobalInits, llvm::Function*>, 8>
+ SmallVector<std::pair<OrderGlobalInits, llvm::Function*>, 8>
PrioritizedCXXGlobalInits;
/// CXXGlobalDtors - Global destructor functions and arguments that need to
/// run on termination.
std::vector<std::pair<llvm::WeakVH,llvm::Constant*> > CXXGlobalDtors;
+ /// @name Cache for Objective-C runtime types
+ /// @{
+
/// CFConstantStringClassRef - Cached reference to the class for constant
/// strings. This value has type int * but is actually an Obj-C class pointer.
llvm::Constant *CFConstantStringClassRef;
@@ -294,21 +309,29 @@ class CodeGenModule : public CodeGenTypeCache {
/// strings. This value has type int * but is actually an Obj-C class pointer.
llvm::Constant *ConstantStringClassRef;
+ /// \brief The LLVM type corresponding to NSConstantString.
+ llvm::StructType *NSConstantStringType;
+
+ /// \brief The type used to describe the state of a fast enumeration in
+ /// Objective-C's for..in loop.
+ QualType ObjCFastEnumerationStateType;
+
+ /// @}
+
/// Lazily create the Objective-C runtime
void createObjCRuntime();
+ void createOpenCLRuntime();
+ void createCUDARuntime();
+
llvm::LLVMContext &VMContext;
/// @name Cache for Blocks Runtime Globals
/// @{
- const VarDecl *NSConcreteGlobalBlockDecl;
- const VarDecl *NSConcreteStackBlockDecl;
llvm::Constant *NSConcreteGlobalBlock;
llvm::Constant *NSConcreteStackBlock;
- const FunctionDecl *BlockObjectAssignDecl;
- const FunctionDecl *BlockObjectDisposeDecl;
llvm::Constant *BlockObjectAssign;
llvm::Constant *BlockObjectDispose;
@@ -322,7 +345,8 @@ class CodeGenModule : public CodeGenTypeCache {
/// @}
public:
CodeGenModule(ASTContext &C, const CodeGenOptions &CodeGenOpts,
- llvm::Module &M, const llvm::TargetData &TD, Diagnostic &Diags);
+ llvm::Module &M, const llvm::TargetData &TD,
+ DiagnosticsEngine &Diags);
~CodeGenModule();
@@ -332,13 +356,25 @@ public:
/// getObjCRuntime() - Return a reference to the configured
/// Objective-C runtime.
CGObjCRuntime &getObjCRuntime() {
- if (!Runtime) createObjCRuntime();
- return *Runtime;
+ if (!ObjCRuntime) createObjCRuntime();
+ return *ObjCRuntime;
}
/// hasObjCRuntime() - Return true iff an Objective-C runtime has
/// been configured.
- bool hasObjCRuntime() { return !!Runtime; }
+ bool hasObjCRuntime() { return !!ObjCRuntime; }
+
+ /// getOpenCLRuntime() - Return a reference to the configured OpenCL runtime.
+ CGOpenCLRuntime &getOpenCLRuntime() {
+ assert(OpenCLRuntime != 0);
+ return *OpenCLRuntime;
+ }
+
+ /// getCUDARuntime() - Return a reference to the configured CUDA runtime.
+ CGCUDARuntime &getCUDARuntime() {
+ assert(CUDARuntime != 0);
+ return *CUDARuntime;
+ }
/// getCXXABI() - Return a reference to the configured C++ ABI.
CGCXXABI &getCXXABI() { return ABI; }
@@ -369,9 +405,10 @@ public:
llvm::Module &getModule() const { return TheModule; }
CodeGenTypes &getTypes() { return Types; }
CodeGenVTables &getVTables() { return VTables; }
- Diagnostic &getDiags() const { return Diags; }
+ VTableContext &getVTableContext() { return VTables.getVTableContext(); }
+ DiagnosticsEngine &getDiags() const { return Diags; }
const llvm::TargetData &getTargetData() const { return TheTargetData; }
- const TargetInfo &getTarget() const { return Context.Target; }
+ const TargetInfo &getTarget() const { return Context.getTargetInfo(); }
llvm::LLVMContext &getLLVMContext() { return VMContext; }
const TargetCodeGenInfo &getTargetCodeGenInfo();
bool isTargetDarwin() const;
@@ -433,7 +470,7 @@ public:
/// variable with the right type will be created and all uses of the old
/// variable will be replaced with a bitcast to the new variable.
llvm::GlobalVariable *
- CreateOrReplaceCXXRuntimeVariable(llvm::StringRef Name, const llvm::Type *Ty,
+ CreateOrReplaceCXXRuntimeVariable(StringRef Name, llvm::Type *Ty,
llvm::GlobalValue::LinkageTypes Linkage);
/// GetAddrOfGlobalVar - Return the llvm::Constant for the address of the
@@ -441,14 +478,14 @@ public:
/// then it will be greated with the specified type instead of whatever the
/// normal requested type would be.
llvm::Constant *GetAddrOfGlobalVar(const VarDecl *D,
- const llvm::Type *Ty = 0);
+ llvm::Type *Ty = 0);
/// GetAddrOfFunction - Return the address of the given function. If Ty is
/// non-null, then this function will use the specified type if it has to
/// create it.
llvm::Constant *GetAddrOfFunction(GlobalDecl GD,
- const llvm::Type *Ty = 0,
+ llvm::Type *Ty = 0,
bool ForVTable = false);
/// GetAddrOfRTTIDescriptor - Get the address of the RTTI descriptor
@@ -544,8 +581,9 @@ public:
///
/// \param GlobalName If provided, the name to use for the global
/// (if one is created).
- llvm::Constant *GetAddrOfConstantString(llvm::StringRef Str,
- const char *GlobalName=0);
+ llvm::Constant *GetAddrOfConstantString(StringRef Str,
+ const char *GlobalName=0,
+ unsigned Alignment=1);
/// GetAddrOfConstantCString - Returns a pointer to a character array
/// containing the literal and a terminating '\0' character. The result has
@@ -554,7 +592,12 @@ public:
/// \param GlobalName If provided, the name to use for the global (if one is
/// created).
llvm::Constant *GetAddrOfConstantCString(const std::string &str,
- const char *GlobalName=0);
+ const char *GlobalName=0,
+ unsigned Alignment=1);
+
+ /// \brief Retrieve the record type that describes the state of an
+ /// Objective-C fast enumeration loop (for..in).
+ QualType getObjCFastEnumerationStateType();
/// GetAddrOfCXXConstructor - Return the address of the constructor of the
/// given type.
@@ -573,8 +616,8 @@ public:
llvm::Value *getBuiltinLibFunction(const FunctionDecl *FD,
unsigned BuiltinID);
- llvm::Function *getIntrinsic(unsigned IID, llvm::ArrayRef<llvm::Type*> Tys =
- llvm::ArrayRef<llvm::Type*>());
+ llvm::Function *getIntrinsic(unsigned IID, ArrayRef<llvm::Type*> Tys =
+ ArrayRef<llvm::Type*>());
/// EmitTopLevelDecl - Emit code for a single top level declaration.
void EmitTopLevelDecl(Decl *D);
@@ -584,8 +627,6 @@ public:
/// metadata global.
void AddUsedGlobal(llvm::GlobalValue *GV);
- void AddAnnotation(llvm::Constant *C) { Annotations.push_back(C); }
-
/// AddCXXDtorEntry - Add a destructor and object to add to the C++ global
/// destructor function.
void AddCXXDtorEntry(llvm::Constant *DtorFn, llvm::Constant *Object) {
@@ -594,14 +635,14 @@ public:
/// CreateRuntimeFunction - Create a new runtime function with the specified
/// type and name.
- llvm::Constant *CreateRuntimeFunction(const llvm::FunctionType *Ty,
- llvm::StringRef Name,
+ llvm::Constant *CreateRuntimeFunction(llvm::FunctionType *Ty,
+ StringRef Name,
llvm::Attributes ExtraAttrs =
llvm::Attribute::None);
/// CreateRuntimeVariable - Create a new runtime global variable with the
/// specified type and name.
- llvm::Constant *CreateRuntimeVariable(const llvm::Type *Ty,
- llvm::StringRef Name);
+ llvm::Constant *CreateRuntimeVariable(llvm::Type *Ty,
+ StringRef Name);
///@name Custom Blocks Runtime Interfaces
///@{
@@ -629,11 +670,13 @@ public:
/// but not always, an LLVM null constant.
llvm::Constant *EmitNullConstant(QualType T);
- llvm::Constant *EmitAnnotateAttr(llvm::GlobalValue *GV,
- const AnnotateAttr *AA, unsigned LineNo);
+ /// EmitNullConstantForBase - Return a null constant appropriate for
+ /// zero-initializing a base class with the given type. This is usually,
+ /// but not always, an LLVM null constant.
+ llvm::Constant *EmitNullConstantForBase(const CXXRecordDecl *Record);
/// Error - Emit a general error that something can't be done.
- void Error(SourceLocation loc, llvm::StringRef error);
+ void Error(SourceLocation loc, StringRef error);
/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified stmt yet.
@@ -688,7 +731,7 @@ public:
AttributeListType &PAL,
unsigned &CallingConv);
- llvm::StringRef getMangledName(GlobalDecl GD);
+ StringRef getMangledName(GlobalDecl GD);
void getBlockMangledName(GlobalDecl GD, MangleBuffer &Buffer,
const BlockDecl *BD);
@@ -709,7 +752,7 @@ public:
/// GetTargetTypeStoreSize - Return the store size, in character units, of
/// the given LLVM type.
- CharUnits GetTargetTypeStoreSize(const llvm::Type *Ty) const;
+ CharUnits GetTargetTypeStoreSize(llvm::Type *Ty) const;
/// GetLLVMLinkageVarDefinition - Returns LLVM linkage for a global
/// variable.
@@ -719,17 +762,44 @@ public:
std::vector<const CXXRecordDecl*> DeferredVTables;
+ /// Emit all the global annotations.
+ void EmitGlobalAnnotations();
+
+ /// Emit an annotation string.
+ llvm::Constant *EmitAnnotationString(llvm::StringRef Str);
+
+ /// Emit the annotation's translation unit.
+ llvm::Constant *EmitAnnotationUnit(SourceLocation Loc);
+
+ /// Emit the annotation line number.
+ llvm::Constant *EmitAnnotationLineNo(SourceLocation L);
+
+ /// EmitAnnotateAttr - Generate the llvm::ConstantStruct which contains the
+ /// annotation information for a given GlobalValue. The annotation struct is
+ /// {i8 *, i8 *, i8 *, i32}. The first field is a constant expression, the
+ /// GlobalValue being annotated. The second field is the constant string
+ /// created from the AnnotateAttr's annotation. The third field is a constant
+ /// string containing the name of the translation unit. The fourth field is
+ /// the line number in the file of the annotated value declaration.
+ llvm::Constant *EmitAnnotateAttr(llvm::GlobalValue *GV,
+ const AnnotateAttr *AA,
+ SourceLocation L);
+
+ /// Add global annotations that are set on D, for the global GV. Those
+ /// annotations are emitted during finalization of the LLVM code.
+ void AddGlobalAnnotations(const ValueDecl *D, llvm::GlobalValue *GV);
+
private:
- llvm::GlobalValue *GetGlobalValue(llvm::StringRef Ref);
+ llvm::GlobalValue *GetGlobalValue(StringRef Ref);
- llvm::Constant *GetOrCreateLLVMFunction(llvm::StringRef MangledName,
- const llvm::Type *Ty,
+ llvm::Constant *GetOrCreateLLVMFunction(StringRef MangledName,
+ llvm::Type *Ty,
GlobalDecl D,
bool ForVTable,
llvm::Attributes ExtraAttrs =
llvm::Attribute::None);
- llvm::Constant *GetOrCreateLLVMGlobal(llvm::StringRef MangledName,
- const llvm::PointerType *PTy,
+ llvm::Constant *GetOrCreateLLVMGlobal(StringRef MangledName,
+ llvm::PointerType *PTy,
const VarDecl *D,
bool UnnamedAddr = false);
@@ -804,8 +874,6 @@ private:
/// suitable for use as a LLVM constructor or destructor array.
void EmitCtorList(const CtorList &Fns, const char *GlobalName);
- void EmitAnnotations(void);
-
/// EmitFundamentalRTTIDescriptor - Emit the RTTI descriptors for the
/// given type.
void EmitFundamentalRTTIDescriptor(QualType Type);
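The {i8 *, i8 *, i8 *, i32} record documented for EmitAnnotateAttr above, written out as the equivalent C++ struct (illustrative only; the real value is an llvm::ConstantStruct):

struct Annotation {
  void *AnnotatedValue;   // i8*: the GlobalValue, bitcast to i8*
  const char *Text;       // i8*: string from the AnnotateAttr
  const char *Unit;       // i8*: translation unit name
  int Line;               // i32: line of the annotated declaration
};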
diff --git a/lib/CodeGen/CodeGenTBAA.cpp b/lib/CodeGen/CodeGenTBAA.cpp
index 53e40b2238b3..887c1eabb0c6 100644
--- a/lib/CodeGen/CodeGenTBAA.cpp
+++ b/lib/CodeGen/CodeGenTBAA.cpp
@@ -58,7 +58,7 @@ llvm::MDNode *CodeGenTBAA::getChar() {
/// getTBAAInfoForNamedType - Create a TBAA tree node with the given string
/// as its identifier, and the given Parent node as its tree parent.
-llvm::MDNode *CodeGenTBAA::getTBAAInfoForNamedType(llvm::StringRef NameStr,
+llvm::MDNode *CodeGenTBAA::getTBAAInfoForNamedType(StringRef NameStr,
llvm::MDNode *Parent,
bool Readonly) {
// Currently there is only one flag defined - the readonly flag.
@@ -75,7 +75,7 @@ llvm::MDNode *CodeGenTBAA::getTBAAInfoForNamedType(llvm::StringRef NameStr,
// Create the mdnode.
unsigned Len = llvm::array_lengthof(Ops) - !Flags;
- return llvm::MDNode::get(VMContext, llvm::ArrayRef<llvm::Value*>(Ops, Len));
+ return llvm::MDNode::get(VMContext, llvm::makeArrayRef(Ops, Len));
}
static bool TypeHasMayAlias(QualType QTy) {
diff --git a/lib/CodeGen/CodeGenTBAA.h b/lib/CodeGen/CodeGenTBAA.h
index c4583473a0e0..9fe51fb33141 100644
--- a/lib/CodeGen/CodeGenTBAA.h
+++ b/lib/CodeGen/CodeGenTBAA.h
@@ -15,7 +15,7 @@
#ifndef CLANG_CODEGEN_CODEGENTBAA_H
#define CLANG_CODEGEN_CODEGENTBAA_H
-#include "llvm/LLVMContext.h"
+#include "clang/Basic/LLVM.h"
#include "llvm/ADT/DenseMap.h"
namespace llvm {
@@ -55,7 +55,7 @@ class CodeGenTBAA {
/// considered to be equivalent to it.
llvm::MDNode *getChar();
- llvm::MDNode *getTBAAInfoForNamedType(llvm::StringRef NameStr,
+ llvm::MDNode *getTBAAInfoForNamedType(StringRef NameStr,
llvm::MDNode *Parent,
bool Readonly = false);
diff --git a/lib/CodeGen/CodeGenTypes.cpp b/lib/CodeGen/CodeGenTypes.cpp
index 764688fcb12e..e0d921896580 100644
--- a/lib/CodeGen/CodeGenTypes.cpp
+++ b/lib/CodeGen/CodeGenTypes.cpp
@@ -29,7 +29,7 @@ using namespace CodeGen;
CodeGenTypes::CodeGenTypes(ASTContext &Ctx, llvm::Module& M,
const llvm::TargetData &TD, const ABIInfo &Info,
CGCXXABI &CXXABI, const CodeGenOptions &CGO)
- : Context(Ctx), Target(Ctx.Target), TheModule(M), TheTargetData(TD),
+ : Context(Ctx), Target(Ctx.getTargetInfo()), TheModule(M), TheTargetData(TD),
TheABIInfo(Info), TheCXXABI(CXXABI), CodeGenOpts(CGO) {
SkippedLayout = false;
}
@@ -47,7 +47,7 @@ CodeGenTypes::~CodeGenTypes() {
void CodeGenTypes::addRecordTypeName(const RecordDecl *RD,
llvm::StructType *Ty,
- llvm::StringRef suffix) {
+ StringRef suffix) {
llvm::SmallString<256> TypeName;
llvm::raw_svector_ostream OS(TypeName);
OS << RD->getKindName() << '.';
@@ -263,6 +263,8 @@ void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
static llvm::Type *getTypeForFormat(llvm::LLVMContext &VMContext,
const llvm::fltSemantics &format) {
+ if (&format == &llvm::APFloat::IEEEhalf)
+ return llvm::Type::getInt16Ty(VMContext);
if (&format == &llvm::APFloat::IEEEsingle)
return llvm::Type::getFloatTy(VMContext);
if (&format == &llvm::APFloat::IEEEdouble)
@@ -273,8 +275,7 @@ static llvm::Type *getTypeForFormat(llvm::LLVMContext &VMContext,
return llvm::Type::getPPC_FP128Ty(VMContext);
if (&format == &llvm::APFloat::x87DoubleExtended)
return llvm::Type::getX86_FP80Ty(VMContext);
- assert(0 && "Unknown float format!");
- return 0;
+ llvm_unreachable("Unknown float format!");
}
/// ConvertType - Convert the specified type to its LLVM form.
@@ -342,6 +343,14 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
static_cast<unsigned>(Context.getTypeSize(T)));
break;
+ case BuiltinType::Half:
+ // Half is special: it might be lowered to i16 (and will then be a
+ // storage-only type), or it can be represented as a set of native
+ // operations.
+
+ // FIXME: Ask target which kind of half FP it prefers (storage only vs
+ // native).
+ ResultType = llvm::Type::getInt16Ty(getLLVMContext());
+ break;
case BuiltinType::Float:
case BuiltinType::Double:
case BuiltinType::LongDouble:
@@ -418,7 +427,15 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
}
case Type::ConstantArray: {
const ConstantArrayType *A = cast<ConstantArrayType>(Ty);
- const llvm::Type *EltTy = ConvertTypeForMem(A->getElementType());
+ llvm::Type *EltTy = ConvertTypeForMem(A->getElementType());
+
+ // Lower arrays of undefined struct type to arrays of i8 just to have a
+ // concrete type.
+ if (!EltTy->isSized()) {
+ SkippedLayout = true;
+ EltTy = llvm::Type::getInt8Ty(getLLVMContext());
+ }
+
ResultType = llvm::ArrayType::get(EltTy, A->getSize().getZExtValue());
break;
}
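On the half lowering added above: the type becomes a 16-bit storage-only integer, so codegen moves only its bit pattern and arithmetic is expected to go through float. A hedged sketch of what the stored bits look like:

#include <cstdint>

typedef uint16_t half_bits;  // what the i16 lowering stores

// IEEE 754 binary16 for 1.0: sign 0, exponent 01111, mantissa 0 -> 0x3C00.
static const half_bits kOneHalf = 0x3C00;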
@@ -502,7 +519,7 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
// these.
llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(Ty)];
if (!T)
- T = llvm::StructType::createNamed(getLLVMContext(), "");
+ T = llvm::StructType::create(getLLVMContext());
ResultType = T;
break;
}
@@ -511,15 +528,15 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
// Protocol qualifications do not influence the LLVM type, we just return a
// pointer to the underlying interface type. We don't need to worry about
// recursive conversion.
- const llvm::Type *T =
- ConvertType(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
+ llvm::Type *T =
+ ConvertTypeForMem(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
ResultType = T->getPointerTo();
break;
}
case Type::Enum: {
const EnumDecl *ED = cast<EnumType>(Ty)->getDecl();
- if (ED->isDefinition() || ED->isFixed())
+ if (ED->isCompleteDefinition() || ED->isFixed())
return ConvertType(ED->getIntegerType());
// Return a placeholder 'i32' type. This can be changed later when the
// type is defined (see UpdateCompletedType), but is likely to be the
@@ -541,6 +558,11 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
getCXXABI().ConvertMemberPointerType(cast<MemberPointerType>(Ty));
break;
}
+
+ case Type::Atomic: {
+ ResultType = ConvertTypeForMem(cast<AtomicType>(Ty)->getValueType());
+ break;
+ }
}
assert(ResultType && "Didn't convert a type?");
@@ -559,7 +581,7 @@ llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
// If we don't have a StructType at all yet, create the forward declaration.
if (Entry == 0) {
- Entry = llvm::StructType::createNamed(getLLVMContext(), "");
+ Entry = llvm::StructType::create(getLLVMContext());
addRecordTypeName(RD, Entry, "");
}
llvm::StructType *Ty = Entry;
@@ -567,7 +589,7 @@ llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
// If this is still a forward declaration, or the LLVM type is already
// complete, there's nothing more to do.
RD = RD->getDefinition();
- if (RD == 0 || !Ty->isOpaque())
+ if (RD == 0 || !RD->isCompleteDefinition() || !Ty->isOpaque())
return Ty;
// If converting this type would cause us to infinitely loop, don't do it!
diff --git a/lib/CodeGen/CodeGenTypes.h b/lib/CodeGen/CodeGenTypes.h
index 7c0fb8164373..7f0f8ac5f0c5 100644
--- a/lib/CodeGen/CodeGenTypes.h
+++ b/lib/CodeGen/CodeGenTypes.h
@@ -93,7 +93,7 @@ class CodeGenTypes {
/// a recursive struct conversion, set this to true.
bool SkippedLayout;
- llvm::SmallVector<const RecordDecl *, 8> DeferredRecords;
+ SmallVector<const RecordDecl *, 8> DeferredRecords;
private:
/// TypeCache - This map keeps cache of llvm::Types
@@ -138,7 +138,7 @@ public:
/// GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable,
/// given a CXXMethodDecl. If the method has an incomplete return type,
/// and/or incomplete argument types, this will return the opaque type.
- const llvm::Type *GetFunctionTypeForVTable(GlobalDecl GD);
+ llvm::Type *GetFunctionTypeForVTable(GlobalDecl GD);
const CGRecordLayout &getCGRecordLayout(const RecordDecl*);
@@ -190,7 +190,7 @@ public:
///
/// \param ArgTys - must all actually be canonical as params
const CGFunctionInfo &getFunctionInfo(CanQualType RetTy,
- const llvm::SmallVectorImpl<CanQualType> &ArgTys,
+ const SmallVectorImpl<CanQualType> &ArgTys,
const FunctionType::ExtInfo &Info);
/// \brief Compute a new LLVM record layout object for the given record.
@@ -200,7 +200,7 @@ public:
/// addRecordTypeName - Compute a name from the given record decl with an
/// optional suffix and name the given LLVM type using it.
void addRecordTypeName(const RecordDecl *RD, llvm::StructType *Ty,
- llvm::StringRef suffix);
+ StringRef suffix);
public: // These are internal details of CGT that shouldn't be used externally.
@@ -211,7 +211,7 @@ public: // These are internal details of CGT that shouldn't be used externally.
/// argument types it would be passed as on the provided vector \arg
/// ArgTys. See ABIArgInfo::Expand.
void GetExpandedTypes(QualType type,
- llvm::SmallVectorImpl<llvm::Type*> &expanded);
+ SmallVectorImpl<llvm::Type*> &expanded);
/// IsZeroInitializable - Return whether a type can be
/// zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
diff --git a/lib/CodeGen/ItaniumCXXABI.cpp b/lib/CodeGen/ItaniumCXXABI.cpp
index 0c86080fa80e..c3f635aed642 100644
--- a/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/lib/CodeGen/ItaniumCXXABI.cpp
@@ -96,12 +96,12 @@ public:
void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
CXXCtorType T,
CanQualType &ResTy,
- llvm::SmallVectorImpl<CanQualType> &ArgTys);
+ SmallVectorImpl<CanQualType> &ArgTys);
void BuildDestructorSignature(const CXXDestructorDecl *Dtor,
CXXDtorType T,
CanQualType &ResTy,
- llvm::SmallVectorImpl<CanQualType> &ArgTys);
+ SmallVectorImpl<CanQualType> &ArgTys);
void BuildInstanceFunctionParams(CodeGenFunction &CGF,
QualType &ResTy,
@@ -131,12 +131,12 @@ public:
void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
CXXCtorType T,
CanQualType &ResTy,
- llvm::SmallVectorImpl<CanQualType> &ArgTys);
+ SmallVectorImpl<CanQualType> &ArgTys);
void BuildDestructorSignature(const CXXDestructorDecl *Dtor,
CXXDtorType T,
CanQualType &ResTy,
- llvm::SmallVectorImpl<CanQualType> &ArgTys);
+ SmallVectorImpl<CanQualType> &ArgTys);
void BuildInstanceFunctionParams(CodeGenFunction &CGF,
QualType &ResTy,
@@ -215,11 +215,11 @@ ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
const CXXRecordDecl *RD =
cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(RD, FPT),
FPT->isVariadic());
- const llvm::IntegerType *ptrdiff = getPtrDiffTy();
+ llvm::IntegerType *ptrdiff = getPtrDiffTy();
llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(ptrdiff, 1);
llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
@@ -259,7 +259,7 @@ ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
CGF.EmitBlock(FnVirtual);
// Cast the adjusted this to a pointer to vtable pointer and load.
- const llvm::Type *VTableTy = Builder.getInt8PtrTy();
+ llvm::Type *VTableTy = Builder.getInt8PtrTy();
llvm::Value *VTable = Builder.CreateBitCast(This, VTableTy->getPointerTo());
VTable = Builder.CreateLoad(VTable, "memptr.vtable");
@@ -307,7 +307,7 @@ llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF,
// Cast the address to the appropriate pointer type, adopting the
// address space of the base pointer.
- const llvm::Type *PType
+ llvm::Type *PType
= CGF.ConvertTypeForMem(MPT->getPointeeType())->getPointerTo(AS);
return Builder.CreateBitCast(Addr, PType);
}
@@ -478,7 +478,7 @@ ItaniumCXXABI::EmitMemberPointerConversion(llvm::Constant *C,
llvm::Constant *
ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
- const llvm::Type *ptrdiff_t = getPtrDiffTy();
+ llvm::Type *ptrdiff_t = getPtrDiffTy();
// Itanium C++ ABI 2.3:
// A NULL pointer is represented as -1.
@@ -504,16 +504,16 @@ llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
MD = MD->getCanonicalDecl();
CodeGenTypes &Types = CGM.getTypes();
- const llvm::Type *ptrdiff_t = getPtrDiffTy();
+ llvm::Type *ptrdiff_t = getPtrDiffTy();
// Get the function pointer (or index if this is a virtual function).
llvm::Constant *MemPtr[2];
if (MD->isVirtual()) {
- uint64_t Index = CGM.getVTables().getMethodVTableIndex(MD);
+ uint64_t Index = CGM.getVTableContext().getMethodVTableIndex(MD);
const ASTContext &Context = getContext();
CharUnits PointerWidth =
- Context.toCharUnitsFromBits(Context.Target.getPointerWidth(0));
+ Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
uint64_t VTableOffset = (Index * PointerWidth.getQuantity());
if (IsARM) {
@@ -535,7 +535,7 @@ llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
}
} else {
const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
- const llvm::Type *Ty;
+ llvm::Type *Ty;
// Check whether the function has a computable LLVM signature.
if (Types.isFuncTypeConvertible(FPT)) {
// The function has a computable LLVM signature; use the correct type.
@@ -678,7 +678,7 @@ bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
void ItaniumCXXABI::BuildConstructorSignature(const CXXConstructorDecl *Ctor,
CXXCtorType Type,
CanQualType &ResTy,
- llvm::SmallVectorImpl<CanQualType> &ArgTys) {
+ SmallVectorImpl<CanQualType> &ArgTys) {
ASTContext &Context = getContext();
// 'this' is already there.
@@ -692,7 +692,7 @@ void ItaniumCXXABI::BuildConstructorSignature(const CXXConstructorDecl *Ctor,
void ARMCXXABI::BuildConstructorSignature(const CXXConstructorDecl *Ctor,
CXXCtorType Type,
CanQualType &ResTy,
- llvm::SmallVectorImpl<CanQualType> &ArgTys) {
+ SmallVectorImpl<CanQualType> &ArgTys) {
ItaniumCXXABI::BuildConstructorSignature(Ctor, Type, ResTy, ArgTys);
ResTy = ArgTys[0];
}
@@ -702,7 +702,7 @@ void ARMCXXABI::BuildConstructorSignature(const CXXConstructorDecl *Ctor,
void ItaniumCXXABI::BuildDestructorSignature(const CXXDestructorDecl *Dtor,
CXXDtorType Type,
CanQualType &ResTy,
- llvm::SmallVectorImpl<CanQualType> &ArgTys) {
+ SmallVectorImpl<CanQualType> &ArgTys) {
ASTContext &Context = getContext();
// 'this' is already there.
@@ -717,7 +717,7 @@ void ItaniumCXXABI::BuildDestructorSignature(const CXXDestructorDecl *Dtor,
void ARMCXXABI::BuildDestructorSignature(const CXXDestructorDecl *Dtor,
CXXDtorType Type,
CanQualType &ResTy,
- llvm::SmallVectorImpl<CanQualType> &ArgTys) {
+ SmallVectorImpl<CanQualType> &ArgTys) {
ItaniumCXXABI::BuildDestructorSignature(Dtor, Type, ResTy, ArgTys);
if (Type != Dtor_Deleting)
@@ -784,7 +784,7 @@ void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
// Destructor thunks in the ARM ABI have indeterminate results.
- const llvm::Type *T =
+ llvm::Type *T =
cast<llvm::PointerType>(CGF.ReturnValue->getType())->getElementType();
RValue Undef = RValue::get(llvm::UndefValue::get(T));
return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
@@ -829,27 +829,7 @@ bool ItaniumCXXABI::NeedsArrayCookie(const CXXDeleteExpr *expr,
if (expr->doesUsualArrayDeleteWantSize())
return true;
- // Automatic Reference Counting:
- // We need an array cookie for pointers with strong or weak lifetime.
- if (getContext().getLangOptions().ObjCAutoRefCount &&
- elementType->isObjCLifetimeType()) {
- switch (elementType.getObjCLifetime()) {
- case Qualifiers::OCL_None:
- case Qualifiers::OCL_ExplicitNone:
- case Qualifiers::OCL_Autoreleasing:
- return false;
-
- case Qualifiers::OCL_Strong:
- case Qualifiers::OCL_Weak:
- return true;
- }
- }
-
- // Otherwise, if the class has a non-trivial destructor, it always
- // needs a cookie.
- const CXXRecordDecl *record =
- elementType->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
- return (record && !record->hasTrivialDestructor());
+ return elementType.isDestructedType();
}
CharUnits ItaniumCXXABI::GetArrayCookieSize(const CXXNewExpr *expr) {
@@ -907,7 +887,7 @@ void ItaniumCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
CharUnits &CookieSize) {
// Derive a char* in the same address space as the pointer.
unsigned AS = cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
- const llvm::Type *CharPtrTy = CGF.Builder.getInt8Ty()->getPointerTo(AS);
+ llvm::Type *CharPtrTy = CGF.Builder.getInt8Ty()->getPointerTo(AS);
// If we don't need an array cookie, bail out early.
if (!NeedsArrayCookie(expr, ElementType)) {
@@ -919,7 +899,7 @@ void ItaniumCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
QualType SizeTy = getContext().getSizeType();
CharUnits SizeSize = getContext().getTypeSizeInChars(SizeTy);
- const llvm::Type *SizeLTy = CGF.ConvertType(SizeTy);
+ llvm::Type *SizeLTy = CGF.ConvertType(SizeTy);
CookieSize
= std::max(SizeSize, getContext().getTypeAlignInChars(ElementType));
@@ -968,7 +948,7 @@ llvm::Value *ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
ASTContext &Ctx = getContext();
CharUnits SizeSize = Ctx.getTypeSizeInChars(Ctx.getSizeType());
- const llvm::IntegerType *SizeTy =
+ llvm::IntegerType *SizeTy =
cast<llvm::IntegerType>(CGF.ConvertType(Ctx.getSizeType()));
// The cookie is always at the start of the buffer.
@@ -1000,7 +980,7 @@ void ARMCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
CharUnits &CookieSize) {
// Derive a char* in the same address space as the pointer.
unsigned AS = cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
- const llvm::Type *CharPtrTy = CGF.Builder.getInt8Ty()->getPointerTo(AS);
+ llvm::Type *CharPtrTy = CGF.Builder.getInt8Ty()->getPointerTo(AS);
// If we don't need an array cookie, bail out early.
if (!NeedsArrayCookie(expr, ElementType)) {
@@ -1012,7 +992,7 @@ void ARMCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
QualType SizeTy = getContext().getSizeType();
CharUnits SizeSize = getContext().getTypeSizeInChars(SizeTy);
- const llvm::Type *SizeLTy = CGF.ConvertType(SizeTy);
+ llvm::Type *SizeLTy = CGF.ConvertType(SizeTy);
// The cookie size is always 2 * sizeof(size_t).
CookieSize = 2 * SizeSize;
@@ -1036,10 +1016,9 @@ void ARMCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
static llvm::Constant *getGuardAcquireFn(CodeGenModule &CGM,
llvm::PointerType *GuardPtrTy) {
// int __cxa_guard_acquire(__guard *guard_object);
- llvm::Type *ArgTys[] = { GuardPtrTy };
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
- ArgTys, /*isVarArg=*/false);
+ GuardPtrTy, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_acquire");
}
@@ -1047,10 +1026,9 @@ static llvm::Constant *getGuardAcquireFn(CodeGenModule &CGM,
static llvm::Constant *getGuardReleaseFn(CodeGenModule &CGM,
llvm::PointerType *GuardPtrTy) {
// void __cxa_guard_release(__guard *guard_object);
- llvm::Type *ArgTys[] = { GuardPtrTy };
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
- ArgTys, /*isVarArg=*/false);
+ GuardPtrTy, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_release");
}
@@ -1058,10 +1036,9 @@ static llvm::Constant *getGuardReleaseFn(CodeGenModule &CGM,
static llvm::Constant *getGuardAbortFn(CodeGenModule &CGM,
llvm::PointerType *GuardPtrTy) {
// void __cxa_guard_abort(__guard *guard_object);
- llvm::Type *ArgTys[] = { GuardPtrTy };
- const llvm::FunctionType *FTy =
+ llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
- ArgTys, /*isVarArg=*/false);
+ GuardPtrTy, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_abort");
}
@@ -1090,7 +1067,7 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
bool threadsafe =
(getContext().getLangOptions().ThreadsafeStatics && D.isLocalVarDecl());
- const llvm::IntegerType *GuardTy;
+ llvm::IntegerType *GuardTy;
// If we have a global variable with internal linkage and thread-safe statics
// are disabled, we can just let the guard variable be of type i8.
@@ -1152,21 +1129,28 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
// }
} else {
// Load the first byte of the guard variable.
- const llvm::Type *PtrTy = Builder.getInt8PtrTy();
- llvm::Value *V =
- Builder.CreateLoad(Builder.CreateBitCast(GuardVariable, PtrTy), "tmp");
-
- IsInitialized = Builder.CreateIsNull(V, "guard.uninitialized");
+ llvm::Type *PtrTy = Builder.getInt8PtrTy();
+ llvm::LoadInst *LI =
+ Builder.CreateLoad(Builder.CreateBitCast(GuardVariable, PtrTy));
+ LI->setAlignment(1);
+
+ // Itanium ABI:
+ // An implementation supporting thread-safety on multiprocessor
+ // systems must also guarantee that references to the initialized
+ // object do not occur before the load of the initialization flag.
+ //
+ // In LLVM, we do this by marking the load Acquire.
+ if (threadsafe)
+ LI->setAtomic(llvm::Acquire);
+
+ IsInitialized = Builder.CreateIsNull(LI, "guard.uninitialized");
}
llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
- llvm::BasicBlock *NoCheckBlock = EndBlock;
- if (threadsafe) NoCheckBlock = CGF.createBasicBlock("init.barrier");
-
// Check if the first byte of the guard variable is zero.
- Builder.CreateCondBr(IsInitialized, InitCheckBlock, NoCheckBlock);
+ Builder.CreateCondBr(IsInitialized, InitCheckBlock, EndBlock);
CGF.EmitBlock(InitCheckBlock);
@@ -1200,23 +1184,5 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
Builder.CreateStore(llvm::ConstantInt::get(GuardTy, 1), GuardVariable);
}
- // Emit an acquire memory barrier if using thread-safe statics:
- // Itanium ABI:
- // An implementation supporting thread-safety on multiprocessor
- // systems must also guarantee that references to the initialized
- // object do not occur before the load of the initialization flag.
- if (threadsafe) {
- Builder.CreateBr(EndBlock);
- CGF.EmitBlock(NoCheckBlock);
-
- llvm::Value *_false = Builder.getFalse();
- llvm::Value *_true = Builder.getTrue();
-
- Builder.CreateCall5(CGM.getIntrinsic(llvm::Intrinsic::memory_barrier),
- /* load-load, load-store */ _true, _true,
- /* store-load, store-store */ _false, _false,
- /* device or I/O */ _false);
- }
-
CGF.EmitBlock(EndBlock);
}
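A compact C++11 restatement (hypothetical names, a sketch rather than the full __cxa_guard protocol) of the fast path the patch switches to: a single acquire load of the guard byte replaces the explicit memory_barrier intrinsic the removed block emitted.

#include <atomic>

static std::atomic<unsigned char> Guard{0};

template <class Init>
void initOnce(Init init) {
  // Acquire pairs with the release below: if we observe the flag set, the
  // initialized object is visible too, per the Itanium ABI text quoted
  // above. (The real slow path serializes via __cxa_guard_acquire.)
  if (Guard.load(std::memory_order_acquire) == 0) {
    init();
    Guard.store(1, std::memory_order_release);
  }
}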
diff --git a/lib/CodeGen/MicrosoftCXXABI.cpp b/lib/CodeGen/MicrosoftCXXABI.cpp
index 747e5e3222c2..e200e7961701 100644
--- a/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -31,7 +31,7 @@ public:
void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
CXXCtorType Type,
CanQualType &ResTy,
- llvm::SmallVectorImpl<CanQualType> &ArgTys) {
+ SmallVectorImpl<CanQualType> &ArgTys) {
// 'this' is already in place
// TODO: 'for base' flag
}
@@ -39,7 +39,7 @@ public:
void BuildDestructorSignature(const CXXDestructorDecl *Ctor,
CXXDtorType Type,
CanQualType &ResTy,
- llvm::SmallVectorImpl<CanQualType> &ArgTys) {
+ SmallVectorImpl<CanQualType> &ArgTys) {
// 'this' is already in place
// TODO: 'for base' flag
}
diff --git a/lib/CodeGen/ModuleBuilder.cpp b/lib/CodeGen/ModuleBuilder.cpp
index 4a2c4abbeb03..793ee9192e6b 100644
--- a/lib/CodeGen/ModuleBuilder.cpp
+++ b/lib/CodeGen/ModuleBuilder.cpp
@@ -27,7 +27,7 @@ using namespace clang;
namespace {
class CodeGeneratorImpl : public CodeGenerator {
- Diagnostic &Diags;
+ DiagnosticsEngine &Diags;
llvm::OwningPtr<const llvm::TargetData> TD;
ASTContext *Ctx;
const CodeGenOptions CodeGenOpts; // Intentionally copied in.
@@ -35,7 +35,7 @@ namespace {
llvm::OwningPtr<llvm::Module> M;
llvm::OwningPtr<CodeGen::CodeGenModule> Builder;
public:
- CodeGeneratorImpl(Diagnostic &diags, const std::string& ModuleName,
+ CodeGeneratorImpl(DiagnosticsEngine &diags, const std::string& ModuleName,
const CodeGenOptions &CGO, llvm::LLVMContext& C)
: Diags(diags), CodeGenOpts(CGO), M(new llvm::Module(ModuleName, C)) {}
@@ -52,9 +52,9 @@ namespace {
virtual void Initialize(ASTContext &Context) {
Ctx = &Context;
- M->setTargetTriple(Ctx->Target.getTriple().getTriple());
- M->setDataLayout(Ctx->Target.getTargetDescription());
- TD.reset(new llvm::TargetData(Ctx->Target.getTargetDescription()));
+ M->setTargetTriple(Ctx->getTargetInfo().getTriple().getTriple());
+ M->setDataLayout(Ctx->getTargetInfo().getTargetDescription());
+ TD.reset(new llvm::TargetData(Ctx->getTargetInfo().getTargetDescription()));
Builder.reset(new CodeGen::CodeGenModule(Context, CodeGenOpts,
*M, *TD, Diags));
}
@@ -112,7 +112,7 @@ namespace {
};
}
-CodeGenerator *clang::CreateLLVMCodeGen(Diagnostic &Diags,
+CodeGenerator *clang::CreateLLVMCodeGen(DiagnosticsEngine &Diags,
const std::string& ModuleName,
const CodeGenOptions &CGO,
llvm::LLVMContext& C) {
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index df2c1bd98cca..e1dc8f7ffdbd 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -57,12 +57,12 @@ const llvm::TargetData &ABIInfo::getTargetData() const {
void ABIArgInfo::dump() const {
- llvm::raw_ostream &OS = llvm::errs();
+ raw_ostream &OS = llvm::errs();
OS << "(ABIArgInfo Kind=";
switch (TheKind) {
case Direct:
OS << "Direct Type=";
- if (const llvm::Type *Ty = getCoerceToType())
+ if (llvm::Type *Ty = getCoerceToType())
Ty->print(OS);
else
OS << "null";
@@ -87,6 +87,25 @@ void ABIArgInfo::dump() const {
TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }
+// If someone can figure out a general rule for this, that would be great.
+// It's probably just doomed to be platform-dependent, though.
+unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
+ // Verified for:
+ // x86-64 FreeBSD, Linux, Darwin
+ // x86-32 FreeBSD, Linux, Darwin
+ // PowerPC Linux, Darwin
+ // ARM Darwin (*not* EABI)
+ return 32;
+}
+
+bool TargetCodeGenInfo::isNoProtoCallVariadic(CallingConv CC) const {
+ // The following conventions are known to require this to be false:
+ // x86_stdcall
+ // MIPS
+ // For everything else, we just prefer false unless we opt out.
+ return false;
+}
+
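The 32 returned above is the space clang reserves for the unwind exception header on the listed targets (ARM EABI overrides it to 88 later in this diff). A hedged way to eyeball the host's own figure; note the reserved size need not equal sizeof(_Unwind_Exception) on every platform:

#include <cstdio>
#include <unwind.h>

int main() {
  std::printf("host _Unwind_Exception: %zu bytes\n",
              sizeof(struct _Unwind_Exception));
  return 0;
}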
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);
/// isEmptyField - Return true iff the field is "empty", that is, it
@@ -348,7 +367,7 @@ ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
/// UseX86_MMXType - Return true if this is an MMX type that should use the special
/// x86_mmx type.
-bool UseX86_MMXType(const llvm::Type *IRType) {
+bool UseX86_MMXType(llvm::Type *IRType) {
// If the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>, use the
// special x86_mmx type.
return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
@@ -357,7 +376,7 @@ bool UseX86_MMXType(const llvm::Type *IRType) {
}
static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
- llvm::StringRef Constraint,
+ StringRef Constraint,
llvm::Type* Ty) {
if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy())
return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
@@ -428,7 +447,7 @@ public:
llvm::Value *Address) const;
llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
- llvm::StringRef Constraint,
+ StringRef Constraint,
llvm::Type* Ty) const {
return X86AdjustInlineAsmType(CGF, Constraint, Ty);
}
@@ -724,8 +743,8 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
- const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
+ llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
@@ -765,7 +784,7 @@ bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
CodeGen::CGBuilderTy &Builder = CGF.Builder;
llvm::LLVMContext &Context = CGF.getLLVMContext();
- const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
+ llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
// 0-7 are the eight integer registers; the order is different
@@ -892,7 +911,7 @@ class X86_64ABIInfo : public ABIInfo {
/// required strict binary compatibility with older versions of GCC
/// may need to exempt themselves.
bool honorsRevision0_98() const {
- return !getContext().Target.getTriple().isOSDarwin();
+ return !getContext().getTargetInfo().getTriple().isOSDarwin();
}
public:
@@ -932,7 +951,7 @@ public:
CodeGen::CGBuilderTy &Builder = CGF.Builder;
llvm::LLVMContext &Context = CGF.getLLVMContext();
- const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
+ llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
// 0-15 are the 16 integer registers.
@@ -943,11 +962,20 @@ public:
}
llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
- llvm::StringRef Constraint,
+ StringRef Constraint,
llvm::Type* Ty) const {
return X86AdjustInlineAsmType(CGF, Constraint, Ty);
}
+ bool isNoProtoCallVariadic(CallingConv CC) const {
+ // The default CC on x86-64 sets %al to the number of SSE
+ // registers used, and GCC sets this when calling an unprototyped
+ // function, so we override the default behavior.
+ if (CC == CC_Default || CC == CC_C) return true;
+
+ return TargetCodeGenInfo::isNoProtoCallVariadic(CC);
+ }
+
};
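Why the x86-64 override above answers true for the C conventions: the SysV ABI has callers of variadic functions pass the number of vector registers used in %al, and GCC does the same for unprototyped calls, so treating them as variadic keeps the two compilers compatible. A hedged illustration of the variadic side:

#include <cstdarg>
#include <cstdio>

static double sum(int n, ...) {
  va_list ap;
  va_start(ap, n);
  double total = 0;
  for (int i = 0; i < n; ++i)
    total += va_arg(ap, double);  // doubles arrive in %xmm0..%xmm7
  va_end(ap);
  return total;
}

int main() {
  std::printf("%g\n", sum(2, 1.0, 2.0));  // caller sets %al = 2
  return 0;
}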
class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
@@ -964,7 +992,7 @@ public:
CodeGen::CGBuilderTy &Builder = CGF.Builder;
llvm::LLVMContext &Context = CGF.getLLVMContext();
- const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
+ llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
// 0-15 are the 16 integer registers.
@@ -1309,8 +1337,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
continue;
uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
- uint64_t Size =
- i->getBitWidth()->EvaluateAsInt(getContext()).getZExtValue();
+ uint64_t Size = i->getBitWidthValue(getContext());
uint64_t EB_Lo = Offset / 64;
uint64_t EB_Hi = (Offset + Size - 1) / 64;
@@ -1489,14 +1516,14 @@ static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
/// float member at the specified offset. For example, {int,{float}} has a
/// float at offset 4. It is conservatively correct for this routine to return
/// false.
-static bool ContainsFloatAtOffset(const llvm::Type *IRType, unsigned IROffset,
+static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
const llvm::TargetData &TD) {
// Base case if we find a float.
if (IROffset == 0 && IRType->isFloatTy())
return true;
// If this is a struct, recurse into the field at the specified offset.
- if (const llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
+ if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
const llvm::StructLayout *SL = TD.getStructLayout(STy);
unsigned Elt = SL->getElementContainingOffset(IROffset);
IROffset -= SL->getElementOffset(Elt);
@@ -1504,8 +1531,8 @@ static bool ContainsFloatAtOffset(const llvm::Type *IRType, unsigned IROffset,
}
// If this is an array, recurse into the field at the specified offset.
- if (const llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
- const llvm::Type *EltTy = ATy->getElementType();
+ if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
+ llvm::Type *EltTy = ATy->getElementType();
unsigned EltSize = TD.getTypeAllocSize(EltTy);
IROffset -= IROffset/EltSize*EltSize;
return ContainsFloatAtOffset(EltTy, IROffset, TD);
@@ -1578,7 +1605,7 @@ GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
}
}
- if (const llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
+ if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
// If this is a struct, recurse into the field at the specified offset.
const llvm::StructLayout *SL = getTargetData().getStructLayout(STy);
if (IROffset < SL->getSizeInBytes()) {
@@ -1590,7 +1617,7 @@ GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
}
}
- if (const llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
+ if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
llvm::Type *EltTy = ATy->getElementType();
unsigned EltSize = getTargetData().getTypeAllocSize(EltTy);
unsigned EltOffset = IROffset/EltSize*EltSize;
@@ -1678,7 +1705,7 @@ classifyReturnType(QualType RetTy) const {
case SSEUp:
case X87Up:
- assert(0 && "Invalid classification for lo word.");
+ llvm_unreachable("Invalid classification for lo word.");
// AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
// hidden argument.
@@ -1732,7 +1759,7 @@ classifyReturnType(QualType RetTy) const {
// never occur as a hi class.
case Memory:
case X87:
- assert(0 && "Invalid classification for hi word.");
+ llvm_unreachable("Invalid classification for hi word.");
case ComplexX87: // Previously handled.
case NoClass:
@@ -1820,7 +1847,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
case SSEUp:
case X87Up:
- assert(0 && "Invalid classification for lo word.");
+ llvm_unreachable("Invalid classification for lo word.");
// AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
// available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
@@ -1864,8 +1891,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
case Memory:
case X87:
case ComplexX87:
- assert(0 && "Invalid classification for hi word.");
- break;
+ llvm_unreachable("Invalid classification for hi word.");
case NoClass: break;
@@ -1970,7 +1996,7 @@ static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
}
// AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
- const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
llvm::Value *Res =
CGF.Builder.CreateBitCast(overflow_arg_area,
llvm::PointerType::getUnqual(LTy));
@@ -2061,22 +2087,22 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
// collect arguments from different places; often what should result in a
// simple assembling of a structure from scattered addresses has many more
// loads than necessary. Can we clean this up?
- const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
llvm::Value *RegAddr =
CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
"reg_save_area");
if (neededInt && neededSSE) {
// FIXME: Cleanup.
assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
- const llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
+ llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
llvm::Value *Tmp = CGF.CreateTempAlloca(ST);
assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
- const llvm::Type *TyLo = ST->getElementType(0);
- const llvm::Type *TyHi = ST->getElementType(1);
+ llvm::Type *TyLo = ST->getElementType(0);
+ llvm::Type *TyHi = ST->getElementType(1);
assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
"Unexpected ABI info for mixed regs");
- const llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
- const llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
+ llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
+ llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
llvm::Value *RegLoAddr = TyLo->isFloatingPointTy() ? FPAddr : GPAddr;
@@ -2104,9 +2130,9 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16);
llvm::Type *DoubleTy = llvm::Type::getDoubleTy(VMContext);
- const llvm::Type *DblPtrTy =
+ llvm::Type *DblPtrTy =
llvm::PointerType::getUnqual(DoubleTy);
- const llvm::StructType *ST = llvm::StructType::get(DoubleTy,
+ llvm::StructType *ST = llvm::StructType::get(DoubleTy,
DoubleTy, NULL);
llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
@@ -2166,7 +2192,7 @@ ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty) const {
// FIXME: mingw-w64-gcc emits 128-bit struct as i128
if (Size == 128 &&
- getContext().Target.getTriple().getOS() == llvm::Triple::MinGW32)
+ getContext().getTargetInfo().getTriple().getOS() == llvm::Triple::MinGW32)
return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
Size));
@@ -2198,8 +2224,8 @@ void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
- const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
+ llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
@@ -2246,7 +2272,7 @@ PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
CodeGen::CGBuilderTy &Builder = CGF.Builder;
llvm::LLVMContext &Context = CGF.getLLVMContext();
- const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
+ llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
@@ -2300,6 +2326,11 @@ private:
public:
ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) {}
+ bool isEABI() const {
+ StringRef Env = getContext().getTargetInfo().getTriple().getEnvironmentName();
+ return (Env == "gnueabi" || Env == "eabi");
+ }
+
private:
ABIKind getABIKind() const { return Kind; }
@@ -2317,11 +2348,15 @@ public:
ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
:TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}
+ const ARMABIInfo &getABIInfo() const {
+ return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
+ }
+
int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
return 13;
}
- llvm::StringRef getARCRetainAutoreleasedReturnValueMarker() const {
+ StringRef getARCRetainAutoreleasedReturnValueMarker() const {
return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";
}
@@ -2330,7 +2365,7 @@ public:
CodeGen::CGBuilderTy &Builder = CGF.Builder;
llvm::LLVMContext &Context = CGF.getLLVMContext();
- const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
+ llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
// 0-15 are the 16 integer registers.
@@ -2338,6 +2373,11 @@ public:
return false;
}
+
+ unsigned getSizeOfUnwindException() const {
+ if (getABIInfo().isEABI()) return 88;
+ return TargetCodeGenInfo::getSizeOfUnwindException();
+ }
};
}
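
The 88 bytes reported above for EABI targets is the size of the ARM EHABI unwinder control block, as opposed to the 32-byte generic header that the base implementation assumes (see the doc comment in TargetInfo.h further down). A minimal C sketch of that generic header, assuming the field layout from the Itanium C++ ABI:

#include <stdint.h>

/* Generic _Unwind_Exception header: 8 + 8 + 8 + 8 = 32 bytes on LP64,
 * matching the default getSizeOfUnwindException(). ARM EHABI instead
 * defines an 88-byte unwinder control block, which the override above
 * reports for EABI triples. The int parameter stands in for
 * _Unwind_Reason_Code. */
struct _Unwind_Exception {
  uint64_t exception_class;
  void (*exception_cleanup)(int reason, struct _Unwind_Exception *exc);
  uint64_t private_1;
  uint64_t private_2;
};
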
@@ -2354,8 +2394,7 @@ void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
// Calling convention as default by an ABI.
llvm::CallingConv::ID DefaultCC;
- llvm::StringRef Env = getContext().Target.getTriple().getEnvironmentName();
- if (Env == "gnueabi" || Env == "eabi")
+ if (isEABI())
DefaultCC = llvm::CallingConv::ARM_AAPCS;
else
DefaultCC = llvm::CallingConv::ARM_APCS;
@@ -2379,6 +2418,73 @@ void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
}
}
+/// isHomogeneousAggregate - Return true if a type is an AAPCS-VFP homogeneous
+/// aggregate. If HAMembers is non-null, the number of base elements
+/// contained in the type is returned through it; this is used for the
+/// recursive calls that check aggregate component types.
+static bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
+ ASTContext &Context,
+ uint64_t *HAMembers = 0) {
+ uint64_t Members;
+ if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
+ if (!isHomogeneousAggregate(AT->getElementType(), Base, Context, &Members))
+ return false;
+ Members *= AT->getSize().getZExtValue();
+ } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->isUnion() || RD->hasFlexibleArrayMember())
+ return false;
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ if (!CXXRD->isAggregate())
+ return false;
+ }
+ Members = 0;
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i) {
+ const FieldDecl *FD = *i;
+ uint64_t FldMembers;
+ if (!isHomogeneousAggregate(FD->getType(), Base, Context, &FldMembers))
+ return false;
+ Members += FldMembers;
+ }
+ } else {
+ Members = 1;
+ if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
+ Members = 2;
+ Ty = CT->getElementType();
+ }
+
+ // Homogeneous aggregates for AAPCS-VFP must have base types of float,
+ // double, or 64-bit or 128-bit vectors.
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
+ if (BT->getKind() != BuiltinType::Float &&
+ BT->getKind() != BuiltinType::Double)
+ return false;
+ } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ unsigned VecSize = Context.getTypeSize(VT);
+ if (VecSize != 64 && VecSize != 128)
+ return false;
+ } else {
+ return false;
+ }
+
+ // The base type must be the same for all members. Vector types of the
+ // same total size are treated as being equivalent here.
+ const Type *TyPtr = Ty.getTypePtr();
+ if (!Base)
+ Base = TyPtr;
+ if (Base != TyPtr &&
+ (!Base->isVectorType() || !TyPtr->isVectorType() ||
+ Context.getTypeSize(Base) != Context.getTypeSize(TyPtr)))
+ return false;
+ }
+
+ // Homogeneous Aggregates can have at most 4 members of the base type.
+ if (HAMembers)
+ *HAMembers = Members;
+ return (Members <= 4);
+}
+
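
To make the predicate concrete: a hypothetical C struct whose fields flatten to at most four elements of a single base type is a homogeneous aggregate, while mixing base types disqualifies it. A sketch with illustrative names, in plain C:

/* Quad flattens to four floats: Base = float, Members = 4, so under
 * AAPCS-VFP classifyArgumentType() below expands it into VFP registers.
 * Mixed has two distinct base types and is rejected, falling through to
 * the generic coercion path. */
struct Quad  { float x, y, z, w; };
struct Mixed { float f; double d; };

float quad_sum(struct Quad q) { return q.x + q.y + q.z + q.w; }
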
ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const {
if (!isAggregateTypeForABI(Ty)) {
// Treat an enum type as its underlying type.
@@ -2398,23 +2504,26 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const {
if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ if (getABIKind() == ARMABIInfo::AAPCS_VFP) {
+ // Homogeneous Aggregates need to be expanded.
+ const Type *Base = 0;
+ if (isHomogeneousAggregate(Ty, Base, getContext()))
+ return ABIArgInfo::getExpand();
+ }
+
// Otherwise, pass by coercing to a structure of the appropriate size.
//
+ // FIXME: This is kind of nasty... but there isn't much choice because the ARM
+ // backend doesn't support byval.
// FIXME: This doesn't handle alignment > 64 bits.
- const llvm::Type* ElemTy;
+ llvm::Type* ElemTy;
unsigned SizeRegs;
- if (getContext().getTypeSizeInChars(Ty) <= CharUnits::fromQuantity(64)) {
- ElemTy = llvm::Type::getInt32Ty(getVMContext());
- SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
- } else if (getABIKind() == ARMABIInfo::APCS) {
- // Initial ARM ByVal support is APCS-only.
- return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
- } else {
- // FIXME: This is kind of nasty... but there isn't much choice
- // because most of the ARM calling conventions don't yet support
- // byval.
+ if (getContext().getTypeAlign(Ty) > 32) {
ElemTy = llvm::Type::getInt64Ty(getVMContext());
SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
+ } else {
+ ElemTy = llvm::Type::getInt32Ty(getVMContext());
+ SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
}
llvm::Type *STy =
@@ -2579,14 +2688,23 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const {
llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
- // FIXME: Need to handle alignment
- const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
+ llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
"ap");
llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+ // Handle address alignment for type alignment > 32 bits
+ uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
+ if (TyAlign > 4) {
+ assert((TyAlign & (TyAlign - 1)) == 0 &&
+ "Alignment is not power of 2!");
+ llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
+ AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
+ AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
+ Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
+ }
llvm::Type *PTy =
llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
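
The alignment IR built above is the usual power-of-two round-up, shown here as a plain-C sketch under the same assumption the assert enforces (the alignment is a power of two):

#include <stdint.h>

/* Round addr up to the next multiple of align: adding align-1 carries into
 * the next slot unless addr is already aligned, and the mask clears the
 * low bits again. */
static uint32_t align_up(uint32_t addr, uint32_t align) {
  return (addr + align - 1) & ~(align - 1);
}
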
@@ -2623,6 +2741,9 @@ class PTXTargetCodeGenInfo : public TargetCodeGenInfo {
public:
PTXTargetCodeGenInfo(CodeGenTypes &CGT)
: TargetCodeGenInfo(new PTXABIInfo(CGT)) {}
+
+ virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const;
};
ABIArgInfo PTXABIInfo::classifyReturnType(QualType RetTy) const {
@@ -2652,13 +2773,21 @@ void PTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
// Calling convention as default by an ABI.
llvm::CallingConv::ID DefaultCC;
- llvm::StringRef Env = getContext().Target.getTriple().getEnvironmentName();
- if (Env == "device")
+ const LangOptions &LangOpts = getContext().getLangOptions();
+ if (LangOpts.OpenCL || LangOpts.CUDA) {
+ // If we are in OpenCL or CUDA mode, then default to device functions
DefaultCC = llvm::CallingConv::PTX_Device;
- else
- DefaultCC = llvm::CallingConv::PTX_Kernel;
-
+ } else {
+ // If we are in standard C/C++ mode, use the triple to decide on the default
+ StringRef Env =
+ getContext().getTargetInfo().getTriple().getEnvironmentName();
+ if (Env == "device")
+ DefaultCC = llvm::CallingConv::PTX_Device;
+ else
+ DefaultCC = llvm::CallingConv::PTX_Kernel;
+ }
FI.setEffectiveCallingConvention(DefaultCC);
+
}
llvm::Value *PTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
@@ -2667,6 +2796,36 @@ llvm::Value *PTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
return 0;
}
+void PTXTargetCodeGenInfo::SetTargetAttributes(const Decl *D,
+ llvm::GlobalValue *GV,
CodeGen::CodeGenModule &M) const {
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ if (!FD) return;
+
+ llvm::Function *F = cast<llvm::Function>(GV);
+
+ // Perform special handling in OpenCL mode
+ if (M.getLangOptions().OpenCL) {
+ // Use OpenCL function attributes to set proper calling conventions
+ // By default, all functions are device functions
+ if (FD->hasAttr<OpenCLKernelAttr>()) {
+ // OpenCL __kernel functions get a kernel calling convention
+ F->setCallingConv(llvm::CallingConv::PTX_Kernel);
+ // And kernel functions are not subject to inlining
+ F->addFnAttr(llvm::Attribute::NoInline);
+ }
+ }
+
+ // Perform special handling in CUDA mode.
+ if (M.getLangOptions().CUDA) {
+ // CUDA __global__ functions get a kernel calling convention. Since
+ // __global__ functions cannot be called from the device, we do not
+ // need to set the noinline attribute.
+ if (FD->getAttr<CUDAGlobalAttr>())
+ F->setCallingConv(llvm::CallingConv::PTX_Kernel);
+ }
+}
+
}
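
As a hedged illustration of what drives the CUDA branch: in clang's CUDA headers the __global__ keyword expands to the GNU attribute spelling below, which attaches the CUDAGlobalAttr this hook checks. The declaration itself is hypothetical, and the attribute is only meaningful when compiling in CUDA mode:

/* CUDA-mode sketch: 'scale' carries CUDAGlobalAttr and therefore gets the
 * PTX_Kernel calling convention; plain functions keep the PTX_Device
 * default chosen in computeInfo() above. */
__attribute__((global)) void scale(float *out, float s);
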
//===----------------------------------------------------------------------===//
@@ -2891,7 +3050,7 @@ void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
// Step 3: Emit ISR vector alias.
unsigned Num = attr->getNumber() + 0xffe0;
new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
- "vector_" + llvm::Twine::utohexstr(Num),
+ "vector_" + Twine::utohexstr(Num),
GV, &M.getModule());
}
}
@@ -2904,6 +3063,7 @@ void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
namespace {
class MipsABIInfo : public ABIInfo {
+ static const unsigned MinABIStackAlignInBytes = 4;
public:
MipsABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
@@ -2914,10 +3074,13 @@ public:
CodeGenFunction &CGF) const;
};
+const unsigned MipsABIInfo::MinABIStackAlignInBytes;
+
class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
+ unsigned SizeOfUnwindException;
public:
- MIPSTargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(new MipsABIInfo(CGT)) {}
+ MIPSTargetCodeGenInfo(CodeGenTypes &CGT, unsigned SZ)
+ : TargetCodeGenInfo(new MipsABIInfo(CGT)), SizeOfUnwindException(SZ) {}
int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
return 29;
@@ -2925,6 +3088,10 @@ public:
bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const;
+
+ unsigned getSizeOfUnwindException() const {
+ return SizeOfUnwindException;
+ }
};
}
@@ -2934,6 +3101,11 @@ ABIArgInfo MipsABIInfo::classifyArgumentType(QualType Ty) const {
if (getContext().getTypeSize(Ty) == 0)
return ABIArgInfo::getIgnore();
+  // Records with non-trivial destructors or copy constructors should not be
+  // passed by value.
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
return ABIArgInfo::getIndirect(0);
}
@@ -2973,7 +3145,37 @@ void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
- return 0;
+ llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
+
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
+ llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+ unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
+ llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+ llvm::Value *AddrTyped;
+
+ if (TypeAlign > MinABIStackAlignInBytes) {
+ llvm::Value *AddrAsInt32 = CGF.Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
+ llvm::Value *Inc = llvm::ConstantInt::get(CGF.Int32Ty, TypeAlign - 1);
+ llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -TypeAlign);
+ llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt32, Inc);
+ llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask);
+ AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy);
+ }
+ else
+ AddrTyped = Builder.CreateBitCast(Addr, PTy);
+
+ llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP);
+ TypeAlign = std::max(TypeAlign, MinABIStackAlignInBytes);
+ uint64_t Offset =
+ llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, TypeAlign);
+ llvm::Value *NextAddr =
+ Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
+ "ap.next");
+ Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+ return AddrTyped;
}
bool
@@ -2987,7 +3189,7 @@ MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
// Everything on MIPS is 4 bytes. Double-precision FP registers
// are aliased to pairs of single-precision FP registers.
- const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
+ llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
// 0-31 are the general purpose registers, $0 - $31.
@@ -3009,29 +3211,98 @@ MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
return false;
}
+//===----------------------------------------------------------------------===//
+// TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
+// Currently subclassed only to implement custom OpenCL C function attribute
+// handling.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
+public:
+ TCETargetCodeGenInfo(CodeGenTypes &CGT)
+ : DefaultTargetCodeGenInfo(CGT) {}
+
+ virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const;
+};
+
+void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D,
+ llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const {
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ if (!FD) return;
+
+ llvm::Function *F = cast<llvm::Function>(GV);
+
+ if (M.getLangOptions().OpenCL) {
+ if (FD->hasAttr<OpenCLKernelAttr>()) {
+      // OpenCL C kernel functions are not subject to inlining.
+ F->addFnAttr(llvm::Attribute::NoInline);
+
+ if (FD->hasAttr<ReqdWorkGroupSizeAttr>()) {
+
+ // Convert the reqd_work_group_size() attributes to metadata.
+ llvm::LLVMContext &Context = F->getContext();
+ llvm::NamedMDNode *OpenCLMetadata =
+ M.getModule().getOrInsertNamedMetadata("opencl.kernel_wg_size_info");
+
+ SmallVector<llvm::Value*, 5> Operands;
+ Operands.push_back(F);
+
+ Operands.push_back(llvm::Constant::getIntegerValue(
+ llvm::Type::getInt32Ty(Context),
+ llvm::APInt(
+ 32,
+ FD->getAttr<ReqdWorkGroupSizeAttr>()->getXDim())));
+ Operands.push_back(llvm::Constant::getIntegerValue(
+ llvm::Type::getInt32Ty(Context),
+ llvm::APInt(
+ 32,
+ FD->getAttr<ReqdWorkGroupSizeAttr>()->getYDim())));
+ Operands.push_back(llvm::Constant::getIntegerValue(
+ llvm::Type::getInt32Ty(Context),
+ llvm::APInt(
+ 32,
+ FD->getAttr<ReqdWorkGroupSizeAttr>()->getZDim())));
+
+ // Add a boolean constant operand for "required" (true) or "hint" (false)
+ // for implementing the work_group_size_hint attr later. Currently
+ // always true as the hint is not yet implemented.
+ Operands.push_back(llvm::ConstantInt::getTrue(llvm::Type::getInt1Ty(Context)));
+
+ OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
+ }
+ }
+ }
+}
+
+}
const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
if (TheTargetCodeGenInfo)
return *TheTargetCodeGenInfo;
- // For now we just cache the TargetCodeGenInfo in CodeGenModule and don't
- // free it.
-
- const llvm::Triple &Triple = getContext().Target.getTriple();
+ const llvm::Triple &Triple = getContext().getTargetInfo().getTriple();
switch (Triple.getArch()) {
default:
return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types));
case llvm::Triple::mips:
case llvm::Triple::mipsel:
- return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types));
+ return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, 24));
+
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el:
+ return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, 32));
case llvm::Triple::arm:
case llvm::Triple::thumb:
{
ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
- if (strcmp(getContext().Target.getABI(), "apcs-gnu") == 0)
+ if (strcmp(getContext().getTargetInfo().getABI(), "apcs-gnu") == 0)
Kind = ARMABIInfo::APCS;
else if (CodeGenOpts.FloatABI == "hard")
Kind = ARMABIInfo::AAPCS_VFP;
@@ -3055,8 +3326,11 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::msp430:
return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));
+ case llvm::Triple::tce:
+ return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types));
+
case llvm::Triple::x86: {
- bool DisableMMX = strcmp(getContext().Target.getABI(), "no-mmx") == 0;
+ bool DisableMMX = strcmp(getContext().getTargetInfo().getABI(), "no-mmx") == 0;
if (Triple.isOSDarwin())
return *(TheTargetCodeGenInfo =
diff --git a/lib/CodeGen/TargetInfo.h b/lib/CodeGen/TargetInfo.h
index d5e8884cdffe..8f90c7bdd92d 100644
--- a/lib/CodeGen/TargetInfo.h
+++ b/lib/CodeGen/TargetInfo.h
@@ -15,6 +15,8 @@
#ifndef CLANG_CODEGEN_TARGETINFO_H
#define CLANG_CODEGEN_TARGETINFO_H
+#include "clang/Basic/LLVM.h"
+#include "clang/AST/Type.h"
#include "llvm/ADT/StringRef.h"
namespace llvm {
@@ -58,7 +60,7 @@ namespace clang {
/// uint64 private_1;
/// uint64 private_2;
/// };
- unsigned getSizeOfUnwindException() const { return 32; }
+ virtual unsigned getSizeOfUnwindException() const;
/// Controls whether __builtin_extend_pointer should sign-extend
/// pointers to uint64_t or zero-extend them (the default). Has
@@ -107,7 +109,7 @@ namespace clang {
}
virtual llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
- llvm::StringRef Constraint,
+ StringRef Constraint,
llvm::Type* Ty) const {
return Ty;
}
@@ -122,9 +124,43 @@ namespace clang {
/// a particular instruction sequence. This functions returns
/// that instruction sequence in inline assembly, which will be
/// empty if none is required.
- virtual llvm::StringRef getARCRetainAutoreleasedReturnValueMarker() const {
+ virtual StringRef getARCRetainAutoreleasedReturnValueMarker() const {
return "";
}
+
+  /// Determine whether a call to an unprototyped function under
+ /// the given calling convention should use the variadic
+ /// convention or the non-variadic convention.
+ ///
+ /// There's a good reason to make a platform's variadic calling
+ /// convention be different from its non-variadic calling
+ /// convention: the non-variadic arguments can be passed in
+ /// registers (better for performance), and the variadic arguments
+ /// can be passed on the stack (also better for performance). If
+ /// this is done, however, unprototyped functions *must* use the
+ /// non-variadic convention, because C99 states that a call
+ /// through an unprototyped function type must succeed if the
+ /// function was defined with a non-variadic prototype with
+ /// compatible parameters. Therefore, splitting the conventions
+ /// makes it impossible to call a variadic function through an
+ /// unprototyped type. Since function prototypes came out in the
+ /// late 1970s, this is probably an acceptable trade-off.
+ /// Nonetheless, not all platforms are willing to make it, and in
+  /// particular x86-64 bends over backwards to make the
+ /// conventions compatible.
+ ///
+ /// The default is false. This is correct whenever:
+ /// - the conventions are exactly the same, because it does not
+ /// matter and the resulting IR will be somewhat prettier in
+ /// certain cases; or
+ /// - the conventions are substantively different in how they pass
+ /// arguments, because in this case using the variadic convention
+ /// will lead to C99 violations.
+ /// It is not necessarily correct when arguments are passed in the
+ /// same way and some out-of-band information is passed for the
+ /// benefit of variadic callees, as is the case for x86-64.
+ /// In this case the ABI should be consulted.
+ virtual bool isNoProtoCallVariadic(CallingConv CC) const;
};
}
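
The C99 rule motivating that default is easiest to see in a small example. The following sketch is valid C99 (not C++, which has no unprototyped function types):

/* C99 6.5.2.2: a call through an unprototyped function type must work when
 * the callee was defined with a compatible non-variadic prototype. If
 * unprototyped calls used a distinct variadic convention, this well-formed
 * program would miscompile. */
int add(int a, int b) { return a + b; }

int call_through_unprototyped(void) {
  int (*fp)() = (int (*)())add; /* no prototype visible at the call site */
  return fp(1, 2);              /* must still reach add() correctly */
}
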