Diffstat (limited to 'lib/CodeGen')
-rw-r--r--  lib/CodeGen/ABIInfo.h                  |  106
-rw-r--r--  lib/CodeGen/CGBlocks.cpp               |  197
-rw-r--r--  lib/CodeGen/CGBlocks.h                 |   18
-rw-r--r--  lib/CodeGen/CGBuiltin.cpp              |  388
-rw-r--r--  lib/CodeGen/CGCXX.cpp                  |  190
-rw-r--r--  lib/CodeGen/CGCXXABI.h                 |  200
-rw-r--r--  lib/CodeGen/CGCall.cpp                 |  481
-rw-r--r--  lib/CodeGen/CGClass.cpp                |  450
-rw-r--r--  lib/CodeGen/CGDebugInfo.cpp            |  477
-rw-r--r--  lib/CodeGen/CGDebugInfo.h              |   51
-rw-r--r--  lib/CodeGen/CGDecl.cpp                 |  164
-rw-r--r--  lib/CodeGen/CGDeclCXX.cpp              |   72
-rw-r--r--  lib/CodeGen/CGException.cpp            |  578
-rw-r--r--  lib/CodeGen/CGException.h              |  313
-rw-r--r--  lib/CodeGen/CGExpr.cpp                 |  501
-rw-r--r--  lib/CodeGen/CGExprAgg.cpp              |  201
-rw-r--r--  lib/CodeGen/CGExprCXX.cpp              |  860
-rw-r--r--  lib/CodeGen/CGExprComplex.cpp          |    4
-rw-r--r--  lib/CodeGen/CGExprConstant.cpp         |  125
-rw-r--r--  lib/CodeGen/CGExprScalar.cpp           |  463
-rw-r--r--  lib/CodeGen/CGObjC.cpp                 |   15
-rw-r--r--  lib/CodeGen/CGObjCGNU.cpp              |   64
-rw-r--r--  lib/CodeGen/CGObjCMac.cpp              |  993
-rw-r--r--  lib/CodeGen/CGObjCRuntime.h            |   13
-rw-r--r--  lib/CodeGen/CGRTTI.cpp                 |  192
-rw-r--r--  lib/CodeGen/CGRecordLayout.h           |   28
-rw-r--r--  lib/CodeGen/CGRecordLayoutBuilder.cpp  |   81
-rw-r--r--  lib/CodeGen/CGStmt.cpp                 |  131
-rw-r--r--  lib/CodeGen/CGTemporaries.cpp          |   86
-rw-r--r--  lib/CodeGen/CGVTT.cpp                  |    3
-rw-r--r--  lib/CodeGen/CGVTables.cpp              |   92
-rw-r--r--  lib/CodeGen/CGValue.h                  |   71
-rw-r--r--  lib/CodeGen/CodeGenFunction.cpp        | 1125
-rw-r--r--  lib/CodeGen/CodeGenFunction.h          |  321
-rw-r--r--  lib/CodeGen/CodeGenModule.cpp          |  454
-rw-r--r--  lib/CodeGen/CodeGenModule.h            |   77
-rw-r--r--  lib/CodeGen/CodeGenTypes.cpp           |   39
-rw-r--r--  lib/CodeGen/CodeGenTypes.h             |   28
-rw-r--r--  lib/CodeGen/ItaniumCXXABI.cpp          |  992
-rw-r--r--  lib/CodeGen/Makefile                   |    1
-rw-r--r--  lib/CodeGen/Mangle.cpp                 |  120
-rw-r--r--  lib/CodeGen/MicrosoftCXXABI.cpp        |   37
-rw-r--r--  lib/CodeGen/TargetInfo.cpp             | 1183
-rw-r--r--  lib/CodeGen/TargetInfo.h               |   10
44 files changed, 7641 insertions, 4354 deletions
diff --git a/lib/CodeGen/ABIInfo.h b/lib/CodeGen/ABIInfo.h
index 85524acbe1f7..91b7742557f5 100644
--- a/lib/CodeGen/ABIInfo.h
+++ b/lib/CodeGen/ABIInfo.h
@@ -16,52 +16,52 @@
namespace llvm {
class Value;
class LLVMContext;
+ class TargetData;
}
namespace clang {
class ASTContext;
- // FIXME: This is a layering issue if we want to move ABIInfo
- // down. Fortunately CGFunctionInfo has no real tie to CodeGen.
namespace CodeGen {
class CGFunctionInfo;
class CodeGenFunction;
+ class CodeGenTypes;
}
- /* FIXME: All of this stuff should be part of the target interface
- somehow. It is currently here because it is not clear how to factor
- the targets to support this, since the Targets currently live in a
- layer below types n'stuff.
- */
+ // FIXME: All of this stuff should be part of the target interface
+ // somehow. It is currently here because it is not clear how to factor
+ // the targets to support this, since the Targets currently live in a
+ // layer below types n'stuff.
/// ABIArgInfo - Helper class to encapsulate information about how a
/// specific C type should be passed to or returned from a function.
class ABIArgInfo {
public:
enum Kind {
- Direct, /// Pass the argument directly using the normal
- /// converted LLVM type. Complex and structure types
- /// are passed using first class aggregates.
-
- Extend, /// Valid only for integer argument types. Same as 'direct'
- /// but also emit a zero/sign extension attribute.
-
- Indirect, /// Pass the argument indirectly via a hidden pointer
- /// with the specified alignment (0 indicates default
- /// alignment).
-
- Ignore, /// Ignore the argument (treat as void). Useful for
- /// void and empty structs.
-
- Coerce, /// Only valid for aggregate return types, the argument
- /// should be accessed by coercion to a provided type.
-
- Expand, /// Only valid for aggregate argument types. The
- /// structure should be expanded into consecutive
- /// arguments for its constituent fields. Currently
- /// expand is only allowed on structures whose fields
- /// are all scalar types or are themselves expandable
- /// types.
+ /// Direct - Pass the argument directly using the normal converted LLVM
+ /// type, or by coercing to another specified type stored in
+ /// 'CoerceToType'). If an offset is specified (in UIntData), then the
+ /// argument passed is offset by some number of bytes in the memory
+ /// representation.
+ Direct,
+
+ /// Extend - Valid only for integer argument types. Same as 'direct'
+ /// but also emit a zero/sign extension attribute.
+ Extend,
+
+ /// Indirect - Pass the argument indirectly via a hidden pointer
+ /// with the specified alignment (0 indicates default alignment).
+ Indirect,
+
+ /// Ignore - Ignore the argument (treat as void). Useful for void and
+ /// empty structs.
+ Ignore,
+
+ /// Expand - Only valid for aggregate argument types. The structure should
+ /// be expanded into consecutive arguments for its constituent fields.
+ /// Currently expand is only allowed on structures whose fields
+ /// are all scalar types or are themselves expandable types.
+ Expand,
KindFirst=Direct, KindLast=Expand
};
@@ -79,18 +79,15 @@ namespace clang {
public:
ABIArgInfo() : TheKind(Direct), TypeData(0), UIntData(0) {}
- static ABIArgInfo getDirect() {
- return ABIArgInfo(Direct);
+ static ABIArgInfo getDirect(const llvm::Type *T = 0, unsigned Offset = 0) {
+ return ABIArgInfo(Direct, T, Offset);
}
- static ABIArgInfo getExtend() {
- return ABIArgInfo(Extend);
+ static ABIArgInfo getExtend(const llvm::Type *T = 0) {
+ return ABIArgInfo(Extend, T, 0);
}
static ABIArgInfo getIgnore() {
return ABIArgInfo(Ignore);
}
- static ABIArgInfo getCoerce(const llvm::Type *T) {
- return ABIArgInfo(Coerce, T);
- }
static ABIArgInfo getIndirect(unsigned Alignment, bool ByVal = true) {
return ABIArgInfo(Indirect, 0, Alignment, ByVal);
}
@@ -102,16 +99,28 @@ namespace clang {
bool isDirect() const { return TheKind == Direct; }
bool isExtend() const { return TheKind == Extend; }
bool isIgnore() const { return TheKind == Ignore; }
- bool isCoerce() const { return TheKind == Coerce; }
bool isIndirect() const { return TheKind == Indirect; }
bool isExpand() const { return TheKind == Expand; }
- // Coerce accessors
+ bool canHaveCoerceToType() const {
+ return TheKind == Direct || TheKind == Extend;
+ }
+
+ // Direct/Extend accessors
+ unsigned getDirectOffset() const {
+ assert((isDirect() || isExtend()) && "Not a direct or extend kind");
+ return UIntData;
+ }
const llvm::Type *getCoerceToType() const {
- assert(TheKind == Coerce && "Invalid kind!");
+ assert(canHaveCoerceToType() && "Invalid kind!");
return TypeData;
}
-
+
+ void setCoerceToType(const llvm::Type *T) {
+ assert(canHaveCoerceToType() && "Invalid kind!");
+ TypeData = T;
+ }
+
// Indirect accessors
unsigned getIndirectAlign() const {
assert(TheKind == Indirect && "Invalid kind!");
@@ -130,15 +139,16 @@ namespace clang {
/// passed or returned from functions.
class ABIInfo {
public:
+ CodeGen::CodeGenTypes &CGT;
+
+ ABIInfo(CodeGen::CodeGenTypes &cgt) : CGT(cgt) {}
virtual ~ABIInfo();
+
+ ASTContext &getContext() const;
+ llvm::LLVMContext &getVMContext() const;
+ const llvm::TargetData &getTargetData() const;
- virtual void computeInfo(CodeGen::CGFunctionInfo &FI,
- ASTContext &Ctx,
- llvm::LLVMContext &VMContext,
- // This is the preferred type for argument lowering
- // which can be used to generate better IR.
- const llvm::Type *const *PrefTypes = 0,
- unsigned NumPrefTypes = 0) const = 0;
+ virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const = 0;
/// EmitVAArg - Emit the target dependent code to load a value of
/// \arg Ty from the va_list pointed to by \arg VAListAddr.
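
Note on the ABIInfo.h change above: the separate Coerce kind is folded into Direct/Extend, which now optionally carry a coerce-to type and a byte offset, and ABIInfo gains a CodeGenTypes reference so targets can reach the ASTContext, LLVMContext and TargetData directly. Below is a minimal sketch of how target classification code might use the reworked interface; classifyIntegerLike and the 64-bit cutoff are illustrative assumptions, not code from this commit.

// Sketch only (not part of the commit): classify a small integer-like
// argument with the reworked ABIArgInfo interface.
#include "ABIInfo.h"
#include "llvm/DerivedTypes.h"
#include <stdint.h>

static clang::ABIArgInfo classifyIntegerLike(llvm::LLVMContext &Ctx,
                                             uint64_t BitWidth) {
  // Small values: pass directly, coercing to an iN of the same width so the
  // backend sees a single scalar (this replaces the old getCoerce()).
  if (BitWidth <= 64)
    return clang::ABIArgInfo::getDirect(llvm::IntegerType::get(Ctx, BitWidth));

  // Larger values: pass indirectly through a hidden pointer with the
  // default alignment (0).
  return clang::ABIArgInfo::getIndirect(0);
}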
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
index cb9e63622185..04f1ef24b297 100644
--- a/lib/CodeGen/CGBlocks.cpp
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -24,36 +24,6 @@
using namespace clang;
using namespace CodeGen;
-/// CGBlockInfo - Information to generate a block literal.
-class clang::CodeGen::CGBlockInfo {
-public:
- /// Name - The name of the block, kindof.
- const char *Name;
-
- /// DeclRefs - Variables from parent scopes that have been
- /// imported into this block.
- llvm::SmallVector<const BlockDeclRefExpr *, 8> DeclRefs;
-
- /// InnerBlocks - This block and the blocks it encloses.
- llvm::SmallPtrSet<const DeclContext *, 4> InnerBlocks;
-
- /// CXXThisRef - Non-null if 'this' was required somewhere, in
- /// which case this is that expression.
- const CXXThisExpr *CXXThisRef;
-
- /// NeedsObjCSelf - True if something in this block has an implicit
- /// reference to 'self'.
- bool NeedsObjCSelf;
-
- /// These are initialized by GenerateBlockFunction.
- bool BlockHasCopyDispose;
- CharUnits BlockSize;
- CharUnits BlockAlign;
- llvm::SmallVector<const Expr*, 8> BlockLayout;
-
- CGBlockInfo(const char *Name);
-};
-
CGBlockInfo::CGBlockInfo(const char *N)
: Name(N), CXXThisRef(0), NeedsObjCSelf(false) {
@@ -64,9 +34,12 @@ CGBlockInfo::CGBlockInfo(const char *N)
llvm::Constant *CodeGenFunction::
-BuildDescriptorBlockDecl(const BlockExpr *BE, bool BlockHasCopyDispose, CharUnits Size,
+BuildDescriptorBlockDecl(const BlockExpr *BE, const CGBlockInfo &Info,
const llvm::StructType* Ty,
+ llvm::Constant *BlockVarLayout,
std::vector<HelperInfo> *NoteForHelper) {
+ bool BlockHasCopyDispose = Info.BlockHasCopyDispose;
+ CharUnits Size = Info.BlockSize;
const llvm::Type *UnsignedLongTy
= CGM.getTypes().ConvertType(getContext().UnsignedLongTy);
llvm::Constant *C;
@@ -100,7 +73,8 @@ BuildDescriptorBlockDecl(const BlockExpr *BE, bool BlockHasCopyDispose, CharUnit
CGM.GetAddrOfConstantCString(BlockTypeEncoding), PtrToInt8Ty));
// Layout.
- C = llvm::ConstantInt::get(UnsignedLongTy, 0);
+ C = BlockVarLayout;
+
Elts.push_back(C);
C = llvm::ConstantStruct::get(VMContext, Elts, false);
@@ -111,20 +85,6 @@ BuildDescriptorBlockDecl(const BlockExpr *BE, bool BlockHasCopyDispose, CharUnit
return C;
}
-llvm::Constant *BlockModule::getNSConcreteGlobalBlock() {
- if (NSConcreteGlobalBlock == 0)
- NSConcreteGlobalBlock = CGM.CreateRuntimeVariable(PtrToInt8Ty,
- "_NSConcreteGlobalBlock");
- return NSConcreteGlobalBlock;
-}
-
-llvm::Constant *BlockModule::getNSConcreteStackBlock() {
- if (NSConcreteStackBlock == 0)
- NSConcreteStackBlock = CGM.CreateRuntimeVariable(PtrToInt8Ty,
- "_NSConcreteStackBlock");
- return NSConcreteStackBlock;
-}
-
static void CollectBlockDeclRefInfo(const Stmt *S, CGBlockInfo &Info) {
for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
I != E; ++I)
@@ -198,6 +158,21 @@ static void AllocateAllBlockDeclRefs(CodeGenFunction &CGF, CGBlockInfo &Info) {
}
}
+static unsigned computeBlockFlag(CodeGenModule &CGM,
+ const BlockExpr *BE, unsigned flags) {
+ QualType BPT = BE->getType();
+ const FunctionType *ftype = BPT->getPointeeType()->getAs<FunctionType>();
+ QualType ResultType = ftype->getResultType();
+
+ CallArgList Args;
+ CodeGenTypes &Types = CGM.getTypes();
+ const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, Args,
+ FunctionType::ExtInfo());
+ if (CGM.ReturnTypeUsesSRet(FnInfo))
+ flags |= CodeGenFunction::BLOCK_USE_STRET;
+ return flags;
+}
+
// FIXME: Push most into CGM, passing down a few bits, like current function
// name.
llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
@@ -221,6 +196,7 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
llvm::Value *V;
{
+ llvm::Constant *BlockVarLayout;
// C = BuildBlockStructInitlist();
unsigned int flags = BLOCK_HAS_SIGNATURE;
@@ -229,6 +205,7 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
// __invoke
llvm::Function *Fn
= CodeGenFunction(CGM).GenerateBlockFunction(CurGD, BE, Info, CurFuncDecl,
+ BlockVarLayout,
LocalDeclMap);
BlockHasCopyDispose |= Info.BlockHasCopyDispose;
Elts[3] = Fn;
@@ -244,18 +221,7 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
Elts[0] = C;
// __flags
- {
- QualType BPT = BE->getType();
- const FunctionType *ftype = BPT->getPointeeType()->getAs<FunctionType>();
- QualType ResultType = ftype->getResultType();
-
- CallArgList Args;
- CodeGenTypes &Types = CGM.getTypes();
- const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, Args,
- FunctionType::ExtInfo());
- if (CGM.ReturnTypeUsesSRet(FnInfo))
- flags |= BLOCK_USE_STRET;
- }
+ flags = computeBlockFlag(CGM, BE, flags);
const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
CGM.getTypes().ConvertType(CGM.getContext().IntTy));
C = llvm::ConstantInt::get(IntTy, flags);
@@ -267,8 +233,8 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
if (Info.BlockLayout.empty()) {
// __descriptor
- Elts[4] = BuildDescriptorBlockDecl(BE, Info.BlockHasCopyDispose,
- Info.BlockSize, 0, 0);
+ C = llvm::Constant::getNullValue(PtrToInt8Ty);
+ Elts[4] = BuildDescriptorBlockDecl(BE, Info, 0, C, 0);
// Optimize to being a global block.
Elts[0] = CGM.getNSConcreteGlobalBlock();
@@ -371,7 +337,7 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
SourceLocation());
if (VD->getType()->isReferenceType()) {
E = new (getContext())
- UnaryOperator(const_cast<Expr*>(E), UnaryOperator::AddrOf,
+ UnaryOperator(const_cast<Expr*>(E), UO_AddrOf,
getContext().getPointerType(E->getType()),
SourceLocation());
}
@@ -381,7 +347,7 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
if (BDRE->isByRef()) {
E = new (getContext())
- UnaryOperator(const_cast<Expr*>(E), UnaryOperator::AddrOf,
+ UnaryOperator(const_cast<Expr*>(E), UO_AddrOf,
getContext().getPointerType(E->getType()),
SourceLocation());
}
@@ -422,9 +388,8 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
NoteForHelper.resize(NumHelpers);
// __descriptor
- llvm::Value *Descriptor = BuildDescriptorBlockDecl(BE,
- Info.BlockHasCopyDispose,
- Info.BlockSize, Ty,
+ llvm::Value *Descriptor = BuildDescriptorBlockDecl(BE, Info, Ty,
+ BlockVarLayout,
&NoteForHelper);
Descriptor = Builder.CreateBitCast(Descriptor, PtrToInt8Ty);
Builder.CreateStore(Descriptor, Builder.CreateStructGEP(V, 4, "block.tmp"));
@@ -693,18 +658,23 @@ BlockModule::GetAddrOfGlobalBlock(const BlockExpr *BE, const char * n) {
std::vector<llvm::Constant*> LiteralFields(FieldCount);
CGBlockInfo Info(n);
+ llvm::Constant *BlockVarLayout;
llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
llvm::Function *Fn
- = CodeGenFunction(CGM).GenerateBlockFunction(GlobalDecl(), BE, Info, 0, LocalDeclMap);
+ = CodeGenFunction(CGM).GenerateBlockFunction(GlobalDecl(), BE,
+ Info, 0, BlockVarLayout,
+ LocalDeclMap);
assert(Info.BlockSize == BlockLiteralSize
&& "no imports allowed for global block");
// isa
- LiteralFields[0] = getNSConcreteGlobalBlock();
+ LiteralFields[0] = CGM.getNSConcreteGlobalBlock();
- // Flags
+ // __flags
+ unsigned flags = computeBlockFlag(CGM, BE,
+ (BLOCK_IS_GLOBAL | BLOCK_HAS_SIGNATURE));
LiteralFields[1] =
- llvm::ConstantInt::get(IntTy, BLOCK_IS_GLOBAL | BLOCK_HAS_SIGNATURE);
+ llvm::ConstantInt::get(IntTy, flags);
// Reserved
LiteralFields[2] = llvm::Constant::getNullValue(IntTy);
@@ -737,6 +707,7 @@ llvm::Function *
CodeGenFunction::GenerateBlockFunction(GlobalDecl GD, const BlockExpr *BExpr,
CGBlockInfo &Info,
const Decl *OuterFuncDecl,
+ llvm::Constant *& BlockVarLayout,
llvm::DenseMap<const Decl*, llvm::Value*> ldm) {
// Check if we should generate debug info for this block.
@@ -751,7 +722,7 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD, const BlockExpr *BExpr,
++i) {
const VarDecl *VD = dyn_cast<VarDecl>(i->first);
- if (VD->getStorageClass() == VarDecl::Static || VD->hasExternalStorage())
+ if (VD->getStorageClass() == SC_Static || VD->hasExternalStorage())
LocalDeclMap[VD] = i->second;
}
@@ -784,6 +755,12 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD, const BlockExpr *BExpr,
// Build the block struct now.
AllocateAllBlockDeclRefs(*this, Info);
+ // Capture block layout info. here.
+ if (CGM.getContext().getLangOptions().ObjC1)
+ BlockVarLayout = CGM.getObjCRuntime().GCBlockLayout(*this, Info.DeclRefs);
+ else
+ BlockVarLayout = llvm::Constant::getNullValue(PtrToInt8Ty);
+
QualType ParmTy = getContext().getBlockParmType(BlockHasCopyDispose,
BlockLayout);
@@ -813,7 +790,11 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD, const BlockExpr *BExpr,
Name.getString(), &CGM.getModule());
CGM.SetInternalFunctionAttributes(BD, Fn, FI);
-
+ StartFunction(BD, ResultType, Fn, Args,
+ BExpr->getBody()->getLocEnd());
+
+ CurFuncDecl = OuterFuncDecl;
+
QualType FnType(BlockFunctionType, 0);
bool HasPrototype = isa<FunctionProtoType>(BlockFunctionType);
@@ -822,15 +803,22 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD, const BlockExpr *BExpr,
getContext().getTranslationUnitDecl(),
SourceLocation(), ID, FnType,
0,
- FunctionDecl::Static,
- FunctionDecl::None,
+ SC_Static,
+ SC_None,
false, HasPrototype);
+ if (FunctionProtoType *FT = dyn_cast<FunctionProtoType>(FnType)) {
+ const FunctionDecl *CFD = dyn_cast<FunctionDecl>(CurCodeDecl);
+ FunctionDecl *FD = const_cast<FunctionDecl *>(CFD);
+ llvm::SmallVector<ParmVarDecl*, 16> Params;
+ for (unsigned i = 0, e = FT->getNumArgs(); i != e; ++i)
+ Params.push_back(ParmVarDecl::Create(getContext(), FD,
+ SourceLocation(), 0,
+ FT->getArgType(i), /*TInfo=*/0,
+ SC_None, SC_None, 0));
+ FD->setParams(Params.data(), Params.size());
+ }
+
- StartFunction(BD, ResultType, Fn, Args,
- BExpr->getBody()->getLocEnd());
-
- CurFuncDecl = OuterFuncDecl;
-
// If we have a C++ 'this' reference, go ahead and force it into
// existence now.
if (Info.CXXThisRef) {
@@ -928,7 +916,7 @@ CharUnits BlockFunction::getBlockOffset(CharUnits Size, CharUnits Align) {
getContext().getTranslationUnitDecl(),
SourceLocation(),
0, QualType(PadTy), 0,
- VarDecl::None, VarDecl::None);
+ SC_None, SC_None);
Expr *E = new (getContext()) DeclRefExpr(PadDecl, PadDecl->getType(),
SourceLocation());
BlockLayout.push_back(E);
@@ -974,8 +962,8 @@ GenerateCopyHelperFunction(bool BlockHasCopyDispose, const llvm::StructType *T,
FunctionDecl *FD = FunctionDecl::Create(getContext(),
getContext().getTranslationUnitDecl(),
SourceLocation(), II, R, 0,
- FunctionDecl::Static,
- FunctionDecl::None,
+ SC_Static,
+ SC_None,
false,
true);
CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
@@ -1012,7 +1000,7 @@ GenerateCopyHelperFunction(bool BlockHasCopyDispose, const llvm::StructType *T,
Dstv = Builder.CreateBitCast(Dstv, PtrToInt8Ty);
llvm::Value *N = llvm::ConstantInt::get(CGF.Int32Ty, flag);
- llvm::Value *F = getBlockObjectAssign();
+ llvm::Value *F = CGM.getBlockObjectAssign();
Builder.CreateCall3(F, Dstv, Srcv, N);
}
}
@@ -1056,8 +1044,8 @@ GenerateDestroyHelperFunction(bool BlockHasCopyDispose,
FunctionDecl *FD = FunctionDecl::Create(getContext(),
getContext().getTranslationUnitDecl(),
SourceLocation(), II, R, 0,
- FunctionDecl::Static,
- FunctionDecl::None,
+ SC_Static,
+ SC_None,
false, true);
CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
@@ -1141,8 +1129,8 @@ GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) {
FunctionDecl *FD = FunctionDecl::Create(getContext(),
getContext().getTranslationUnitDecl(),
SourceLocation(), II, R, 0,
- FunctionDecl::Static,
- FunctionDecl::None,
+ SC_Static,
+ SC_None,
false, true);
CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
@@ -1164,7 +1152,7 @@ GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) {
flag |= BLOCK_BYREF_CALLER;
llvm::Value *N = llvm::ConstantInt::get(CGF.Int32Ty, flag);
- llvm::Value *F = getBlockObjectAssign();
+ llvm::Value *F = CGM.getBlockObjectAssign();
Builder.CreateCall3(F, DstObj, SrcObj, N);
CGF.FinishFunction();
@@ -1205,8 +1193,8 @@ BlockFunction::GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
FunctionDecl *FD = FunctionDecl::Create(getContext(),
getContext().getTranslationUnitDecl(),
SourceLocation(), II, R, 0,
- FunctionDecl::Static,
- FunctionDecl::None,
+ SC_Static,
+ SC_None,
false, true);
CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
@@ -1259,37 +1247,8 @@ llvm::Constant *BlockFunction::BuildbyrefDestroyHelper(const llvm::Type *T,
return Entry=CodeGenFunction(CGM).GeneratebyrefDestroyHelperFunction(T, Flag);
}
-llvm::Value *BlockFunction::getBlockObjectDispose() {
- if (CGM.BlockObjectDispose == 0) {
- const llvm::FunctionType *FTy;
- std::vector<const llvm::Type*> ArgTys;
- const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
- ArgTys.push_back(PtrToInt8Ty);
- ArgTys.push_back(CGF.Int32Ty);
- FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
- CGM.BlockObjectDispose
- = CGM.CreateRuntimeFunction(FTy, "_Block_object_dispose");
- }
- return CGM.BlockObjectDispose;
-}
-
-llvm::Value *BlockFunction::getBlockObjectAssign() {
- if (CGM.BlockObjectAssign == 0) {
- const llvm::FunctionType *FTy;
- std::vector<const llvm::Type*> ArgTys;
- const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
- ArgTys.push_back(PtrToInt8Ty);
- ArgTys.push_back(PtrToInt8Ty);
- ArgTys.push_back(CGF.Int32Ty);
- FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
- CGM.BlockObjectAssign
- = CGM.CreateRuntimeFunction(FTy, "_Block_object_assign");
- }
- return CGM.BlockObjectAssign;
-}
-
void BlockFunction::BuildBlockRelease(llvm::Value *V, int flag) {
- llvm::Value *F = getBlockObjectDispose();
+ llvm::Value *F = CGM.getBlockObjectDispose();
llvm::Value *N;
V = Builder.CreateBitCast(V, PtrToInt8Ty);
N = llvm::ConstantInt::get(CGF.Int32Ty, flag);
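
For orientation on the CGBlocks.cpp changes above: the Elts[0..4] stores in BuildBlockLiteralTmp and the LiteralFields array in GetAddrOfGlobalBlock fill in the standard block-literal header of the blocks runtime; the new BlockVarLayout constant and computeBlockFlag() feed the descriptor and the flags word respectively. A rough sketch of that header as a plain struct follows; the field names are descriptive, not taken from the commit.

// Sketch of the block literal header populated above (names illustrative).
struct BlockLiteralHeader {
  void *isa;        // _NSConcreteStackBlock or _NSConcreteGlobalBlock
  int   flags;      // BLOCK_HAS_SIGNATURE, BLOCK_IS_GLOBAL, and now
                    // BLOCK_USE_STRET when the result is returned via sret
                    // (computed by computeBlockFlag above)
  int   reserved;   // zero
  void *invoke;     // the function emitted by GenerateBlockFunction
  void *descriptor; // built by BuildDescriptorBlockDecl: size, copy/dispose
                    // helpers, signature string, and the new BlockVarLayout
  // ... captured variables follow, laid out by AllocateAllBlockDeclRefs
};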
diff --git a/lib/CodeGen/CGBlocks.h b/lib/CodeGen/CGBlocks.h
index 772a62c24f1f..743e3c83be33 100644
--- a/lib/CodeGen/CGBlocks.h
+++ b/lib/CodeGen/CGBlocks.h
@@ -73,8 +73,6 @@ class BlockModule : public BlockBase {
CodeGenTypes &getTypes() { return Types; }
const llvm::TargetData &getTargetData() const { return TheTargetData; }
public:
- llvm::Constant *getNSConcreteGlobalBlock();
- llvm::Constant *getNSConcreteStackBlock();
int getGlobalUniqueCount() { return ++Block.GlobalUniqueCount; }
const llvm::Type *getBlockDescriptorType();
@@ -82,14 +80,6 @@ public:
llvm::Constant *GetAddrOfGlobalBlock(const BlockExpr *BE, const char *);
- /// NSConcreteGlobalBlock - Cached reference to the class pointer for global
- /// blocks.
- llvm::Constant *NSConcreteGlobalBlock;
-
- /// NSConcreteStackBlock - Cached reference to the class poinnter for stack
- /// blocks.
- llvm::Constant *NSConcreteStackBlock;
-
const llvm::Type *BlockDescriptorType;
const llvm::Type *GenericBlockLiteralType;
@@ -97,8 +87,6 @@ public:
int GlobalUniqueCount;
} Block;
- llvm::Value *BlockObjectAssign;
- llvm::Value *BlockObjectDispose;
const llvm::PointerType *PtrToInt8Ty;
std::map<uint64_t, llvm::Constant *> AssignCache;
@@ -108,9 +96,7 @@ public:
CodeGenTypes &T, CodeGenModule &CodeGen)
: Context(C), TheModule(M), TheTargetData(TD), Types(T),
CGM(CodeGen), VMContext(M.getContext()),
- NSConcreteGlobalBlock(0), NSConcreteStackBlock(0), BlockDescriptorType(0),
- GenericBlockLiteralType(0),
- BlockObjectAssign(0), BlockObjectDispose(0) {
+ BlockDescriptorType(0), GenericBlockLiteralType(0) {
Block.GlobalUniqueCount = 0;
PtrToInt8Ty = llvm::Type::getInt8PtrTy(M.getContext());
}
@@ -207,8 +193,6 @@ public:
llvm::Constant *BuildbyrefDestroyHelper(const llvm::Type *T, int flag,
unsigned Align);
- llvm::Value *getBlockObjectAssign();
- llvm::Value *getBlockObjectDispose();
void BuildBlockRelease(llvm::Value *DeclPtr, int flag = BLOCK_FIELD_IS_BYREF);
bool BlockRequiresCopying(QualType Ty)
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index fff4bacab6b5..986f621f64f5 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -41,6 +41,31 @@ static void EmitMemoryBarrier(CodeGenFunction &CGF,
C, C + 5);
}
+static Value *EmitCastToInt(CodeGenFunction &CGF,
+ const llvm::Type *ToType, Value *Val) {
+ if (Val->getType()->isPointerTy()) {
+ return CGF.Builder.CreatePtrToInt(Val, ToType);
+ }
+ assert(Val->getType()->isIntegerTy() &&
+ "Used a non-integer and non-pointer type with atomic builtin");
+ assert(Val->getType()->getScalarSizeInBits() <=
+ ToType->getScalarSizeInBits() && "Integer type too small");
+ return CGF.Builder.CreateSExtOrBitCast(Val, ToType);
+}
+
+static Value *EmitCastFromInt(CodeGenFunction &CGF, QualType ToQualType,
+ Value *Val) {
+ const llvm::Type *ToType = CGF.ConvertType(ToQualType);
+ if (ToType->isPointerTy()) {
+ return CGF.Builder.CreateIntToPtr(Val, ToType);
+ }
+ assert(Val->getType()->isIntegerTy() &&
+ "Used a non-integer and non-pointer type with atomic builtin");
+ assert(Val->getType()->getScalarSizeInBits() >=
+ ToType->getScalarSizeInBits() && "Integer type too small");
+ return CGF.Builder.CreateTruncOrBitCast(Val, ToType);
+}
+
// The atomic builtins are also full memory barriers. This is a utility for
// wrapping a call to the builtins with memory barriers.
static Value *EmitCallWithBarrier(CodeGenFunction &CGF, Value *Fn,
@@ -60,13 +85,20 @@ static Value *EmitCallWithBarrier(CodeGenFunction &CGF, Value *Fn,
/// and the expression node.
static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
Intrinsic::ID Id, const CallExpr *E) {
- Value *Args[2] = { CGF.EmitScalarExpr(E->getArg(0)),
- CGF.EmitScalarExpr(E->getArg(1)) };
- const llvm::Type *ResType[2];
- ResType[0] = CGF.ConvertType(E->getType());
- ResType[1] = CGF.ConvertType(E->getArg(0)->getType());
- Value *AtomF = CGF.CGM.getIntrinsic(Id, ResType, 2);
- return RValue::get(EmitCallWithBarrier(CGF, AtomF, Args, Args + 2));
+ const llvm::Type *ValueType =
+ llvm::IntegerType::get(CGF.getLLVMContext(),
+ CGF.getContext().getTypeSize(E->getType()));
+ const llvm::Type *PtrType = ValueType->getPointerTo();
+ const llvm::Type *IntrinsicTypes[2] = { ValueType, PtrType };
+ Value *AtomF = CGF.CGM.getIntrinsic(Id, IntrinsicTypes, 2);
+
+ Value *Args[2] = { CGF.Builder.CreateBitCast(CGF.EmitScalarExpr(E->getArg(0)),
+ PtrType),
+ EmitCastToInt(CGF, ValueType,
+ CGF.EmitScalarExpr(E->getArg(1))) };
+ return RValue::get(EmitCastFromInt(CGF, E->getType(),
+ EmitCallWithBarrier(CGF, AtomF, Args,
+ Args + 2)));
}
/// Utility to insert an atomic instruction based Instrinsic::ID and
@@ -75,14 +107,21 @@ static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
Intrinsic::ID Id, const CallExpr *E,
Instruction::BinaryOps Op) {
- const llvm::Type *ResType[2];
- ResType[0] = CGF.ConvertType(E->getType());
- ResType[1] = CGF.ConvertType(E->getArg(0)->getType());
- Value *AtomF = CGF.CGM.getIntrinsic(Id, ResType, 2);
- Value *Args[2] = { CGF.EmitScalarExpr(E->getArg(0)),
- CGF.EmitScalarExpr(E->getArg(1)) };
+ const llvm::Type *ValueType =
+ llvm::IntegerType::get(CGF.getLLVMContext(),
+ CGF.getContext().getTypeSize(E->getType()));
+ const llvm::Type *PtrType = ValueType->getPointerTo();
+ const llvm::Type *IntrinsicTypes[2] = { ValueType, PtrType };
+ Value *AtomF = CGF.CGM.getIntrinsic(Id, IntrinsicTypes, 2);
+
+ Value *Args[2] = { CGF.Builder.CreateBitCast(CGF.EmitScalarExpr(E->getArg(0)),
+ PtrType),
+ EmitCastToInt(CGF, ValueType,
+ CGF.EmitScalarExpr(E->getArg(1))) };
Value *Result = EmitCallWithBarrier(CGF, AtomF, Args, Args + 2);
- return RValue::get(CGF.Builder.CreateBinOp(Op, Result, Args[1]));
+ return RValue::get(EmitCastFromInt(CGF, E->getType(),
+ CGF.Builder.CreateBinOp(Op, Result,
+ Args[1])));
}
/// EmitFAbs - Emit a call to fabs/fabsf/fabsl, depending on the type of ValTy,
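
The two hunks above (and the compare-and-swap cases later in EmitBuiltinExpr) change the __sync_* lowering so that the overloaded llvm.atomic.* intrinsics are always instantiated at an integer type whose width matches the expression: the address is bitcast to the matching iN*, pointer-valued operands go through ptrtoint, and results are converted back with inttoptr (EmitCastToInt/EmitCastFromInt). The practical effect is that pointer-typed atomics now lower cleanly; a small usage-level illustration (the function is hypothetical, not from the commit):

// With the casts above, a CAS on a pointer lvalue is emitted as an integer
// llvm.atomic.cmp.swap of pointer width plus ptrtoint/inttoptr conversions.
struct Node { Node *next; };

static void pushFront(Node **head, Node *n) {
  Node *old;
  do {
    old = *head;
    n->next = old;
  } while (!__sync_bool_compare_and_swap(head, old, n));
}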
@@ -245,9 +284,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
"cast");
return RValue::get(Result);
}
- case Builtin::BI__builtin_expect:
+ case Builtin::BI__builtin_expect: {
// FIXME: pass expect through to LLVM
+ if (E->getArg(1)->HasSideEffects(getContext()))
+ (void)EmitScalarExpr(E->getArg(1));
return RValue::get(EmitScalarExpr(E->getArg(0)));
+ }
case Builtin::BI__builtin_bswap32:
case Builtin::BI__builtin_bswap64: {
Value *ArgValue = EmitScalarExpr(E->getArg(0));
@@ -746,14 +788,23 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__sync_val_compare_and_swap_4:
case Builtin::BI__sync_val_compare_and_swap_8:
case Builtin::BI__sync_val_compare_and_swap_16: {
- const llvm::Type *ResType[2];
- ResType[0]= ConvertType(E->getType());
- ResType[1] = ConvertType(E->getArg(0)->getType());
- Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap, ResType, 2);
- Value *Args[3] = { EmitScalarExpr(E->getArg(0)),
- EmitScalarExpr(E->getArg(1)),
- EmitScalarExpr(E->getArg(2)) };
- return RValue::get(EmitCallWithBarrier(*this, AtomF, Args, Args + 3));
+ const llvm::Type *ValueType =
+ llvm::IntegerType::get(CGF.getLLVMContext(),
+ CGF.getContext().getTypeSize(E->getType()));
+ const llvm::Type *PtrType = ValueType->getPointerTo();
+ const llvm::Type *IntrinsicTypes[2] = { ValueType, PtrType };
+ Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap,
+ IntrinsicTypes, 2);
+
+ Value *Args[3] = { Builder.CreateBitCast(CGF.EmitScalarExpr(E->getArg(0)),
+ PtrType),
+ EmitCastToInt(CGF, ValueType,
+ CGF.EmitScalarExpr(E->getArg(1))),
+ EmitCastToInt(CGF, ValueType,
+ CGF.EmitScalarExpr(E->getArg(2))) };
+ return RValue::get(EmitCastFromInt(CGF, E->getType(),
+ EmitCallWithBarrier(CGF, AtomF, Args,
+ Args + 3)));
}
case Builtin::BI__sync_bool_compare_and_swap_1:
@@ -761,14 +812,22 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__sync_bool_compare_and_swap_4:
case Builtin::BI__sync_bool_compare_and_swap_8:
case Builtin::BI__sync_bool_compare_and_swap_16: {
- const llvm::Type *ResType[2];
- ResType[0]= ConvertType(E->getArg(1)->getType());
- ResType[1] = llvm::PointerType::getUnqual(ResType[0]);
- Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap, ResType, 2);
- Value *OldVal = EmitScalarExpr(E->getArg(1));
- Value *Args[3] = { EmitScalarExpr(E->getArg(0)),
- OldVal,
- EmitScalarExpr(E->getArg(2)) };
+ const llvm::Type *ValueType =
+ llvm::IntegerType::get(
+ CGF.getLLVMContext(),
+ CGF.getContext().getTypeSize(E->getArg(1)->getType()));
+ const llvm::Type *PtrType = ValueType->getPointerTo();
+ const llvm::Type *IntrinsicTypes[2] = { ValueType, PtrType };
+ Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap,
+ IntrinsicTypes, 2);
+
+ Value *Args[3] = { Builder.CreateBitCast(CGF.EmitScalarExpr(E->getArg(0)),
+ PtrType),
+ EmitCastToInt(CGF, ValueType,
+ CGF.EmitScalarExpr(E->getArg(1))),
+ EmitCastToInt(CGF, ValueType,
+ CGF.EmitScalarExpr(E->getArg(2))) };
+ Value *OldVal = Args[1];
Value *PrevVal = EmitCallWithBarrier(*this, AtomF, Args, Args + 3);
Value *Result = Builder.CreateICmpEQ(PrevVal, OldVal);
// zext bool to int.
@@ -953,8 +1012,10 @@ const llvm::VectorType *GetNeonType(LLVMContext &C, unsigned type, bool q) {
return 0;
}
-Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
+Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C, bool widen) {
unsigned nElts = cast<llvm::VectorType>(V->getType())->getNumElements();
+ if (widen)
+ nElts <<= 1;
SmallVector<Constant*, 16> Indices(nElts, C);
Value* SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
return Builder.CreateShuffleVector(V, V, SV, "lane");
@@ -989,6 +1050,28 @@ Value *CodeGenFunction::EmitNeonShiftVector(Value *V, const llvm::Type *Ty,
return llvm::ConstantVector::get(CV.begin(), CV.size());
}
+/// GetPointeeAlignment - Given an expression with a pointer type, find the
+/// alignment of the type referenced by the pointer. Skip over implicit
+/// casts.
+static Value *GetPointeeAlignment(CodeGenFunction &CGF, const Expr *Addr) {
+ unsigned Align = 1;
+ // Check if the type is a pointer. The implicit cast operand might not be.
+ while (Addr->getType()->isPointerType()) {
+ QualType PtTy = Addr->getType()->getPointeeType();
+ unsigned NewA = CGF.getContext().getTypeAlignInChars(PtTy).getQuantity();
+ if (NewA > Align)
+ Align = NewA;
+
+ // If the address is an implicit cast, repeat with the cast operand.
+ if (const ImplicitCastExpr *CastAddr = dyn_cast<ImplicitCastExpr>(Addr)) {
+ Addr = CastAddr->getSubExpr();
+ continue;
+ }
+ break;
+ }
+ return llvm::ConstantInt::get(CGF.Int32Ty, Align);
+}
+
Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
if (BuiltinID == ARM::BI__clear_cache) {
@@ -1002,9 +1085,6 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
a, b);
}
- // Determine the type of this overloaded NEON intrinsic.
- assert(BuiltinID > ARM::BI__builtin_thread_pointer);
-
llvm::SmallVector<Value*, 4> Ops;
for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++)
Ops.push_back(EmitScalarExpr(E->getArg(i)));
@@ -1014,6 +1094,25 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
if (!Arg->isIntegerConstantExpr(Result, getContext()))
return 0;
+ if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
+ BuiltinID == ARM::BI__builtin_arm_vcvtr_d) {
+ // Determine the overloaded type of this builtin.
+ const llvm::Type *Ty;
+ if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
+ Ty = llvm::Type::getFloatTy(VMContext);
+ else
+ Ty = llvm::Type::getDoubleTy(VMContext);
+
+ // Determine whether this is an unsigned conversion or not.
+ bool usgn = Result.getZExtValue() == 1;
+ unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;
+
+ // Call the appropriate intrinsic.
+ Function *F = CGM.getIntrinsic(Int, &Ty, 1);
+ return Builder.CreateCall(F, Ops.begin(), Ops.end(), "vcvtr");
+ }
+
+ // Determine the type of this overloaded NEON intrinsic.
unsigned type = Result.getZExtValue();
bool usgn = type & 0x08;
bool quad = type & 0x10;
@@ -1029,19 +1128,36 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
switch (BuiltinID) {
default: return 0;
case ARM::BI__builtin_neon_vaba_v:
- case ARM::BI__builtin_neon_vabaq_v:
- Int = usgn ? Intrinsic::arm_neon_vabau : Intrinsic::arm_neon_vabas;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vaba");
- case ARM::BI__builtin_neon_vabal_v:
- Int = usgn ? Intrinsic::arm_neon_vabalu : Intrinsic::arm_neon_vabals;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vabal");
+ case ARM::BI__builtin_neon_vabaq_v: {
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ SmallVector<Value*, 2> Args;
+ Args.push_back(Ops[1]);
+ Args.push_back(Ops[2]);
+ Int = usgn ? Intrinsic::arm_neon_vabdu : Intrinsic::arm_neon_vabds;
+ Ops[1] = EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Args, "vaba");
+ return Builder.CreateAdd(Ops[0], Ops[1], "vaba");
+ }
+ case ARM::BI__builtin_neon_vabal_v: {
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ SmallVector<Value*, 2> Args;
+ Args.push_back(Ops[1]);
+ Args.push_back(Ops[2]);
+ Int = usgn ? Intrinsic::arm_neon_vabdu : Intrinsic::arm_neon_vabds;
+ const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
+ Ops[1] = EmitNeonCall(CGM.getIntrinsic(Int, &DTy, 1), Args, "vabal");
+ Ops[1] = Builder.CreateZExt(Ops[1], Ty);
+ return Builder.CreateAdd(Ops[0], Ops[1], "vabal");
+ }
case ARM::BI__builtin_neon_vabd_v:
case ARM::BI__builtin_neon_vabdq_v:
Int = usgn ? Intrinsic::arm_neon_vabdu : Intrinsic::arm_neon_vabds;
return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vabd");
- case ARM::BI__builtin_neon_vabdl_v:
- Int = usgn ? Intrinsic::arm_neon_vabdlu : Intrinsic::arm_neon_vabdls;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vabdl");
+ case ARM::BI__builtin_neon_vabdl_v: {
+ Int = usgn ? Intrinsic::arm_neon_vabdu : Intrinsic::arm_neon_vabds;
+ const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
+ Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, &DTy, 1), Ops, "vabdl");
+ return Builder.CreateZExt(Ops[0], Ty, "vabdl");
+ }
case ARM::BI__builtin_neon_vabs_v:
case ARM::BI__builtin_neon_vabsq_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vabs, &Ty, 1),
@@ -1049,12 +1165,29 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vaddhn_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vaddhn, &Ty, 1),
Ops, "vaddhn");
- case ARM::BI__builtin_neon_vaddl_v:
- Int = usgn ? Intrinsic::arm_neon_vaddlu : Intrinsic::arm_neon_vaddls;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vaddl");
- case ARM::BI__builtin_neon_vaddw_v:
- Int = usgn ? Intrinsic::arm_neon_vaddws : Intrinsic::arm_neon_vaddwu;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vaddw");
+ case ARM::BI__builtin_neon_vaddl_v: {
+ const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
+ Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
+ Ops[1] = Builder.CreateBitCast(Ops[1], DTy);
+ if (usgn) {
+ Ops[0] = Builder.CreateZExt(Ops[0], Ty);
+ Ops[1] = Builder.CreateZExt(Ops[1], Ty);
+ } else {
+ Ops[0] = Builder.CreateSExt(Ops[0], Ty);
+ Ops[1] = Builder.CreateSExt(Ops[1], Ty);
+ }
+ return Builder.CreateAdd(Ops[0], Ops[1], "vaddl");
+ }
+ case ARM::BI__builtin_neon_vaddw_v: {
+ const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], DTy);
+ if (usgn)
+ Ops[1] = Builder.CreateZExt(Ops[1], Ty);
+ else
+ Ops[1] = Builder.CreateSExt(Ops[1], Ty);
+ return Builder.CreateAdd(Ops[0], Ops[1], "vaddw");
+ }
case ARM::BI__builtin_neon_vcale_v:
std::swap(Ops[0], Ops[1]);
case ARM::BI__builtin_neon_vcage_v: {
@@ -1126,6 +1259,12 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Function *F = CGM.getIntrinsic(Int, Tys, 2);
return EmitNeonCall(F, Ops, "vcvt_n");
}
+ case ARM::BI__builtin_neon_vdup_lane_v:
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return EmitNeonSplat(Ops[0], cast<Constant>(Ops[1]));
+ case ARM::BI__builtin_neon_vdupq_lane_v:
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return EmitNeonSplat(Ops[0], cast<Constant>(Ops[1]), true);
case ARM::BI__builtin_neon_vext_v:
case ARM::BI__builtin_neon_vextq_v: {
ConstantInt *C = dyn_cast<ConstantInt>(Ops[2]);
@@ -1161,6 +1300,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vhsub");
case ARM::BI__builtin_neon_vld1_v:
case ARM::BI__builtin_neon_vld1q_v:
+ Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vld1, &Ty, 1),
Ops, "vld1");
case ARM::BI__builtin_neon_vld1_lane_v:
@@ -1183,7 +1323,8 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vld2_v:
case ARM::BI__builtin_neon_vld2q_v: {
Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2, &Ty, 1);
- Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
+ Value *Align = GetPointeeAlignment(*this, E->getArg(1));
+ Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld2");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateStore(Ops[1], Ops[0]);
@@ -1191,7 +1332,8 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vld3_v:
case ARM::BI__builtin_neon_vld3q_v: {
Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3, &Ty, 1);
- Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
+ Value *Align = GetPointeeAlignment(*this, E->getArg(1));
+ Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld3");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateStore(Ops[1], Ops[0]);
@@ -1199,7 +1341,8 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vld4_v:
case ARM::BI__builtin_neon_vld4q_v: {
Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4, &Ty, 1);
- Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
+ Value *Align = GetPointeeAlignment(*this, E->getArg(1));
+ Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld4");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateStore(Ops[1], Ops[0]);
@@ -1209,6 +1352,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2lane, &Ty, 1);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
+ Ops.push_back(GetPointeeAlignment(*this, E->getArg(1)));
Ops[1] = Builder.CreateCall(F, Ops.begin() + 1, Ops.end(), "vld2_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -1220,6 +1364,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
+ Ops.push_back(GetPointeeAlignment(*this, E->getArg(1)));
Ops[1] = Builder.CreateCall(F, Ops.begin() + 1, Ops.end(), "vld3_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -1232,6 +1377,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
Ops[5] = Builder.CreateBitCast(Ops[5], Ty);
+ Ops.push_back(GetPointeeAlignment(*this, E->getArg(1)));
Ops[1] = Builder.CreateCall(F, Ops.begin() + 1, Ops.end(), "vld3_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -1261,6 +1407,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
Args.push_back(CI);
+ Args.push_back(GetPointeeAlignment(*this, E->getArg(1)));
Ops[1] = Builder.CreateCall(F, Args.begin(), Args.end(), "vld_dup");
// splat lane 0 to all elts in each vector of the result.
@@ -1283,28 +1430,79 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vminq_v:
Int = usgn ? Intrinsic::arm_neon_vminu : Intrinsic::arm_neon_vmins;
return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmin");
- case ARM::BI__builtin_neon_vmlal_lane_v:
- splat = true;
- case ARM::BI__builtin_neon_vmlal_v:
- Int = usgn ? Intrinsic::arm_neon_vmlalu : Intrinsic::arm_neon_vmlals;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmlal", splat);
- case ARM::BI__builtin_neon_vmlsl_lane_v:
- splat = true;
- case ARM::BI__builtin_neon_vmlsl_v:
- Int = usgn ? Intrinsic::arm_neon_vmlslu : Intrinsic::arm_neon_vmlsls;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmlsl", splat);
- case ARM::BI__builtin_neon_vmovl_v:
- Int = usgn ? Intrinsic::arm_neon_vmovlu : Intrinsic::arm_neon_vmovls;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmovl");
- case ARM::BI__builtin_neon_vmovn_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vmovn, &Ty, 1),
- Ops, "vmovn");
- case ARM::BI__builtin_neon_vmull_lane_v:
- splat = true;
- case ARM::BI__builtin_neon_vmull_v:
- Int = usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
- Int = poly ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmlal", splat);
+ case ARM::BI__builtin_neon_vmlal_lane_v: {
+ const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
+ Ops[2] = Builder.CreateBitCast(Ops[2], DTy);
+ Ops[2] = EmitNeonSplat(Ops[2], cast<Constant>(Ops[3]));
+ }
+ case ARM::BI__builtin_neon_vmlal_v: {
+ const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], DTy);
+ Ops[2] = Builder.CreateBitCast(Ops[2], DTy);
+ if (usgn) {
+ Ops[1] = Builder.CreateZExt(Ops[1], Ty);
+ Ops[2] = Builder.CreateZExt(Ops[2], Ty);
+ } else {
+ Ops[1] = Builder.CreateSExt(Ops[1], Ty);
+ Ops[2] = Builder.CreateSExt(Ops[2], Ty);
+ }
+ Ops[1] = Builder.CreateMul(Ops[1], Ops[2]);
+ return Builder.CreateAdd(Ops[0], Ops[1], "vmlal");
+ }
+ case ARM::BI__builtin_neon_vmlsl_lane_v: {
+ const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
+ Ops[2] = Builder.CreateBitCast(Ops[2], DTy);
+ Ops[2] = EmitNeonSplat(Ops[2], cast<Constant>(Ops[3]));
+ }
+ case ARM::BI__builtin_neon_vmlsl_v: {
+ const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], DTy);
+ Ops[2] = Builder.CreateBitCast(Ops[2], DTy);
+ if (usgn) {
+ Ops[1] = Builder.CreateZExt(Ops[1], Ty);
+ Ops[2] = Builder.CreateZExt(Ops[2], Ty);
+ } else {
+ Ops[1] = Builder.CreateSExt(Ops[1], Ty);
+ Ops[2] = Builder.CreateSExt(Ops[2], Ty);
+ }
+ Ops[1] = Builder.CreateMul(Ops[1], Ops[2]);
+ return Builder.CreateSub(Ops[0], Ops[1], "vmlsl");
+ }
+ case ARM::BI__builtin_neon_vmovl_v: {
+ const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
+ Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
+ if (usgn)
+ return Builder.CreateZExt(Ops[0], Ty, "vmovl");
+ return Builder.CreateSExt(Ops[0], Ty, "vmovl");
+ }
+ case ARM::BI__builtin_neon_vmovn_v: {
+ const llvm::Type *QTy = llvm::VectorType::getExtendedElementVectorType(VTy);
+ Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
+ return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
+ }
+ case ARM::BI__builtin_neon_vmull_lane_v: {
+ const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
+ Ops[1] = Builder.CreateBitCast(Ops[1], DTy);
+ Ops[1] = EmitNeonSplat(Ops[1], cast<Constant>(Ops[2]));
+ }
+ case ARM::BI__builtin_neon_vmull_v: {
+ if (poly)
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vmullp, &Ty, 1),
+ Ops, "vmull");
+ const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
+ Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
+ Ops[1] = Builder.CreateBitCast(Ops[1], DTy);
+ if (usgn) {
+ Ops[0] = Builder.CreateZExt(Ops[0], Ty);
+ Ops[1] = Builder.CreateZExt(Ops[1], Ty);
+ } else {
+ Ops[0] = Builder.CreateSExt(Ops[0], Ty);
+ Ops[1] = Builder.CreateSExt(Ops[1], Ty);
+ }
+ return Builder.CreateMul(Ops[0], Ops[1], "vmull");
+ }
case ARM::BI__builtin_neon_vpadal_v:
case ARM::BI__builtin_neon_vpadalq_v:
Int = usgn ? Intrinsic::arm_neon_vpadalu : Intrinsic::arm_neon_vpadals;
@@ -1503,6 +1701,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return Builder.CreateAdd(Ops[0], Ops[1]);
case ARM::BI__builtin_neon_vst1_v:
case ARM::BI__builtin_neon_vst1q_v:
+ Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, &Ty, 1),
Ops, "");
case ARM::BI__builtin_neon_vst1_lane_v:
@@ -1513,37 +1712,60 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return Builder.CreateStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty));
case ARM::BI__builtin_neon_vst2_v:
case ARM::BI__builtin_neon_vst2q_v:
+ Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2, &Ty, 1),
Ops, "");
case ARM::BI__builtin_neon_vst2_lane_v:
case ARM::BI__builtin_neon_vst2q_lane_v:
+ Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2lane, &Ty, 1),
Ops, "");
case ARM::BI__builtin_neon_vst3_v:
case ARM::BI__builtin_neon_vst3q_v:
+ Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3, &Ty, 1),
Ops, "");
case ARM::BI__builtin_neon_vst3_lane_v:
case ARM::BI__builtin_neon_vst3q_lane_v:
+ Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3lane, &Ty, 1),
Ops, "");
case ARM::BI__builtin_neon_vst4_v:
case ARM::BI__builtin_neon_vst4q_v:
+ Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4, &Ty, 1),
Ops, "");
case ARM::BI__builtin_neon_vst4_lane_v:
case ARM::BI__builtin_neon_vst4q_lane_v:
+ Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4lane, &Ty, 1),
Ops, "");
case ARM::BI__builtin_neon_vsubhn_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vsubhn, &Ty, 1),
Ops, "vsubhn");
- case ARM::BI__builtin_neon_vsubl_v:
- Int = usgn ? Intrinsic::arm_neon_vsublu : Intrinsic::arm_neon_vsubls;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vsubl");
- case ARM::BI__builtin_neon_vsubw_v:
- Int = usgn ? Intrinsic::arm_neon_vsubws : Intrinsic::arm_neon_vsubwu;
- return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vsubw");
+ case ARM::BI__builtin_neon_vsubl_v: {
+ const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
+ Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
+ Ops[1] = Builder.CreateBitCast(Ops[1], DTy);
+ if (usgn) {
+ Ops[0] = Builder.CreateZExt(Ops[0], Ty);
+ Ops[1] = Builder.CreateZExt(Ops[1], Ty);
+ } else {
+ Ops[0] = Builder.CreateSExt(Ops[0], Ty);
+ Ops[1] = Builder.CreateSExt(Ops[1], Ty);
+ }
+ return Builder.CreateSub(Ops[0], Ops[1], "vsubl");
+ }
+ case ARM::BI__builtin_neon_vsubw_v: {
+ const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], DTy);
+ if (usgn)
+ Ops[1] = Builder.CreateZExt(Ops[1], Ty);
+ else
+ Ops[1] = Builder.CreateSExt(Ops[1], Ty);
+ return Builder.CreateSub(Ops[0], Ops[1], "vsubw");
+ }
case ARM::BI__builtin_neon_vtbl1_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
Ops, "vtbl1");
@@ -1626,8 +1848,8 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<Constant*, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
- Indices.push_back(ConstantInt::get(Int32Ty, (i >> 1)));
- Indices.push_back(ConstantInt::get(Int32Ty, (i >> 1)+e));
+ Indices.push_back(ConstantInt::get(Int32Ty, (i + vi*e) >> 1));
+ Indices.push_back(ConstantInt::get(Int32Ty, ((i + vi*e) >> 1)+e));
}
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
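
Most of the NEON rework above replaces the old 'long'/'wide' arm_neon intrinsics (vabal, vabdl, vaddl, vaddw, vmlal, vmlsl, vmovl, vmovn, vmull, vsubl, vsubw) with plain IR: operands are bitcast to the half-width (or, for vmovn, double-width) vector type, sign- or zero-extended (or truncated), and combined with ordinary add/sub/mul instructions, leaving it to the backend to select the widening forms. Source-level behaviour is unchanged; a small illustration in ordinary arm_neon.h code, not taken from the commit:

// vaddl_s16 adds two 16-bit vectors into a 32-bit result; after this change
// it is emitted as two sext instructions and a plain vector add rather than
// a call to llvm.arm.neon.vaddls.
#include <arm_neon.h>

int32x4_t widening_add(int16x4_t a, int16x4_t b) {
  return vaddl_s16(a, b);   // element-wise: sext(a) + sext(b)
}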
diff --git a/lib/CodeGen/CGCXX.cpp b/lib/CodeGen/CGCXX.cpp
index 7b7be9a260ed..179716f631a8 100644
--- a/lib/CodeGen/CGCXX.cpp
+++ b/lib/CodeGen/CGCXX.cpp
@@ -287,44 +287,6 @@ CodeGenModule::GetAddrOfCXXDestructor(const CXXDestructorDecl *D,
return cast<llvm::Function>(GetOrCreateLLVMFunction(Name, FTy, GD));
}
-llvm::Constant *
-CodeGenModule::GetCXXMemberFunctionPointerValue(const CXXMethodDecl *MD) {
- assert(MD->isInstance() && "Member function must not be static!");
-
- MD = MD->getCanonicalDecl();
-
- const llvm::Type *PtrDiffTy = Types.ConvertType(Context.getPointerDiffType());
-
- // Get the function pointer (or index if this is a virtual function).
- if (MD->isVirtual()) {
- uint64_t Index = VTables.getMethodVTableIndex(MD);
-
- // FIXME: We shouldn't use / 8 here.
- uint64_t PointerWidthInBytes = Context.Target.getPointerWidth(0) / 8;
-
- // Itanium C++ ABI 2.3:
- // For a non-virtual function, this field is a simple function pointer.
- // For a virtual function, it is 1 plus the virtual table offset
- // (in bytes) of the function, represented as a ptrdiff_t.
- return llvm::ConstantInt::get(PtrDiffTy, (Index * PointerWidthInBytes) + 1);
- }
-
- const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
- const llvm::Type *Ty;
- // Check whether the function has a computable LLVM signature.
- if (!CodeGenTypes::VerifyFuncTypeComplete(FPT)) {
- // The function has a computable LLVM signature; use the correct type.
- Ty = Types.GetFunctionType(Types.getFunctionInfo(MD), FPT->isVariadic());
- } else {
- // Use an arbitrary non-function type to tell GetAddrOfFunction that the
- // function type is incomplete.
- Ty = PtrDiffTy;
- }
-
- llvm::Constant *FuncPtr = GetAddrOfFunction(MD, Ty);
- return llvm::ConstantExpr::getPtrToInt(FuncPtr, PtrDiffTy);
-}
-
static llvm::Value *BuildVirtualCall(CodeGenFunction &CGF, uint64_t VTableIndex,
llvm::Value *This, const llvm::Type *Ty) {
Ty = Ty->getPointerTo()->getPointerTo()->getPointerTo();
@@ -356,4 +318,154 @@ CodeGenFunction::BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
return ::BuildVirtualCall(*this, VTableIndex, This, Ty);
}
-CXXABI::~CXXABI() {}
+/// Implementation for CGCXXABI. Possibly this should be moved into
+/// the incomplete ABI implementations?
+
+CGCXXABI::~CGCXXABI() {}
+
+static void ErrorUnsupportedABI(CodeGenFunction &CGF,
+ llvm::StringRef S) {
+ Diagnostic &Diags = CGF.CGM.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(Diagnostic::Error,
+ "cannot yet compile %1 in this ABI");
+ Diags.Report(CGF.getContext().getFullLoc(CGF.CurCodeDecl->getLocation()),
+ DiagID)
+ << S;
+}
+
+static llvm::Constant *GetBogusMemberPointer(CodeGenModule &CGM,
+ QualType T) {
+ return llvm::Constant::getNullValue(CGM.getTypes().ConvertType(T));
+}
+
+const llvm::Type *
+CGCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
+ return CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
+}
+
+llvm::Value *CGCXXABI::EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
+ llvm::Value *&This,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT) {
+ ErrorUnsupportedABI(CGF, "calls through member pointers");
+
+ const FunctionProtoType *FPT =
+ MPT->getPointeeType()->getAs<FunctionProtoType>();
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
+ const llvm::FunctionType *FTy =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(RD, FPT),
+ FPT->isVariadic());
+ return llvm::Constant::getNullValue(FTy->getPointerTo());
+}
+
+llvm::Value *CGCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF,
+ llvm::Value *Base,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT) {
+ ErrorUnsupportedABI(CGF, "loads of member pointers");
+ const llvm::Type *Ty = CGF.ConvertType(MPT->getPointeeType())->getPointerTo();
+ return llvm::Constant::getNullValue(Ty);
+}
+
+llvm::Value *CGCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
+ const CastExpr *E,
+ llvm::Value *Src) {
+ ErrorUnsupportedABI(CGF, "member function pointer conversions");
+ return GetBogusMemberPointer(CGM, E->getType());
+}
+
+llvm::Value *
+CGCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
+ llvm::Value *L,
+ llvm::Value *R,
+ const MemberPointerType *MPT,
+ bool Inequality) {
+ ErrorUnsupportedABI(CGF, "member function pointer comparison");
+ return CGF.Builder.getFalse();
+}
+
+llvm::Value *
+CGCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT) {
+ ErrorUnsupportedABI(CGF, "member function pointer null testing");
+ return CGF.Builder.getFalse();
+}
+
+llvm::Constant *
+CGCXXABI::EmitMemberPointerConversion(llvm::Constant *C, const CastExpr *E) {
+ return GetBogusMemberPointer(CGM, E->getType());
+}
+
+llvm::Constant *
+CGCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
+ return GetBogusMemberPointer(CGM, QualType(MPT, 0));
+}
+
+llvm::Constant *CGCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
+ return GetBogusMemberPointer(CGM,
+ CGM.getContext().getMemberPointerType(MD->getType(),
+ MD->getParent()->getTypeForDecl()));
+}
+
+llvm::Constant *CGCXXABI::EmitMemberPointer(const FieldDecl *FD) {
+ return GetBogusMemberPointer(CGM,
+ CGM.getContext().getMemberPointerType(FD->getType(),
+ FD->getParent()->getTypeForDecl()));
+}
+
+bool CGCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
+ // Fake answer.
+ return true;
+}
+
+void CGCXXABI::BuildThisParam(CodeGenFunction &CGF, FunctionArgList &Params) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
+
+ // FIXME: I'm not entirely sure I like using a fake decl just for code
+ // generation. Maybe we can come up with a better way?
+ ImplicitParamDecl *ThisDecl
+ = ImplicitParamDecl::Create(CGM.getContext(), 0, MD->getLocation(),
+ &CGM.getContext().Idents.get("this"),
+ MD->getThisType(CGM.getContext()));
+ Params.push_back(std::make_pair(ThisDecl, ThisDecl->getType()));
+ getThisDecl(CGF) = ThisDecl;
+}
+
+void CGCXXABI::EmitThisParam(CodeGenFunction &CGF) {
+ /// Initialize the 'this' slot.
+ assert(getThisDecl(CGF) && "no 'this' variable for function");
+ getThisValue(CGF)
+ = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(getThisDecl(CGF)),
+ "this");
+}
+
+void CGCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
+ RValue RV, QualType ResultType) {
+ CGF.EmitReturnOfRValue(RV, ResultType);
+}
+
+CharUnits CGCXXABI::GetArrayCookieSize(QualType ElementType) {
+ return CharUnits::Zero();
+}
+
+llvm::Value *CGCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
+ llvm::Value *NewPtr,
+ llvm::Value *NumElements,
+ QualType ElementType) {
+ // Should never be called.
+ ErrorUnsupportedABI(CGF, "array cookie initialization");
+ return 0;
+}
+
+void CGCXXABI::ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
+ QualType ElementType, llvm::Value *&NumElements,
+ llvm::Value *&AllocPtr, CharUnits &CookieSize) {
+ ErrorUnsupportedABI(CGF, "array cookie reading");
+
+ // This should be enough to avoid assertions.
+ NumElements = 0;
+ AllocPtr = llvm::Constant::getNullValue(CGF.Builder.getInt8PtrTy());
+ CookieSize = CharUnits::Zero();
+}
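
The block of CGCXXABI default implementations added above turns the ABI object into a set of overridable hooks: the defaults report "cannot yet compile ... in this ABI" and hand back placeholder values, so a concrete ABI only overrides what it actually lowers. Below is a hypothetical subclass showing the shape of such an override; the real implementations added by this commit live in ItaniumCXXABI.cpp and MicrosoftCXXABI.cpp.

// Hypothetical subclass, for illustration only.
#include "CGCXXABI.h"

namespace {
class DemoCXXABI : public clang::CodeGen::CGCXXABI {
public:
  DemoCXXABI(clang::CodeGen::CodeGenModule &CGM) : CGCXXABI(CGM) {}

  // A real ABI must also implement getMangleContext(); omitted here.

  // Replace the generic placeholder with this ABI's actual representation
  // of a null member pointer.
  llvm::Constant *EmitNullMemberPointer(const clang::MemberPointerType *MPT) {
    return llvm::Constant::getNullValue(ConvertMemberPointerType(MPT));
  }
};
}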
diff --git a/lib/CodeGen/CGCXXABI.h b/lib/CodeGen/CGCXXABI.h
index e1bbb0a79cce..367e345918a2 100644
--- a/lib/CodeGen/CGCXXABI.h
+++ b/lib/CodeGen/CGCXXABI.h
@@ -15,23 +15,215 @@
#ifndef CLANG_CODEGEN_CXXABI_H
#define CLANG_CODEGEN_CXXABI_H
+#include "CodeGenFunction.h"
+
+namespace llvm {
+ class Constant;
+ class Type;
+ class Value;
+
+ template <class T> class SmallVectorImpl;
+}
+
namespace clang {
+ class CastExpr;
+ class CXXConstructorDecl;
+ class CXXDestructorDecl;
+ class CXXMethodDecl;
+ class CXXRecordDecl;
+ class FieldDecl;
+
namespace CodeGen {
+ class CodeGenFunction;
class CodeGenModule;
class MangleContext;
/// Implements C++ ABI-specific code generation functions.
-class CXXABI {
+class CGCXXABI {
+protected:
+ CodeGenModule &CGM;
+
+ CGCXXABI(CodeGenModule &CGM) : CGM(CGM) {}
+
+protected:
+ ImplicitParamDecl *&getThisDecl(CodeGenFunction &CGF) {
+ return CGF.CXXThisDecl;
+ }
+ llvm::Value *&getThisValue(CodeGenFunction &CGF) {
+ return CGF.CXXThisValue;
+ }
+
+ ImplicitParamDecl *&getVTTDecl(CodeGenFunction &CGF) {
+ return CGF.CXXVTTDecl;
+ }
+ llvm::Value *&getVTTValue(CodeGenFunction &CGF) {
+ return CGF.CXXVTTValue;
+ }
+
+ /// Build a parameter variable suitable for 'this'.
+ void BuildThisParam(CodeGenFunction &CGF, FunctionArgList &Params);
+
+ /// Perform prolog initialization of the parameter variable suitable
+ /// for 'this' emitted by BuildThisParam.
+ void EmitThisParam(CodeGenFunction &CGF);
+
+ ASTContext &getContext() const { return CGM.getContext(); }
+
public:
- virtual ~CXXABI();
+
+ virtual ~CGCXXABI();
/// Gets the mangle context.
virtual MangleContext &getMangleContext() = 0;
+
+ /// Find the LLVM type used to represent the given member pointer
+ /// type.
+ virtual const llvm::Type *
+ ConvertMemberPointerType(const MemberPointerType *MPT);
+
+ /// Load a member function from an object and a member function
+ /// pointer. Apply the this-adjustment and set 'This' to the
+ /// adjusted value.
+ virtual llvm::Value *
+ EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
+ llvm::Value *&This,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT);
+
+ /// Calculate an l-value from an object and a data member pointer.
+ virtual llvm::Value *EmitMemberDataPointerAddress(CodeGenFunction &CGF,
+ llvm::Value *Base,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT);
+
+ /// Perform a derived-to-base or base-to-derived member pointer
+ /// conversion.
+ virtual llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
+ const CastExpr *E,
+ llvm::Value *Src);
+
+ /// Perform a derived-to-base or base-to-derived member pointer
+ /// conversion on a constant member pointer.
+ virtual llvm::Constant *EmitMemberPointerConversion(llvm::Constant *C,
+ const CastExpr *E);
+
+ /// Return true if the given member pointer can be zero-initialized
+ /// (in the C++ sense) with an LLVM zeroinitializer.
+ virtual bool isZeroInitializable(const MemberPointerType *MPT);
+
+ /// Create a null member pointer of the given type.
+ virtual llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT);
+
+ /// Create a member pointer for the given method.
+ virtual llvm::Constant *EmitMemberPointer(const CXXMethodDecl *MD);
+
+ /// Create a member pointer for the given field.
+ virtual llvm::Constant *EmitMemberPointer(const FieldDecl *FD);
+
+ /// Emit a comparison between two member pointers. Returns an i1.
+ virtual llvm::Value *
+ EmitMemberPointerComparison(CodeGenFunction &CGF,
+ llvm::Value *L,
+ llvm::Value *R,
+ const MemberPointerType *MPT,
+ bool Inequality);
+
+ /// Determine if a member pointer is non-null. Returns an i1.
+ virtual llvm::Value *
+ EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT);
+
+ /// Build the signature of the given constructor variant by adding
+ /// any required parameters. For convenience, ResTy has been
+ /// initialized to 'void', and ArgTys has been initialized with the
+ /// type of 'this' (although this may be changed by the ABI) and
+ /// will have the formal parameters added to it afterwards.
+ ///
+ /// If there are ever any ABIs where the implicit parameters are
+ /// intermixed with the formal parameters, we can address those
+ /// then.
+ virtual void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
+ CXXCtorType T,
+ CanQualType &ResTy,
+ llvm::SmallVectorImpl<CanQualType> &ArgTys) = 0;
+
+ /// Build the signature of the given destructor variant by adding
+ /// any required parameters. For convenience, ResTy has been
+ /// initialized to 'void' and ArgTys has been initialized with the
+ /// type of 'this' (although this may be changed by the ABI).
+ virtual void BuildDestructorSignature(const CXXDestructorDecl *Dtor,
+ CXXDtorType T,
+ CanQualType &ResTy,
+ llvm::SmallVectorImpl<CanQualType> &ArgTys) = 0;
+
+ /// Build the ABI-specific portion of the parameter list for a
+ /// function. This generally involves a 'this' parameter and
+ /// possibly some extra data for constructors and destructors.
+ ///
+ /// ABIs may also choose to override the return type, which has been
+ /// initialized with the formal return type of the function.
+ virtual void BuildInstanceFunctionParams(CodeGenFunction &CGF,
+ QualType &ResTy,
+ FunctionArgList &Params) = 0;
+
+ /// Emit the ABI-specific prolog for the function.
+ virtual void EmitInstanceFunctionProlog(CodeGenFunction &CGF) = 0;
+
+ virtual void EmitReturnFromThunk(CodeGenFunction &CGF,
+ RValue RV, QualType ResultType);
+
+ /**************************** Array cookies ******************************/
+
+ /// Returns the extra size required in order to store the array
+ /// cookie for the given type. May return 0 to indicate that no
+ /// array cookie is required.
+ ///
+ /// Several cases are filtered out before this method is called:
+ /// - non-array allocations never need a cookie
+ /// - calls to ::operator new(size_t, void*) never need a cookie
+ ///
+ /// \param ElementType - the allocated type of the expression,
+ /// i.e. the pointee type of the expression result type
+ virtual CharUnits GetArrayCookieSize(QualType ElementType);
+
+ /// Initialize the array cookie for the given allocation.
+ ///
+ /// \param NewPtr - a char* which is the presumed-non-null
+ /// return value of the allocation function
+ /// \param NumElements - the computed number of elements,
+ /// potentially collapsed from the multidimensional array case
+ /// \param ElementType - the base element allocated type,
+ /// i.e. the allocated type after stripping all array types
+ virtual llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
+ llvm::Value *NewPtr,
+ llvm::Value *NumElements,
+ QualType ElementType);
+
+ /// Reads the array cookie associated with the given pointer,
+ /// if it has one.
+ ///
+ /// \param Ptr - a pointer to the first element in the array
+ /// \param ElementType - the base element type of elements of the array
+ /// \param NumElements - an out parameter which will be initialized
+ /// with the number of elements allocated, or zero if there is no
+ /// cookie
+ /// \param AllocPtr - an out parameter which will be initialized
+ /// with a char* pointing to the address returned by the allocation
+ /// function
+ /// \param CookieSize - an out parameter which will be initialized
+ /// with the size of the cookie, or zero if there is no cookie
+ virtual void ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
+ QualType ElementType, llvm::Value *&NumElements,
+ llvm::Value *&AllocPtr, CharUnits &CookieSize);
+
};
/// Creates an instance of a C++ ABI class.
-CXXABI *CreateItaniumCXXABI(CodeGenModule &CGM);
-CXXABI *CreateMicrosoftCXXABI(CodeGenModule &CGM);
+CGCXXABI *CreateARMCXXABI(CodeGenModule &CGM);
+CGCXXABI *CreateItaniumCXXABI(CodeGenModule &CGM);
+CGCXXABI *CreateMicrosoftCXXABI(CodeGenModule &CGM);
+
}
}
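
The array-cookie hooks declared in CGCXXABI.h above are easier to read with the usual Itanium-style layout in mind: new[] over-allocates, stores the element count just before the array, and delete[] reads the count back to know how many destructors to run and where the original allocation began. A plain C++ sketch of that layout follows; it is illustrative only (the real ABI also pads the cookie to the element's alignment, and all names here are made up).

    #include <cstddef>
    #include <cstring>
    #include <new>

    struct Elt { ~Elt() {} int x; };

    // Roughly what InitializeArrayCookie arranges for 'new Elt[n]'.
    void *AllocateArrayWithCookie(std::size_t n) {
      std::size_t CookieSize = sizeof(std::size_t);        // GetArrayCookieSize
      void *AllocPtr = ::operator new[](CookieSize + n * sizeof(Elt));
      std::memcpy(AllocPtr, &n, sizeof(n));                 // store the count
      return static_cast<char *>(AllocPtr) + CookieSize;    // NewPtr seen by the user
    }

    // Roughly what ReadArrayCookie recovers when 'delete[] p' is emitted.
    void ReadCookie(void *Ptr, std::size_t &NumElements, void *&AllocPtr) {
      AllocPtr = static_cast<char *>(Ptr) - sizeof(std::size_t);
      std::memcpy(&NumElements, AllocPtr, sizeof(NumElements));
    }
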
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index 3d1e143dca2b..475dfa5c102a 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "CGCall.h"
+#include "CGCXXABI.h"
#include "ABIInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
@@ -35,6 +36,7 @@ static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
+ // TODO: add support for CC_X86Pascal to llvm
}
}
@@ -99,6 +101,9 @@ static CallingConv getCallingConventionForDecl(const Decl *D) {
if (D->hasAttr<ThisCallAttr>())
return CC_X86ThisCall;
+ if (D->hasAttr<PascalAttr>())
+ return CC_X86Pascal;
+
return CC_C;
}
@@ -116,6 +121,9 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXRecordDecl *RD,
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
llvm::SmallVector<CanQualType, 16> ArgTys;
+  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
+ assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
+
// Add the 'this' pointer unless this is a static method.
if (MD->isInstance())
ArgTys.push_back(GetThisType(Context, MD->getParent()));
@@ -126,29 +134,32 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXConstructorDecl *D,
CXXCtorType Type) {
llvm::SmallVector<CanQualType, 16> ArgTys;
-
- // Add the 'this' pointer.
ArgTys.push_back(GetThisType(Context, D->getParent()));
+ CanQualType ResTy = Context.VoidTy;
- // Check if we need to add a VTT parameter (which has type void **).
- if (Type == Ctor_Base && D->getParent()->getNumVBases() != 0)
- ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));
+ TheCXXABI.BuildConstructorSignature(D, Type, ResTy, ArgTys);
- return ::getFunctionInfo(*this, ArgTys, GetFormalType(D));
+ CanQual<FunctionProtoType> FTP = GetFormalType(D);
+
+ // Add the formal parameters.
+ for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
+ ArgTys.push_back(FTP->getArgType(i));
+
+ return getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
}
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXDestructorDecl *D,
CXXDtorType Type) {
- llvm::SmallVector<CanQualType, 16> ArgTys;
-
- // Add the 'this' pointer.
+ llvm::SmallVector<CanQualType, 2> ArgTys;
ArgTys.push_back(GetThisType(Context, D->getParent()));
-
- // Check if we need to add a VTT parameter (which has type void **).
- if (Type == Dtor_Base && D->getParent()->getNumVBases() != 0)
- ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));
+ CanQualType ResTy = Context.VoidTy;
+
+ TheCXXABI.BuildDestructorSignature(D, Type, ResTy, ArgTys);
- return ::getFunctionInfo(*this, ArgTys, GetFormalType(D));
+ CanQual<FunctionProtoType> FTP = GetFormalType(D);
+ assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");
+
+ return getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
}
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
@@ -243,34 +254,26 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
ArgTys.data(), ArgTys.size());
FunctionInfos.InsertNode(FI, InsertPos);
- // ABI lowering wants to know what our preferred type for the argument is in
- // various situations, pass it in.
- llvm::SmallVector<const llvm::Type *, 8> PreferredArgTypes;
- for (llvm::SmallVectorImpl<CanQualType>::const_iterator
- I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I) {
- // If this is being called from the guts of the ConvertType loop, make sure
- // to call ConvertTypeRecursive so we don't get into issues with cyclic
- // pointer type structures.
- PreferredArgTypes.push_back(ConvertTypeRecursive(*I));
- }
-
// Compute ABI information.
- getABIInfo().computeInfo(*FI, getContext(), TheModule.getContext(),
- PreferredArgTypes.data(), PreferredArgTypes.size());
+ getABIInfo().computeInfo(*FI);
+
+ // Loop over all of the computed argument and return value info. If any of
+ // them are direct or extend without a specified coerce type, specify the
+ // default now.
+ ABIArgInfo &RetInfo = FI->getReturnInfo();
+ if (RetInfo.canHaveCoerceToType() && RetInfo.getCoerceToType() == 0)
+ RetInfo.setCoerceToType(ConvertTypeRecursive(FI->getReturnType()));
+
+ for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
+ I != E; ++I)
+ if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
+ I->info.setCoerceToType(ConvertTypeRecursive(I->type));
// If this is a top-level call and ConvertTypeRecursive hit unresolved pointer
// types, resolve them now. These pointers may point to this function, which
// we *just* filled in the FunctionInfo for.
- if (!IsRecursive && !PointersToResolve.empty()) {
- // Use PATypeHolder's so that our preferred types don't dangle under
- // refinement.
- llvm::SmallVector<llvm::PATypeHolder, 8> Handles(PreferredArgTypes.begin(),
- PreferredArgTypes.end());
+ if (!IsRecursive && !PointersToResolve.empty())
HandleLateResolvedPointers();
- PreferredArgTypes.clear();
- PreferredArgTypes.append(Handles.begin(), Handles.end());
- }
-
return *FI;
}
@@ -311,11 +314,10 @@ void CodeGenTypes::GetExpandedTypes(QualType Ty,
"Cannot expand structure with bit-field members.");
QualType FT = FD->getType();
- if (CodeGenFunction::hasAggregateLLVMType(FT)) {
+ if (CodeGenFunction::hasAggregateLLVMType(FT))
GetExpandedTypes(FT, ArgTys, IsRecursive);
- } else {
+ else
ArgTys.push_back(ConvertType(FT, IsRecursive));
- }
}
}
@@ -613,7 +615,7 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic,
case ABIArgInfo::Extend:
case ABIArgInfo::Direct:
- ResultType = ConvertType(RetTy, IsRecursive);
+ ResultType = RetAI.getCoerceToType();
break;
case ABIArgInfo::Indirect: {
@@ -627,10 +629,6 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic,
case ABIArgInfo::Ignore:
ResultType = llvm::Type::getVoidTy(getLLVMContext());
break;
-
- case ABIArgInfo::Coerce:
- ResultType = RetAI.getCoerceToType();
- break;
}
for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
@@ -641,7 +639,15 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic,
case ABIArgInfo::Ignore:
break;
- case ABIArgInfo::Coerce: {
+ case ABIArgInfo::Indirect: {
+ // indirect arguments are always on the stack, which is addr space #0.
+ const llvm::Type *LTy = ConvertTypeForMem(it->type, IsRecursive);
+ ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
+ break;
+ }
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct: {
// If the coerce-to type is a first class aggregate, flatten it. Either
// way is semantically identical, but fast-isel and the optimizer
// generally likes scalar values better than FCAs.
@@ -655,18 +661,6 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic,
break;
}
- case ABIArgInfo::Indirect: {
- // indirect arguments are always on the stack, which is addr space #0.
- const llvm::Type *LTy = ConvertTypeForMem(it->type, IsRecursive);
- ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
- break;
- }
-
- case ABIArgInfo::Extend:
- case ABIArgInfo::Direct:
- ArgTys.push_back(ConvertType(it->type, IsRecursive));
- break;
-
case ABIArgInfo::Expand:
GetExpandedTypes(it->type, ArgTys, IsRecursive);
break;
@@ -676,12 +670,18 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic,
return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
}
-const llvm::Type *
-CodeGenTypes::GetFunctionTypeForVTable(const CXXMethodDecl *MD) {
+const llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
- if (!VerifyFuncTypeComplete(FPT))
- return GetFunctionType(getFunctionInfo(MD), FPT->isVariadic(), false);
+ if (!VerifyFuncTypeComplete(FPT)) {
+ const CGFunctionInfo *Info;
+ if (isa<CXXDestructorDecl>(MD))
+ Info = &getFunctionInfo(cast<CXXDestructorDecl>(MD), GD.getDtorType());
+ else
+ Info = &getFunctionInfo(MD);
+ return GetFunctionType(*Info, FPT->isVariadic(), false);
+ }
return llvm::OpaqueType::get(getLLVMContext());
}
@@ -730,13 +730,13 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
const ABIArgInfo &RetAI = FI.getReturnInfo();
switch (RetAI.getKind()) {
case ABIArgInfo::Extend:
- if (RetTy->isSignedIntegerType()) {
+ if (RetTy->hasSignedIntegerRepresentation())
RetAttrs |= llvm::Attribute::SExt;
- } else if (RetTy->isUnsignedIntegerType()) {
+ else if (RetTy->hasUnsignedIntegerRepresentation())
RetAttrs |= llvm::Attribute::ZExt;
- }
- // FALLTHROUGH
+ break;
case ABIArgInfo::Direct:
+ case ABIArgInfo::Ignore:
break;
case ABIArgInfo::Indirect:
@@ -748,10 +748,6 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
llvm::Attribute::ReadNone);
break;
- case ABIArgInfo::Ignore:
- case ABIArgInfo::Coerce:
- break;
-
case ABIArgInfo::Expand:
assert(0 && "Invalid ABI kind for return argument");
}
@@ -759,7 +755,7 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
if (RetAttrs)
PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));
- // FIXME: we need to honour command line settings also...
+ // FIXME: we need to honor command line settings also.
// FIXME: RegParm should be reduced in case of nested functions and/or global
// register variable.
signed RegParm = FI.getRegParm();
@@ -774,15 +770,27 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
// 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
// have the corresponding parameter variable. It doesn't make
    // sense to do it here because parameters are so awkward to deal with there.
-
switch (AI.getKind()) {
- case ABIArgInfo::Coerce:
+ case ABIArgInfo::Extend:
+ if (ParamType->isSignedIntegerType())
+ Attributes |= llvm::Attribute::SExt;
+ else if (ParamType->isUnsignedIntegerType())
+ Attributes |= llvm::Attribute::ZExt;
+ // FALL THROUGH
+ case ABIArgInfo::Direct:
+ if (RegParm > 0 &&
+ (ParamType->isIntegerType() || ParamType->isPointerType())) {
+ RegParm -=
+ (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
+ if (RegParm >= 0)
+ Attributes |= llvm::Attribute::InReg;
+ }
+ // FIXME: handle sseregparm someday...
+
if (const llvm::StructType *STy =
- dyn_cast<llvm::StructType>(AI.getCoerceToType()))
- Index += STy->getNumElements();
- else
- ++Index;
- continue; // Skip index increment.
+ dyn_cast<llvm::StructType>(AI.getCoerceToType()))
+ Index += STy->getNumElements()-1; // 1 will be added below.
+ break;
case ABIArgInfo::Indirect:
if (AI.getIndirectByVal())
@@ -795,24 +803,6 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
llvm::Attribute::ReadNone);
break;
- case ABIArgInfo::Extend:
- if (ParamType->isSignedIntegerType()) {
- Attributes |= llvm::Attribute::SExt;
- } else if (ParamType->isUnsignedIntegerType()) {
- Attributes |= llvm::Attribute::ZExt;
- }
- // FALLS THROUGH
- case ABIArgInfo::Direct:
- if (RegParm > 0 &&
- (ParamType->isIntegerType() || ParamType->isPointerType())) {
- RegParm -=
- (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
- if (RegParm >= 0)
- Attributes |= llvm::Attribute::InReg;
- }
- // FIXME: handle sseregparm someday...
- break;
-
case ABIArgInfo::Ignore:
// Skip increment, no matching LLVM parameter.
continue;
@@ -881,7 +871,8 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// reference.
} else {
// Load scalar value from indirect argument.
- V = EmitLoadOfScalar(V, false, Ty);
+ unsigned Alignment = getContext().getTypeAlignInChars(Ty).getQuantity();
+ V = EmitLoadOfScalar(V, false, Alignment, Ty);
if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
// This must be a promotion, for something like
// "void a(x) short x; {..."
@@ -894,15 +885,13 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
- assert(AI != Fn->arg_end() && "Argument mismatch!");
- llvm::Value *V = AI;
- if (hasAggregateLLVMType(Ty)) {
- // Create a temporary alloca to hold the argument; the rest of
- // codegen expects to access aggregates & complex values by
- // reference.
- V = CreateMemTemp(Ty);
- Builder.CreateStore(AI, V);
- } else {
+ // If we have the trivial case, handle it with no muss and fuss.
+ if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
+ ArgI.getCoerceToType() == ConvertType(Ty) &&
+ ArgI.getDirectOffset() == 0) {
+ assert(AI != Fn->arg_end() && "Argument mismatch!");
+ llvm::Value *V = AI;
+
if (Arg->getType().isRestrictQualified())
AI->addAttr(llvm::Attribute::NoAlias);
@@ -911,53 +900,36 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// "void a(x) short x; {..."
V = EmitScalarConversion(V, Ty, Arg->getType());
}
+ EmitParmDecl(*Arg, V);
+ break;
}
- EmitParmDecl(*Arg, V);
- break;
- }
-
- case ABIArgInfo::Expand: {
- // If this structure was expanded into multiple arguments then
- // we need to create a temporary and reconstruct it from the
- // arguments.
- llvm::Value *Temp = CreateMemTemp(Ty, Arg->getName() + ".addr");
- // FIXME: What are the right qualifiers here?
- llvm::Function::arg_iterator End =
- ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp, Qualifiers()), AI);
- EmitParmDecl(*Arg, Temp);
-
- // Name the arguments used in expansion and increment AI.
- unsigned Index = 0;
- for (; AI != End; ++AI, ++Index)
- AI->setName(Arg->getName() + "." + llvm::Twine(Index));
- continue;
- }
-
- case ABIArgInfo::Ignore:
- // Initialize the local variable appropriately.
- if (hasAggregateLLVMType(Ty)) {
- EmitParmDecl(*Arg, CreateMemTemp(Ty));
- } else {
- EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));
- }
-
- // Skip increment, no matching LLVM parameter.
- continue;
- case ABIArgInfo::Coerce: {
- // FIXME: This is very wasteful; EmitParmDecl is just going to drop the
- // result in a new alloca anyway, so we could just store into that
- // directly if we broke the abstraction down more.
llvm::AllocaInst *Alloca = CreateMemTemp(Ty, "coerce");
- Alloca->setAlignment(getContext().getDeclAlign(Arg).getQuantity());
+
+    // The alignment we need to use is the max of the requested alignment for
+    // the argument and the alignment required by our access code below.
+ unsigned AlignmentToUse =
+ CGF.CGM.getTargetData().getABITypeAlignment(ArgI.getCoerceToType());
+ AlignmentToUse = std::max(AlignmentToUse,
+ (unsigned)getContext().getDeclAlign(Arg).getQuantity());
+
+ Alloca->setAlignment(AlignmentToUse);
llvm::Value *V = Alloca;
+ llvm::Value *Ptr = V; // Pointer to store into.
+
+ // If the value is offset in memory, apply the offset now.
+ if (unsigned Offs = ArgI.getDirectOffset()) {
+ Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
+ Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
+ Ptr = Builder.CreateBitCast(Ptr,
+ llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
+ }
// If the coerce-to type is a first class aggregate, we flatten it and
// pass the elements. Either way is semantically identical, but fast-isel
// and the optimizer generally likes scalar values better than FCAs.
if (const llvm::StructType *STy =
dyn_cast<llvm::StructType>(ArgI.getCoerceToType())) {
- llvm::Value *Ptr = V;
Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
@@ -970,13 +942,13 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// Simple case, just do a coerced store of the argument into the alloca.
assert(AI != Fn->arg_end() && "Argument mismatch!");
AI->setName(Arg->getName() + ".coerce");
- CreateCoercedStore(AI++, V, /*DestIsVolatile=*/false, *this);
+ CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
}
// Match to what EmitParmDecl is expecting for this type.
if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
- V = EmitLoadOfScalar(V, false, Ty);
+ V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);
if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
// This must be a promotion, for something like
// "void a(x) short x; {..."
@@ -986,6 +958,32 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
EmitParmDecl(*Arg, V);
continue; // Skip ++AI increment, already done.
}
+
+ case ABIArgInfo::Expand: {
+ // If this structure was expanded into multiple arguments then
+ // we need to create a temporary and reconstruct it from the
+ // arguments.
+ llvm::Value *Temp = CreateMemTemp(Ty, Arg->getName() + ".addr");
+ llvm::Function::arg_iterator End =
+ ExpandTypeFromArgs(Ty, MakeAddrLValue(Temp, Ty), AI);
+ EmitParmDecl(*Arg, Temp);
+
+ // Name the arguments used in expansion and increment AI.
+ unsigned Index = 0;
+ for (; AI != End; ++AI, ++Index)
+ AI->setName(Arg->getName() + "." + llvm::Twine(Index));
+ continue;
+ }
+
+ case ABIArgInfo::Ignore:
+ // Initialize the local variable appropriately.
+ if (hasAggregateLLVMType(Ty))
+ EmitParmDecl(*Arg, CreateMemTemp(Ty));
+ else
+ EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));
+
+ // Skip increment, no matching LLVM parameter.
+ continue;
}
++AI;
@@ -1000,13 +998,14 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
return;
}
- llvm::MDNode *RetDbgInfo = 0;
+ llvm::DebugLoc RetDbgLoc;
llvm::Value *RV = 0;
QualType RetTy = FI.getReturnType();
const ABIArgInfo &RetAI = FI.getReturnInfo();
switch (RetAI.getKind()) {
- case ABIArgInfo::Indirect:
+ case ABIArgInfo::Indirect: {
+ unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
if (RetTy->isAnyComplexType()) {
ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
StoreComplexToAddr(RT, CurFn->arg_begin(), false);
@@ -1014,43 +1013,54 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
    // Do nothing; aggregates get evaluated directly into the destination.
} else {
EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
- false, RetTy);
+ false, Alignment, RetTy);
}
break;
+ }
case ABIArgInfo::Extend:
- case ABIArgInfo::Direct: {
- // The internal return value temp always will have pointer-to-return-type
- // type, just do a load.
-
- // If the instruction right before the insertion point is a store to the
- // return value, we can elide the load, zap the store, and usually zap the
- // alloca.
- llvm::BasicBlock *InsertBB = Builder.GetInsertBlock();
- llvm::StoreInst *SI = 0;
- if (InsertBB->empty() ||
- !(SI = dyn_cast<llvm::StoreInst>(&InsertBB->back())) ||
- SI->getPointerOperand() != ReturnValue || SI->isVolatile()) {
- RV = Builder.CreateLoad(ReturnValue);
+ case ABIArgInfo::Direct:
+ if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
+ RetAI.getDirectOffset() == 0) {
+ // The internal return value temp always will have pointer-to-return-type
+ // type, just do a load.
+
+ // If the instruction right before the insertion point is a store to the
+ // return value, we can elide the load, zap the store, and usually zap the
+ // alloca.
+ llvm::BasicBlock *InsertBB = Builder.GetInsertBlock();
+ llvm::StoreInst *SI = 0;
+ if (InsertBB->empty() ||
+ !(SI = dyn_cast<llvm::StoreInst>(&InsertBB->back())) ||
+ SI->getPointerOperand() != ReturnValue || SI->isVolatile()) {
+ RV = Builder.CreateLoad(ReturnValue);
+ } else {
+ // Get the stored value and nuke the now-dead store.
+ RetDbgLoc = SI->getDebugLoc();
+ RV = SI->getValueOperand();
+ SI->eraseFromParent();
+
+ // If that was the only use of the return value, nuke it as well now.
+ if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
+ cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
+ ReturnValue = 0;
+ }
+ }
} else {
- // Get the stored value and nuke the now-dead store.
- RetDbgInfo = SI->getDbgMetadata();
- RV = SI->getValueOperand();
- SI->eraseFromParent();
-
- // If that was the only use of the return value, nuke it as well now.
- if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
- cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
- ReturnValue = 0;
+ llvm::Value *V = ReturnValue;
+ // If the value is offset in memory, apply the offset now.
+ if (unsigned Offs = RetAI.getDirectOffset()) {
+ V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
+ V = Builder.CreateConstGEP1_32(V, Offs);
+ V = Builder.CreateBitCast(V,
+ llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
}
+
+ RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
}
break;
- }
- case ABIArgInfo::Ignore:
- break;
- case ABIArgInfo::Coerce:
- RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
+ case ABIArgInfo::Ignore:
break;
case ABIArgInfo::Expand:
@@ -1058,8 +1068,8 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
}
llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
- if (RetDbgInfo)
- Ret->setDbgMetadata(RetDbgInfo);
+ if (!RetDbgLoc.isUnknown())
+ Ret->setDebugLoc(RetDbgLoc);
}
RValue CodeGenFunction::EmitDelegateCallArg(const VarDecl *Param) {
@@ -1089,7 +1099,8 @@ RValue CodeGenFunction::EmitDelegateCallArg(const VarDecl *Param) {
if (hasAggregateLLVMType(ArgType))
return RValue::getAggregate(Local);
- return RValue::get(EmitLoadOfScalar(Local, false, ArgType));
+ unsigned Alignment = getContext().getDeclAlign(Param).getQuantity();
+ return RValue::get(EmitLoadOfScalar(Local, false, Alignment, ArgType));
}
RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) {
@@ -1149,49 +1160,60 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
const ABIArgInfo &ArgInfo = info_it->info;
RValue RV = I->first;
+ unsigned Alignment =
+ getContext().getTypeAlignInChars(I->second).getQuantity();
switch (ArgInfo.getKind()) {
- case ABIArgInfo::Indirect:
+ case ABIArgInfo::Indirect: {
if (RV.isScalar() || RV.isComplex()) {
// Make a temporary alloca to pass the argument.
Args.push_back(CreateMemTemp(I->second));
if (RV.isScalar())
- EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false, I->second);
+ EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false,
+ Alignment, I->second);
else
StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
} else {
Args.push_back(RV.getAggregateAddr());
}
break;
-
- case ABIArgInfo::Extend:
- case ABIArgInfo::Direct:
- if (RV.isScalar()) {
- Args.push_back(RV.getScalarVal());
- } else if (RV.isComplex()) {
- llvm::Value *Tmp = llvm::UndefValue::get(ConvertType(I->second));
- Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().first, 0);
- Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().second, 1);
- Args.push_back(Tmp);
- } else {
- Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
- }
- break;
+ }
case ABIArgInfo::Ignore:
break;
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct: {
+ if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
+ ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
+ ArgInfo.getDirectOffset() == 0) {
+ if (RV.isScalar())
+ Args.push_back(RV.getScalarVal());
+ else
+ Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
+ break;
+ }
- case ABIArgInfo::Coerce: {
// FIXME: Avoid the conversion through memory if possible.
llvm::Value *SrcPtr;
if (RV.isScalar()) {
SrcPtr = CreateMemTemp(I->second, "coerce");
- EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, I->second);
+ EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, Alignment,
+ I->second);
} else if (RV.isComplex()) {
SrcPtr = CreateMemTemp(I->second, "coerce");
StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
} else
SrcPtr = RV.getAggregateAddr();
+ // If the value is offset in memory, apply the offset now.
+ if (unsigned Offs = ArgInfo.getDirectOffset()) {
+ SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
+ SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
+ SrcPtr = Builder.CreateBitCast(SrcPtr,
+ llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
+
+ }
+
// If the coerce-to type is a first class aggregate, we flatten it and
// pass the elements. Either way is semantically identical, but fast-isel
// and the optimizer generally likes scalar values better than FCAs.
@@ -1201,7 +1223,10 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm::PointerType::getUnqual(STy));
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
- Args.push_back(Builder.CreateLoad(EltPtr));
+ llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
+ // We don't know what we're loading from.
+ LI->setAlignment(1);
+ Args.push_back(LI);
}
} else {
// In the simple case, just pass the coerced loaded value.
@@ -1294,39 +1319,43 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
CI->setName("call");
switch (RetAI.getKind()) {
- case ABIArgInfo::Indirect:
+ case ABIArgInfo::Indirect: {
+ unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
if (RetTy->isAnyComplexType())
return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
if (CodeGenFunction::hasAggregateLLVMType(RetTy))
return RValue::getAggregate(Args[0]);
- return RValue::get(EmitLoadOfScalar(Args[0], false, RetTy));
-
- case ABIArgInfo::Extend:
- case ABIArgInfo::Direct:
- if (RetTy->isAnyComplexType()) {
- llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
- llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
- return RValue::getComplex(std::make_pair(Real, Imag));
- }
- if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
- llvm::Value *DestPtr = ReturnValue.getValue();
- bool DestIsVolatile = ReturnValue.isVolatile();
-
- if (!DestPtr) {
- DestPtr = CreateMemTemp(RetTy, "agg.tmp");
- DestIsVolatile = false;
- }
- Builder.CreateStore(CI, DestPtr, DestIsVolatile);
- return RValue::getAggregate(DestPtr);
- }
- return RValue::get(CI);
+ return RValue::get(EmitLoadOfScalar(Args[0], false, Alignment, RetTy));
+ }
case ABIArgInfo::Ignore:
// If we are ignoring an argument that had a result, make sure to
// construct the appropriate return value for our caller.
return GetUndefRValue(RetTy);
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct: {
+ if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
+ RetAI.getDirectOffset() == 0) {
+ if (RetTy->isAnyComplexType()) {
+ llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
+ llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
+ return RValue::getComplex(std::make_pair(Real, Imag));
+ }
+ if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+ llvm::Value *DestPtr = ReturnValue.getValue();
+ bool DestIsVolatile = ReturnValue.isVolatile();
- case ABIArgInfo::Coerce: {
+ if (!DestPtr) {
+ DestPtr = CreateMemTemp(RetTy, "agg.tmp");
+ DestIsVolatile = false;
+ }
+ Builder.CreateStore(CI, DestPtr, DestIsVolatile);
+ return RValue::getAggregate(DestPtr);
+ }
+ return RValue::get(CI);
+ }
+
llvm::Value *DestPtr = ReturnValue.getValue();
bool DestIsVolatile = ReturnValue.isVolatile();
@@ -1335,12 +1364,22 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
DestIsVolatile = false;
}
- CreateCoercedStore(CI, DestPtr, DestIsVolatile, *this);
+ // If the value is offset in memory, apply the offset now.
+ llvm::Value *StorePtr = DestPtr;
+ if (unsigned Offs = RetAI.getDirectOffset()) {
+ StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
+ StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
+ StorePtr = Builder.CreateBitCast(StorePtr,
+ llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
+ }
+ CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
+
+ unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
if (RetTy->isAnyComplexType())
return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
if (CodeGenFunction::hasAggregateLLVMType(RetTy))
return RValue::getAggregate(DestPtr);
- return RValue::get(EmitLoadOfScalar(DestPtr, false, RetTy));
+ return RValue::get(EmitLoadOfScalar(DestPtr, false, Alignment, RetTy));
}
case ABIArgInfo::Expand:
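
The Direct/Extend handling in the CGCall.cpp hunks above subsumes the old Coerce kind: an argument or return value carries a "coerce-to" LLVM type, and when that differs from the natural type the value is round-tripped through a temporary, with an optional byte offset, instead of being passed as-is. Stripped of the IRBuilder calls, the coercion is essentially a type-punning copy; the types in the sketch below are made-up examples, not what any particular target actually chooses.

    #include <cstdint>
    #include <cstring>

    struct Small { std::uint32_t a, b; };   // the source-level aggregate
    using CoerceTo = std::uint64_t;         // example ABI "coerce-to" type

    // Analogue of spilling the value and doing a CreateCoercedLoad at offset 0.
    CoerceTo CoerceForCall(const Small &S) {
      CoerceTo V = 0;
      std::memcpy(&V, &S, sizeof(S));       // reinterpret the same bytes
      return V;                             // this is what gets passed or returned
    }
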
diff --git a/lib/CodeGen/CGClass.cpp b/lib/CodeGen/CGClass.cpp
index c50fe90f8a81..bf26799b83f3 100644
--- a/lib/CodeGen/CGClass.cpp
+++ b/lib/CodeGen/CGClass.cpp
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
+#include "CGDebugInfo.h"
#include "CodeGenFunction.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/RecordLayout.h"
@@ -22,13 +23,13 @@ using namespace CodeGen;
static uint64_t
ComputeNonVirtualBaseClassOffset(ASTContext &Context,
const CXXRecordDecl *DerivedClass,
- CXXBaseSpecifierArray::iterator Start,
- CXXBaseSpecifierArray::iterator End) {
+ CastExpr::path_const_iterator Start,
+ CastExpr::path_const_iterator End) {
uint64_t Offset = 0;
const CXXRecordDecl *RD = DerivedClass;
- for (CXXBaseSpecifierArray::iterator I = Start; I != End; ++I) {
+ for (CastExpr::path_const_iterator I = Start; I != End; ++I) {
const CXXBaseSpecifier *Base = *I;
assert(!Base->isVirtual() && "Should not see virtual bases here!");
@@ -50,12 +51,13 @@ ComputeNonVirtualBaseClassOffset(ASTContext &Context,
llvm::Constant *
CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
- const CXXBaseSpecifierArray &BasePath) {
- assert(!BasePath.empty() && "Base path should not be empty!");
+ CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd) {
+ assert(PathBegin != PathEnd && "Base path should not be empty!");
uint64_t Offset =
- ComputeNonVirtualBaseClassOffset(getContext(), ClassDecl,
- BasePath.begin(), BasePath.end());
+ ComputeNonVirtualBaseClassOffset(getContext(), ClassDecl,
+ PathBegin, PathEnd);
if (!Offset)
return 0;
@@ -131,11 +133,12 @@ ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ThisPtr,
llvm::Value *
CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
const CXXRecordDecl *Derived,
- const CXXBaseSpecifierArray &BasePath,
+ CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd,
bool NullCheckValue) {
- assert(!BasePath.empty() && "Base path should not be empty!");
+ assert(PathBegin != PathEnd && "Base path should not be empty!");
- CXXBaseSpecifierArray::iterator Start = BasePath.begin();
+ CastExpr::path_const_iterator Start = PathBegin;
const CXXRecordDecl *VBase = 0;
// Get the virtual base.
@@ -147,11 +150,11 @@ CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
uint64_t NonVirtualOffset =
ComputeNonVirtualBaseClassOffset(getContext(), VBase ? VBase : Derived,
- Start, BasePath.end());
+ Start, PathEnd);
// Get the base pointer type.
const llvm::Type *BasePtrTy =
- ConvertType((BasePath.end()[-1])->getType())->getPointerTo();
+ ConvertType((PathEnd[-1])->getType())->getPointerTo();
if (!NonVirtualOffset && !VBase) {
// Just cast back.
@@ -206,16 +209,17 @@ CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
llvm::Value *
CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
const CXXRecordDecl *Derived,
- const CXXBaseSpecifierArray &BasePath,
+ CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd,
bool NullCheckValue) {
- assert(!BasePath.empty() && "Base path should not be empty!");
+ assert(PathBegin != PathEnd && "Base path should not be empty!");
QualType DerivedTy =
getContext().getCanonicalType(getContext().getTagDeclType(Derived));
const llvm::Type *DerivedPtrTy = ConvertType(DerivedTy)->getPointerTo();
llvm::Value *NonVirtualOffset =
- CGM.GetNonVirtualBaseClassOffset(Derived, BasePath);
+ CGM.GetNonVirtualBaseClassOffset(Derived, PathBegin, PathEnd);
if (!NonVirtualOffset) {
// No offset, we can just cast back.
@@ -310,6 +314,28 @@ static llvm::Value *GetVTTParameter(CodeGenFunction &CGF, GlobalDecl GD,
return VTT;
}
+namespace {
+ /// Call the destructor for a direct base class.
+ struct CallBaseDtor : EHScopeStack::Cleanup {
+ const CXXRecordDecl *BaseClass;
+ bool BaseIsVirtual;
+ CallBaseDtor(const CXXRecordDecl *Base, bool BaseIsVirtual)
+ : BaseClass(Base), BaseIsVirtual(BaseIsVirtual) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ const CXXRecordDecl *DerivedClass =
+ cast<CXXMethodDecl>(CGF.CurCodeDecl)->getParent();
+
+ const CXXDestructorDecl *D = BaseClass->getDestructor();
+ llvm::Value *Addr =
+ CGF.GetAddressOfDirectBaseInCompleteClass(CGF.LoadCXXThis(),
+ DerivedClass, BaseClass,
+ BaseIsVirtual);
+ CGF.EmitCXXDestructorCall(D, Dtor_Base, BaseIsVirtual, Addr);
+ }
+ };
+}
+
static void EmitBaseInitializer(CodeGenFunction &CGF,
const CXXRecordDecl *ClassDecl,
CXXBaseOrMemberInitializer *BaseInit,
@@ -333,18 +359,14 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
// virtual bases, and we only do virtual bases for complete ctors.
llvm::Value *V =
CGF.GetAddressOfDirectBaseInCompleteClass(ThisPtr, ClassDecl,
- BaseClassDecl,
- BaseInit->isBaseVirtual());
+ BaseClassDecl,
+ isBaseVirtual);
CGF.EmitAggExpr(BaseInit->getInit(), V, false, false, true);
- if (CGF.Exceptions && !BaseClassDecl->hasTrivialDestructor()) {
- // FIXME: Is this OK for C++0x delegating constructors?
- CodeGenFunction::CleanupBlock Cleanup(CGF, EHCleanup);
-
- CXXDestructorDecl *DD = BaseClassDecl->getDestructor();
- CGF.EmitCXXDestructorCall(DD, Dtor_Base, isBaseVirtual, V);
- }
+ if (CGF.Exceptions && !BaseClassDecl->hasTrivialDestructor())
+ CGF.EHStack.pushCleanup<CallBaseDtor>(EHCleanup, BaseClassDecl,
+ isBaseVirtual);
}
static void EmitAggMemberInitializer(CodeGenFunction &CGF,
@@ -432,6 +454,25 @@ static void EmitAggMemberInitializer(CodeGenFunction &CGF,
// Emit the fall-through block.
CGF.EmitBlock(AfterFor, true);
}
+
+namespace {
+ struct CallMemberDtor : EHScopeStack::Cleanup {
+ FieldDecl *Field;
+ CXXDestructorDecl *Dtor;
+
+ CallMemberDtor(FieldDecl *Field, CXXDestructorDecl *Dtor)
+ : Field(Field), Dtor(Dtor) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ // FIXME: Is this OK for C++0x delegating constructors?
+ llvm::Value *ThisPtr = CGF.LoadCXXThis();
+ LValue LHS = CGF.EmitLValueForField(ThisPtr, Field, 0);
+
+ CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
+ LHS.getAddress());
+ }
+ };
+}
static void EmitMemberInitializer(CodeGenFunction &CGF,
const CXXRecordDecl *ClassDecl,
@@ -487,7 +528,7 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
BasePtr = llvm::PointerType::getUnqual(BasePtr);
llvm::Value *BaseAddrPtr = CGF.Builder.CreateBitCast(LHS.getAddress(),
BasePtr);
- LHS = LValue::MakeAddr(BaseAddrPtr, CGF.MakeQualifiers(BaseElementTy));
+ LHS = CGF.MakeAddrLValue(BaseAddrPtr, BaseElementTy);
// Create an array index that will be used to walk over all of the
// objects we're constructing.
@@ -532,17 +573,9 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
return;
CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
- if (!RD->hasTrivialDestructor()) {
- // FIXME: Is this OK for C++0x delegating constructors?
- CodeGenFunction::CleanupBlock Cleanup(CGF, EHCleanup);
-
- llvm::Value *ThisPtr = CGF.LoadCXXThis();
- LValue LHS = CGF.EmitLValueForField(ThisPtr, Field, 0);
-
- CXXDestructorDecl *DD = RD->getDestructor();
- CGF.EmitCXXDestructorCall(DD, Dtor_Complete, /*ForVirtualBase=*/false,
- LHS.getAddress());
- }
+ if (!RD->hasTrivialDestructor())
+ CGF.EHStack.pushCleanup<CallMemberDtor>(EHCleanup, Field,
+ RD->getDestructor());
}
}
@@ -598,6 +631,8 @@ void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
// Before we go any further, try the complete->base constructor
// delegation optimization.
if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor)) {
+ if (CGDebugInfo *DI = getDebugInfo())
+ DI->EmitStopPoint(Builder);
EmitDelegateCXXConstructorCall(Ctor, Ctor_Base, Args);
return;
}
@@ -663,113 +698,158 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CurGD.getDecl());
CXXDtorType DtorType = CurGD.getDtorType();
+ // The call to operator delete in a deleting destructor happens
+ // outside of the function-try-block, which means it's always
+ // possible to delegate the destructor body to the complete
+ // destructor. Do so.
+ if (DtorType == Dtor_Deleting) {
+ EnterDtorCleanups(Dtor, Dtor_Deleting);
+ EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
+ LoadCXXThis());
+ PopCleanupBlock();
+ return;
+ }
+
Stmt *Body = Dtor->getBody();
// If the body is a function-try-block, enter the try before
- // anything else --- unless we're in a deleting destructor, in which
- // case we're just going to call the complete destructor and then
- // call operator delete() on the way out.
- bool isTryBody = (DtorType != Dtor_Deleting &&
- Body && isa<CXXTryStmt>(Body));
+ // anything else.
+ bool isTryBody = (Body && isa<CXXTryStmt>(Body));
if (isTryBody)
EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);
- // Emit the destructor epilogue now. If this is a complete
- // destructor with a function-try-block, perform the base epilogue
- // as well.
- //
- // FIXME: This isn't really right, because an exception in the
- // non-EH epilogue should jump to the appropriate place in the
- // EH epilogue.
- {
- CleanupBlock Cleanup(*this, NormalCleanup);
-
- if (isTryBody && DtorType == Dtor_Complete)
- EmitDtorEpilogue(Dtor, Dtor_Base);
- EmitDtorEpilogue(Dtor, DtorType);
-
- if (Exceptions) {
- Cleanup.beginEHCleanup();
-
- if (isTryBody && DtorType == Dtor_Complete)
- EmitDtorEpilogue(Dtor, Dtor_Base);
- EmitDtorEpilogue(Dtor, DtorType);
- }
- }
-
- bool SkipBody = false; // should get jump-threaded
-
- // If this is the deleting variant, just invoke the complete
- // variant, then call the appropriate operator delete() on the way
- // out.
- if (DtorType == Dtor_Deleting) {
- EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
- LoadCXXThis());
- SkipBody = true;
-
+ // Enter the epilogue cleanups.
+ RunCleanupsScope DtorEpilogue(*this);
+
// If this is the complete variant, just invoke the base variant;
// the epilogue will destruct the virtual bases. But we can't do
// this optimization if the body is a function-try-block, because
// we'd introduce *two* handler blocks.
- } else if (!isTryBody && DtorType == Dtor_Complete) {
- EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false,
- LoadCXXThis());
- SkipBody = true;
+ switch (DtorType) {
+ case Dtor_Deleting: llvm_unreachable("already handled deleting case");
+
+ case Dtor_Complete:
+ // Enter the cleanup scopes for virtual bases.
+ EnterDtorCleanups(Dtor, Dtor_Complete);
+
+ if (!isTryBody) {
+ EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false,
+ LoadCXXThis());
+ break;
+ }
+ // Fallthrough: act like we're in the base variant.
- // Otherwise, we're in the base variant, so we need to ensure the
- // vtable ptrs are right before emitting the body.
- } else {
+ case Dtor_Base:
+ // Enter the cleanup scopes for fields and non-virtual bases.
+ EnterDtorCleanups(Dtor, Dtor_Base);
+
+ // Initialize the vtable pointers before entering the body.
InitializeVTablePointers(Dtor->getParent());
- }
- // Emit the body of the statement.
- if (SkipBody)
- (void) 0;
- else if (isTryBody)
- EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
- else if (Body)
- EmitStmt(Body);
- else {
- assert(Dtor->isImplicit() && "bodyless dtor not implicit");
- // nothing to do besides what's in the epilogue
+ if (isTryBody)
+ EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
+ else if (Body)
+ EmitStmt(Body);
+ else {
+ assert(Dtor->isImplicit() && "bodyless dtor not implicit");
+ // nothing to do besides what's in the epilogue
+ }
+ break;
}
- // We're done with the epilogue cleanup.
- PopCleanupBlock();
+ // Jump out through the epilogue cleanups.
+ DtorEpilogue.ForceCleanup();
// Exit the try if applicable.
if (isTryBody)
ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
}
+namespace {
+ /// Call the operator delete associated with the current destructor.
+ struct CallDtorDelete : EHScopeStack::Cleanup {
+ CallDtorDelete() {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
+ const CXXRecordDecl *ClassDecl = Dtor->getParent();
+ CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(),
+ CGF.getContext().getTagDeclType(ClassDecl));
+ }
+ };
+
+ struct CallArrayFieldDtor : EHScopeStack::Cleanup {
+ const FieldDecl *Field;
+ CallArrayFieldDtor(const FieldDecl *Field) : Field(Field) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ QualType FieldType = Field->getType();
+ const ConstantArrayType *Array =
+ CGF.getContext().getAsConstantArrayType(FieldType);
+
+ QualType BaseType =
+ CGF.getContext().getBaseElementType(Array->getElementType());
+ const CXXRecordDecl *FieldClassDecl = BaseType->getAsCXXRecordDecl();
+
+ llvm::Value *ThisPtr = CGF.LoadCXXThis();
+ LValue LHS = CGF.EmitLValueForField(ThisPtr, Field,
+ // FIXME: Qualifiers?
+ /*CVRQualifiers=*/0);
+
+ const llvm::Type *BasePtr = CGF.ConvertType(BaseType)->getPointerTo();
+ llvm::Value *BaseAddrPtr =
+ CGF.Builder.CreateBitCast(LHS.getAddress(), BasePtr);
+ CGF.EmitCXXAggrDestructorCall(FieldClassDecl->getDestructor(),
+ Array, BaseAddrPtr);
+ }
+ };
+
+ struct CallFieldDtor : EHScopeStack::Cleanup {
+ const FieldDecl *Field;
+ CallFieldDtor(const FieldDecl *Field) : Field(Field) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ const CXXRecordDecl *FieldClassDecl =
+ Field->getType()->getAsCXXRecordDecl();
+
+ llvm::Value *ThisPtr = CGF.LoadCXXThis();
+ LValue LHS = CGF.EmitLValueForField(ThisPtr, Field,
+ // FIXME: Qualifiers?
+ /*CVRQualifiers=*/0);
+
+ CGF.EmitCXXDestructorCall(FieldClassDecl->getDestructor(),
+ Dtor_Complete, /*ForVirtualBase=*/false,
+ LHS.getAddress());
+ }
+ };
+}
+
/// EmitDtorEpilogue - Emit all code that comes at the end of a class's
/// destructor. This is to call destructors on members and base classes
/// in reverse order of their construction.
-void CodeGenFunction::EmitDtorEpilogue(const CXXDestructorDecl *DD,
- CXXDtorType DtorType) {
+void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
+ CXXDtorType DtorType) {
assert(!DD->isTrivial() &&
"Should not emit dtor epilogue for trivial dtor!");
- const CXXRecordDecl *ClassDecl = DD->getParent();
-
- // In a deleting destructor, we've already called the complete
- // destructor as a subroutine, so we just have to delete the
- // appropriate value.
+ // The deleting-destructor phase just needs to call the appropriate
+ // operator delete that Sema picked up.
if (DtorType == Dtor_Deleting) {
assert(DD->getOperatorDelete() &&
"operator delete missing - EmitDtorEpilogue");
- EmitDeleteCall(DD->getOperatorDelete(), LoadCXXThis(),
- getContext().getTagDeclType(ClassDecl));
+ EHStack.pushCleanup<CallDtorDelete>(NormalAndEHCleanup);
return;
}
- // For complete destructors, we've already called the base
- // destructor (in GenerateBody), so we just need to destruct all the
- // virtual bases.
+ const CXXRecordDecl *ClassDecl = DD->getParent();
+
+ // The complete-destructor phase just destructs all the virtual bases.
if (DtorType == Dtor_Complete) {
- // Handle virtual bases.
- for (CXXRecordDecl::reverse_base_class_const_iterator I =
- ClassDecl->vbases_rbegin(), E = ClassDecl->vbases_rend();
+
+ // We push them in the forward order so that they'll be popped in
+ // the reverse order.
+ for (CXXRecordDecl::base_class_const_iterator I =
+ ClassDecl->vbases_begin(), E = ClassDecl->vbases_end();
I != E; ++I) {
const CXXBaseSpecifier &Base = *I;
CXXRecordDecl *BaseClassDecl
@@ -778,26 +858,48 @@ void CodeGenFunction::EmitDtorEpilogue(const CXXDestructorDecl *DD,
// Ignore trivial destructors.
if (BaseClassDecl->hasTrivialDestructor())
continue;
- const CXXDestructorDecl *D = BaseClassDecl->getDestructor();
- llvm::Value *V =
- GetAddressOfDirectBaseInCompleteClass(LoadCXXThis(),
- ClassDecl, BaseClassDecl,
- /*BaseIsVirtual=*/true);
- EmitCXXDestructorCall(D, Dtor_Base, /*ForVirtualBase=*/true, V);
+
+ EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
+ BaseClassDecl,
+ /*BaseIsVirtual*/ true);
}
+
return;
}
assert(DtorType == Dtor_Base);
+
+ // Destroy non-virtual bases.
+ for (CXXRecordDecl::base_class_const_iterator I =
+ ClassDecl->bases_begin(), E = ClassDecl->bases_end(); I != E; ++I) {
+ const CXXBaseSpecifier &Base = *I;
+
+ // Ignore virtual bases.
+ if (Base.isVirtual())
+ continue;
+
+ CXXRecordDecl *BaseClassDecl = Base.getType()->getAsCXXRecordDecl();
+
+ // Ignore trivial destructors.
+ if (BaseClassDecl->hasTrivialDestructor())
+ continue;
+
+ EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
+ BaseClassDecl,
+ /*BaseIsVirtual*/ false);
+ }
- // Collect the fields.
+ // Destroy direct fields.
llvm::SmallVector<const FieldDecl *, 16> FieldDecls;
for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(),
E = ClassDecl->field_end(); I != E; ++I) {
const FieldDecl *Field = *I;
QualType FieldType = getContext().getCanonicalType(Field->getType());
- FieldType = getContext().getBaseElementType(FieldType);
+ const ConstantArrayType *Array =
+ getContext().getAsConstantArrayType(FieldType);
+ if (Array)
+ FieldType = getContext().getBaseElementType(Array->getElementType());
const RecordType *RT = FieldType->getAs<RecordType>();
if (!RT)
@@ -806,64 +908,11 @@ void CodeGenFunction::EmitDtorEpilogue(const CXXDestructorDecl *DD,
CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
if (FieldClassDecl->hasTrivialDestructor())
continue;
-
- FieldDecls.push_back(Field);
- }
-
- // Now destroy the fields.
- for (size_t i = FieldDecls.size(); i > 0; --i) {
- const FieldDecl *Field = FieldDecls[i - 1];
-
- QualType FieldType = Field->getType();
- const ConstantArrayType *Array =
- getContext().getAsConstantArrayType(FieldType);
- if (Array)
- FieldType = getContext().getBaseElementType(FieldType);
-
- const RecordType *RT = FieldType->getAs<RecordType>();
- CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
- llvm::Value *ThisPtr = LoadCXXThis();
-
- LValue LHS = EmitLValueForField(ThisPtr, Field,
- // FIXME: Qualifiers?
- /*CVRQualifiers=*/0);
- if (Array) {
- const llvm::Type *BasePtr = ConvertType(FieldType);
- BasePtr = llvm::PointerType::getUnqual(BasePtr);
- llvm::Value *BaseAddrPtr =
- Builder.CreateBitCast(LHS.getAddress(), BasePtr);
- EmitCXXAggrDestructorCall(FieldClassDecl->getDestructor(),
- Array, BaseAddrPtr);
- } else
- EmitCXXDestructorCall(FieldClassDecl->getDestructor(),
- Dtor_Complete, /*ForVirtualBase=*/false,
- LHS.getAddress());
- }
-
- // Destroy non-virtual bases.
- for (CXXRecordDecl::reverse_base_class_const_iterator I =
- ClassDecl->bases_rbegin(), E = ClassDecl->bases_rend(); I != E; ++I) {
- const CXXBaseSpecifier &Base = *I;
-
- // Ignore virtual bases.
- if (Base.isVirtual())
- continue;
-
- CXXRecordDecl *BaseClassDecl
- = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
-
- // Ignore trivial destructors.
- if (BaseClassDecl->hasTrivialDestructor())
- continue;
-
- const CXXDestructorDecl *D = BaseClassDecl->getDestructor();
- llvm::Value *V =
- GetAddressOfDirectBaseInCompleteClass(LoadCXXThis(), ClassDecl,
- BaseClassDecl,
- /*BaseIsVirtual=*/false);
-
- EmitCXXDestructorCall(D, Dtor_Base, /*ForVirtualBase=*/false, V);
+ if (Array)
+ EHStack.pushCleanup<CallArrayFieldDtor>(NormalAndEHCleanup, Field);
+ else
+ EHStack.pushCleanup<CallFieldDtor>(NormalAndEHCleanup, Field);
}
}
@@ -873,19 +922,24 @@ void CodeGenFunction::EmitDtorEpilogue(const CXXDestructorDecl *DD,
/// 'D' is the default constructor for elements of the array, 'ArrayTy' is the
/// array type and 'ArrayPtr' points to the beginning of the array.
/// It is assumed that all relevant checks have been made by the caller.
+///
+/// \param ZeroInitialization True if each element should be zero-initialized
+/// before it is constructed.
void
CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
- const ConstantArrayType *ArrayTy,
- llvm::Value *ArrayPtr,
- CallExpr::const_arg_iterator ArgBeg,
- CallExpr::const_arg_iterator ArgEnd) {
+ const ConstantArrayType *ArrayTy,
+ llvm::Value *ArrayPtr,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd,
+ bool ZeroInitialization) {
const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
llvm::Value * NumElements =
llvm::ConstantInt::get(SizeTy,
getContext().getConstantArrayElementCount(ArrayTy));
- EmitCXXAggrConstructorCall(D, NumElements, ArrayPtr, ArgBeg, ArgEnd);
+ EmitCXXAggrConstructorCall(D, NumElements, ArrayPtr, ArgBeg, ArgEnd,
+ ZeroInitialization);
}
void
@@ -893,7 +947,8 @@ CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
llvm::Value *NumElements,
llvm::Value *ArrayPtr,
CallExpr::const_arg_iterator ArgBeg,
- CallExpr::const_arg_iterator ArgEnd) {
+ CallExpr::const_arg_iterator ArgEnd,
+ bool ZeroInitialization) {
const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
// Create a temporary for the loop index and initialize it with 0.
@@ -924,6 +979,11 @@ CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
llvm::Value *Address = Builder.CreateInBoundsGEP(ArrayPtr, Counter,
"arrayidx");
+ // Zero initialize the storage, if requested.
+ if (ZeroInitialization)
+ EmitNullInitialization(Address,
+ getContext().getTypeDeclType(D->getParent()));
+
// C++ [class.temporary]p4:
// There are two contexts in which temporaries are destroyed at a different
// point than the end of the full-expression. The first context is when a
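The hunk above threads a ZeroInitialization flag into EmitCXXAggrConstructorCall and, when it is set, calls EmitNullInitialization on each element's storage before invoking the constructor. A rough source-level model of what the emitted loop does, with an illustrative element type and helper name:

// Source-level model of the per-element loop: zero the slot, then run the
// constructor on it. 'Elem' and 'constructArray' are illustrative names.
#include <cstring>
#include <new>

struct Elem { int x; Elem() { /* deliberately leaves x untouched */ } };

void constructArray(void *raw, std::size_t n) {
  unsigned char *p = static_cast<unsigned char *>(raw);
  for (std::size_t i = 0; i != n; ++i) {
    void *slot = p + i * sizeof(Elem);
    std::memset(slot, 0, sizeof(Elem));  // EmitNullInitialization equivalent
    ::new (slot) Elem();                 // then the constructor call
  }
}

int main() {
  alignas(Elem) unsigned char Buf[4 * sizeof(Elem)];
  constructArray(Buf, 4);                // every element ends up with x == 0
}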
@@ -1109,21 +1169,33 @@ void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
EmitCXXMemberCall(DD, Callee, ReturnValueSlot(), This, VTT, 0, 0);
}
+namespace {
+ struct CallLocalDtor : EHScopeStack::Cleanup {
+ const CXXDestructorDecl *Dtor;
+ llvm::Value *Addr;
+
+ CallLocalDtor(const CXXDestructorDecl *D, llvm::Value *Addr)
+ : Dtor(D), Addr(Addr) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
+ /*ForVirtualBase=*/false, Addr);
+ }
+ };
+}
+
+void CodeGenFunction::PushDestructorCleanup(const CXXDestructorDecl *D,
+ llvm::Value *Addr) {
+ EHStack.pushCleanup<CallLocalDtor>(NormalAndEHCleanup, D, Addr);
+}
+
void CodeGenFunction::PushDestructorCleanup(QualType T, llvm::Value *Addr) {
CXXRecordDecl *ClassDecl = T->getAsCXXRecordDecl();
if (!ClassDecl) return;
if (ClassDecl->hasTrivialDestructor()) return;
const CXXDestructorDecl *D = ClassDecl->getDestructor();
-
- CleanupBlock Scope(*this, NormalCleanup);
-
- EmitCXXDestructorCall(D, Dtor_Complete, /*ForVirtualBase=*/false, Addr);
-
- if (Exceptions) {
- Scope.beginEHCleanup();
- EmitCXXDestructorCall(D, Dtor_Complete, /*ForVirtualBase=*/false, Addr);
- }
+ PushDestructorCleanup(D, Addr);
}
llvm::Value *
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
index 4e158955f894..406db886eeed 100644
--- a/lib/CodeGen/CGDebugInfo.cpp
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -15,7 +15,9 @@
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/SourceManager.h"
@@ -37,7 +39,7 @@ using namespace clang::CodeGen;
CGDebugInfo::CGDebugInfo(CodeGenModule &CGM)
: CGM(CGM), DebugFactory(CGM.getModule()),
- FwdDeclCount(0), BlockLiteralGenericSet(false) {
+ BlockLiteralGenericSet(false) {
CreateCompileUnit();
}
@@ -93,6 +95,58 @@ llvm::StringRef CGDebugInfo::getFunctionName(const FunctionDecl *FD) {
return llvm::StringRef(StrPtr, NS.length());
}
+llvm::StringRef CGDebugInfo::getObjCMethodName(const ObjCMethodDecl *OMD) {
+ llvm::SmallString<256> MethodName;
+ llvm::raw_svector_ostream OS(MethodName);
+ OS << (OMD->isInstanceMethod() ? '-' : '+') << '[';
+ const DeclContext *DC = OMD->getDeclContext();
+ if (const ObjCImplementationDecl *OID = dyn_cast<const ObjCImplementationDecl>(DC)) {
+ OS << OID->getName();
+ } else if (const ObjCCategoryImplDecl *OCD = dyn_cast<const ObjCCategoryImplDecl>(DC)){
+ OS << ((NamedDecl *)OCD)->getIdentifier()->getNameStart() << '(' <<
+ OCD->getIdentifier()->getNameStart() << ')';
+ }
+ OS << ' ' << OMD->getSelector().getAsString() << ']';
+
+ char *StrPtr = DebugInfoNames.Allocate<char>(OS.tell());
+ memcpy(StrPtr, MethodName.begin(), OS.tell());
+ return llvm::StringRef(StrPtr, OS.tell());
+}
+
+/// getClassName - Get class name including template argument list.
+llvm::StringRef
+CGDebugInfo::getClassName(RecordDecl *RD) {
+ ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(RD);
+ if (!Spec)
+ return RD->getName();
+
+ const TemplateArgument *Args;
+ unsigned NumArgs;
+ std::string Buffer;
+ if (TypeSourceInfo *TAW = Spec->getTypeAsWritten()) {
+ const TemplateSpecializationType *TST =
+ cast<TemplateSpecializationType>(TAW->getType());
+ Args = TST->getArgs();
+ NumArgs = TST->getNumArgs();
+ } else {
+ const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
+ Args = TemplateArgs.getFlatArgumentList();
+ NumArgs = TemplateArgs.flat_size();
+ }
+ Buffer = RD->getIdentifier()->getNameStart();
+ PrintingPolicy Policy(CGM.getLangOptions());
+ Buffer += TemplateSpecializationType::PrintTemplateArgumentList(Args,
+ NumArgs,
+ Policy);
+
+ // Copy this name on the side and use its reference.
+ char *StrPtr = DebugInfoNames.Allocate<char>(Buffer.length());
+ memcpy(StrPtr, Buffer.data(), Buffer.length());
+ return llvm::StringRef(StrPtr, Buffer.length());
+
+}
+
/// getOrCreateFile - Get the file debug info descriptor for the input location.
llvm::DIFile CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
if (!Loc.isValid())
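getObjCMethodName, added above, builds the debugger display name in the usual Objective-C spelling: a leading '-' or '+', the class (with the category in parentheses when the method lives in a category implementation), a space, and the selector. A small string-only sketch of the same format, using example inputs rather than Clang declarations:

// Illustrative helper producing names like "-[NSString length]" or
// "+[NSString(MyAdditions) reversed]"; parameters are plain strings here.
#include <string>

std::string objcDisplayName(bool IsInstance, const std::string &Class,
                            const std::string &Category,
                            const std::string &Selector) {
  std::string Name;
  Name += IsInstance ? '-' : '+';
  Name += '[';
  Name += Class;
  if (!Category.empty())
    Name += '(' + Category + ')';
  Name += ' ';
  Name += Selector;
  Name += ']';
  return Name;
}

int main() {
  std::string N = objcDisplayName(false, "NSString", "MyAdditions", "reversed");
  // N == "+[NSString(MyAdditions) reversed]"
  return N.empty();
}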
@@ -113,13 +167,8 @@ llvm::DIFile CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
return llvm::DIFile(cast<llvm::MDNode>(it->second));
}
- // FIXME: We shouldn't even need to call 'makeAbsolute()' in the cases
- // where we can consult the FileEntry.
- llvm::sys::Path AbsFileName(PLoc.getFilename());
- AbsFileName.makeAbsolute();
-
- llvm::DIFile F = DebugFactory.CreateFile(AbsFileName.getLast(),
- AbsFileName.getDirname(), TheCU);
+ llvm::DIFile F = DebugFactory.CreateFile(PLoc.getFilename(),
+ getCurrentDirname(), TheCU);
DIFileCache[fname] = F;
return F;
@@ -144,6 +193,16 @@ unsigned CGDebugInfo::getColumnNumber(SourceLocation Loc) {
return PLoc.getColumn();
}
+llvm::StringRef CGDebugInfo::getCurrentDirname() {
+ if (!CWDName.empty())
+ return CWDName;
+ char *CompDirnamePtr = NULL;
+ llvm::sys::Path CWD = llvm::sys::Path::GetCurrentDirectory();
+ CompDirnamePtr = DebugInfoNames.Allocate<char>(CWD.size());
+ memcpy(CompDirnamePtr, CWD.c_str(), CWD.size());
+ return CWDName = llvm::StringRef(CompDirnamePtr, CWD.size());
+}
+
/// CreateCompileUnit - Create new compile unit.
void CGDebugInfo::CreateCompileUnit() {
@@ -153,19 +212,22 @@ void CGDebugInfo::CreateCompileUnit() {
if (MainFileName.empty())
MainFileName = "<unknown>";
- llvm::sys::Path AbsFileName(MainFileName);
- AbsFileName.makeAbsolute();
-
// The main file name provided via the "-main-file-name" option contains just
// the file name itself with no path information. This file name may have had
// a relative path, so we look into the actual file entry for the main
// file to determine the real absolute path for the file.
std::string MainFileDir;
- if (const FileEntry *MainFile = SM.getFileEntryForID(SM.getMainFileID()))
+ if (const FileEntry *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
MainFileDir = MainFile->getDir()->getName();
- else
- MainFileDir = AbsFileName.getDirname();
+ if (MainFileDir != ".")
+ MainFileName = MainFileDir + "/" + MainFileName;
+ }
+ // Save filename string.
+ char *FilenamePtr = DebugInfoNames.Allocate<char>(MainFileName.length());
+ memcpy(FilenamePtr, MainFileName.c_str(), MainFileName.length());
+ llvm::StringRef Filename(FilenamePtr, MainFileName.length());
+
unsigned LangTag;
const LangOptions &LO = CGM.getLangOptions();
if (LO.CPlusPlus) {
@@ -181,11 +243,7 @@ void CGDebugInfo::CreateCompileUnit() {
LangTag = llvm::dwarf::DW_LANG_C89;
}
- const char *Producer =
-#ifdef CLANG_VENDOR
- CLANG_VENDOR
-#endif
- "clang " CLANG_VERSION_STRING;
+ std::string Producer = getClangFullVersion();
// Figure out which version of the ObjC runtime we have.
unsigned RuntimeVers = 0;
@@ -194,7 +252,8 @@ void CGDebugInfo::CreateCompileUnit() {
// Create new compile unit.
TheCU = DebugFactory.CreateCompileUnit(
- LangTag, AbsFileName.getLast(), MainFileDir, Producer, true,
+ LangTag, Filename, getCurrentDirname(),
+ Producer, true,
LO.Optimize, CGM.getCodeGenOpts().DwarfDebugFlags, RuntimeVers);
}
@@ -203,10 +262,49 @@ void CGDebugInfo::CreateCompileUnit() {
llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT,
llvm::DIFile Unit) {
unsigned Encoding = 0;
+ const char *BTName = NULL;
switch (BT->getKind()) {
default:
case BuiltinType::Void:
return llvm::DIType();
+ case BuiltinType::ObjCClass:
+ return DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_structure_type,
+ Unit, "objc_class", Unit, 0, 0, 0, 0,
+ llvm::DIType::FlagFwdDecl,
+ llvm::DIType(), llvm::DIArray());
+ case BuiltinType::ObjCId: {
+ // typedef struct objc_class *Class;
+ // typedef struct objc_object {
+ // Class isa;
+ // } *id;
+
+ llvm::DIType OCTy =
+ DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_structure_type,
+ Unit, "objc_class", Unit, 0, 0, 0, 0,
+ llvm::DIType::FlagFwdDecl,
+ llvm::DIType(), llvm::DIArray());
+ unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
+
+ llvm::DIType ISATy =
+ DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type,
+ Unit, "", Unit,
+ 0, Size, 0, 0, 0, OCTy);
+
+ llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;
+
+ llvm::DIType FieldTy =
+ DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ "isa", Unit,
+ 0,Size, 0, 0, 0, ISATy);
+ EltTys.push_back(FieldTy);
+ llvm::DIArray Elements =
+ DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+
+ return DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_structure_type,
+ Unit, "objc_object", Unit, 0, 0, 0, 0,
+ 0,
+ llvm::DIType(), Elements);
+ }
case BuiltinType::UChar:
case BuiltinType::Char_U: Encoding = llvm::dwarf::DW_ATE_unsigned_char; break;
case BuiltinType::Char_S:
@@ -224,14 +322,23 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT,
case BuiltinType::LongDouble:
case BuiltinType::Double: Encoding = llvm::dwarf::DW_ATE_float; break;
}
+
+ switch (BT->getKind()) {
+ case BuiltinType::Long: BTName = "long int"; break;
+ case BuiltinType::LongLong: BTName = "long long int"; break;
+ case BuiltinType::ULong: BTName = "long unsigned int"; break;
+ case BuiltinType::ULongLong: BTName = "long long unsigned int"; break;
+ default:
+ BTName = BT->getName(CGM.getContext().getLangOptions());
+ break;
+ }
// Bit size, align and offset of the type.
uint64_t Size = CGM.getContext().getTypeSize(BT);
uint64_t Align = CGM.getContext().getTypeAlign(BT);
uint64_t Offset = 0;
-
+
llvm::DIType DbgTy =
- DebugFactory.CreateBasicType(Unit,
- BT->getName(CGM.getContext().getLangOptions()),
+ DebugFactory.CreateBasicType(Unit, BTName,
Unit, 0, Size, Align,
Offset, /*flags*/ 0, Encoding);
return DbgTy;
@@ -461,7 +568,6 @@ CollectRecordFields(const RecordDecl *RD, llvm::DIFile Unit,
I != E; ++I, ++FieldNo) {
FieldDecl *Field = *I;
llvm::DIType FieldTy = getOrCreateType(Field->getType(), Unit);
-
llvm::StringRef FieldName = Field->getName();
// Ignore unnamed fields. Do not ignore unnamed records.
@@ -481,7 +587,6 @@ CollectRecordFields(const RecordDecl *RD, llvm::DIFile Unit,
Expr *BitWidth = Field->getBitWidth();
if (BitWidth)
FieldSize = BitWidth->EvaluateAsInt(CGM.getContext()).getZExtValue();
-
FieldAlign = CGM.getContext().getTypeAlign(FType);
}
@@ -516,9 +621,12 @@ CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
0),
Unit);
- // Static methods do not need "this" pointer argument.
- if (Method->isStatic())
- return FnTy;
+ unsigned BFlags=0;
+ AccessSpecifier Access = Method->getAccess();
+ if (Access == clang::AS_private)
+ BFlags |= llvm::DIType::FlagPrivate;
+ else if (Access == clang::AS_protected)
+ BFlags |= llvm::DIType::FlagProtected;
// Add "this" pointer.
@@ -530,27 +638,18 @@ CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
// First element is always return type. For 'void' functions it is NULL.
Elts.push_back(Args.getElement(0));
- // "this" pointer is always first argument.
- ASTContext &Context = CGM.getContext();
- QualType ThisPtr =
- Context.getPointerType(Context.getTagDeclType(Method->getParent()));
- llvm::DIType ThisPtrType =
- DebugFactory.CreateArtificialType(getOrCreateType(ThisPtr, Unit));
-
- unsigned Quals = Method->getTypeQualifiers();
- if (Quals & Qualifiers::Const)
- ThisPtrType =
- DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_const_type,
- Unit, "", Unit,
- 0, 0, 0, 0, 0, ThisPtrType);
- if (Quals & Qualifiers::Volatile)
- ThisPtrType =
- DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_volatile_type,
- Unit, "", Unit,
- 0, 0, 0, 0, 0, ThisPtrType);
-
- TypeCache[ThisPtr.getAsOpaquePtr()] = ThisPtrType;
- Elts.push_back(ThisPtrType);
+ if (!Method->isStatic())
+ {
+ // "this" pointer is always first argument.
+ ASTContext &Context = CGM.getContext();
+ QualType ThisPtr =
+ Context.getPointerType(Context.getTagDeclType(Method->getParent()));
+ llvm::DIType ThisPtrType =
+ DebugFactory.CreateArtificialType(getOrCreateType(ThisPtr, Unit));
+
+ TypeCache[ThisPtr.getAsOpaquePtr()] = ThisPtrType;
+ Elts.push_back(ThisPtrType);
+ }
// Copy rest of the arguments.
for (unsigned i = 1, e = Args.getNumElements(); i != e; ++i)
@@ -571,7 +670,7 @@ CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
llvm::DISubprogram
CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
llvm::DIFile Unit,
- llvm::DICompositeType &RecordTy) {
+ llvm::DIType RecordTy) {
bool IsCtorOrDtor =
isa<CXXConstructorDecl>(Method) || isa<CXXDestructorDecl>(Method);
@@ -612,7 +711,9 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
MethodDefUnit, MethodLine,
MethodTy, /*isLocalToUnit=*/false,
                                  /* isDefinition=*/ false,
- Virtuality, VIndex, ContainingType);
+ Virtuality, VIndex, ContainingType,
+ Method->isImplicit(),
+ CGM.getLangOptions().Optimize);
// Don't cache ctors or dtors since we have to emit multiple functions for
// a single ctor or dtor.
@@ -628,7 +729,7 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
void CGDebugInfo::
CollectCXXMemberFunctions(const CXXRecordDecl *RD, llvm::DIFile Unit,
llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys,
- llvm::DICompositeType &RecordTy) {
+ llvm::DIType RecordTy) {
for(CXXRecordDecl::method_iterator I = RD->method_begin(),
E = RD->method_end(); I != E; ++I) {
const CXXMethodDecl *Method = *I;
@@ -640,13 +741,41 @@ CollectCXXMemberFunctions(const CXXRecordDecl *RD, llvm::DIFile Unit,
}
}
+/// CollectCXXFriends - A helper function to collect debug info for
+/// C++ friend declarations. This is used while creating the debug info entry
+/// for a Record.
+void CGDebugInfo::
+CollectCXXFriends(const CXXRecordDecl *RD, llvm::DIFile Unit,
+ llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys,
+ llvm::DIType RecordTy) {
+
+ for (CXXRecordDecl::friend_iterator BI = RD->friend_begin(),
+ BE = RD->friend_end(); BI != BE; ++BI) {
+
+ TypeSourceInfo *TInfo = (*BI)->getFriendType();
+ if(TInfo)
+ {
+ llvm::DIType Ty = getOrCreateType(TInfo->getType(), Unit);
+
+ llvm::DIType DTy =
+ DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_friend,
+ RecordTy, llvm::StringRef(),
+ Unit, 0, 0, 0,
+ 0, 0, Ty);
+
+ EltTys.push_back(DTy);
+ }
+
+ }
+}
+
/// CollectCXXBases - A helper function to collect debug info for
/// C++ base classes. This is used while creating debug info entry for
/// a Record.
void CGDebugInfo::
CollectCXXBases(const CXXRecordDecl *RD, llvm::DIFile Unit,
llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys,
- llvm::DICompositeType &RecordTy) {
+ llvm::DIType RecordTy) {
const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
for (CXXRecordDecl::base_class_const_iterator BI = RD->bases_begin(),
@@ -786,14 +915,7 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
return FwdDecl;
}
- // A RD->getName() is not unique. However, the debug info descriptors
- // are uniqued so use type name to ensure uniquness.
- llvm::SmallString<128> FwdDeclName;
- llvm::raw_svector_ostream(FwdDeclName) << "fwd.type." << FwdDeclCount++;
- llvm::DICompositeType FwdDecl =
- DebugFactory.CreateCompositeType(Tag, FDContext, FwdDeclName,
- DefUnit, Line, 0, 0, 0, 0,
- llvm::DIType(), llvm::DIArray());
+ llvm::DIType FwdDecl = DebugFactory.CreateTemporaryType();
llvm::MDNode *MN = FwdDecl;
llvm::TrackingVH<llvm::MDNode> FwdDeclNode = MN;
@@ -812,10 +934,36 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
CollectCXXBases(CXXDecl, Unit, EltTys, FwdDecl);
CollectVTableInfo(CXXDecl, Unit, EltTys);
}
+
+ // Collect static variables with initializers.
+ for (RecordDecl::decl_iterator I = RD->decls_begin(), E = RD->decls_end();
+ I != E; ++I)
+ if (const VarDecl *V = dyn_cast<VarDecl>(*I)) {
+ if (const Expr *Init = V->getInit()) {
+ Expr::EvalResult Result;
+ if (Init->Evaluate(Result, CGM.getContext()) && Result.Val.isInt()) {
+ llvm::ConstantInt *CI
+ = llvm::ConstantInt::get(CGM.getLLVMContext(), Result.Val.getInt());
+
+ // Create the descriptor for static variable.
+ llvm::DIFile VUnit = getOrCreateFile(V->getLocation());
+ llvm::StringRef VName = V->getName();
+ llvm::DIType VTy = getOrCreateType(V->getType(), VUnit);
+ // Do not use DIGlobalVariable for enums.
+ if (VTy.getTag() != llvm::dwarf::DW_TAG_enumeration_type) {
+ DebugFactory.CreateGlobalVariable(FwdDecl, VName, VName, VName, VUnit,
+ getLineNumber(V->getLocation()),
+ VTy, true, true, CI);
+ }
+ }
+ }
+ }
+
CollectRecordFields(RD, Unit, EltTys);
llvm::MDNode *ContainingType = NULL;
if (CXXDecl) {
CollectCXXMemberFunctions(CXXDecl, Unit, EltTys, FwdDecl);
+ CollectCXXFriends(CXXDecl, Unit, EltTys, FwdDecl);
// A class's primary base or the class itself contains the vtable.
const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
@@ -841,16 +989,22 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
llvm::DIDescriptor RDContext =
getContextDescriptor(dyn_cast<Decl>(RD->getDeclContext()), Unit);
+
+ llvm::StringRef RDName = RD->getName();
+ // If this is a class, include the template arguments also.
+ if (Tag == llvm::dwarf::DW_TAG_class_type)
+ RDName = getClassName(RD);
+
llvm::DICompositeType RealDecl =
DebugFactory.CreateCompositeType(Tag, RDContext,
- RD->getName(),
+ RDName,
DefUnit, Line, Size, Align, 0, 0,
llvm::DIType(), Elements,
0, ContainingType);
// Now that we have a real decl for the struct, replace anything using the
// old decl with the new one. This will recursively update the debug info.
- llvm::DIDerivedType(FwdDeclNode).replaceAllUsesWith(RealDecl);
+ llvm::DIType(FwdDeclNode).replaceAllUsesWith(RealDecl);
RegionMap[RD] = llvm::WeakVH(RealDecl);
return RealDecl;
}
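The record-type hunk above drops the counted fwd.type.N placeholders in favor of CreateTemporaryType plus replaceAllUsesWith: members of a recursive record are built against a temporary node, and every use of it is redirected once the real composite type exists. A minimal model of that pattern, using plain structs rather than the LLVM debug-info API:

// Minimal model of the temporary-type trick for recursive records: member
// types may refer to a placeholder while the record is still being built;
// once the real descriptor exists, every recorded use of the placeholder is
// redirected to it, in the spirit of replaceAllUsesWith. Names are
// illustrative only.
#include <string>
#include <vector>

struct TypeNode {
  std::string Name;
  std::vector<TypeNode *> Members;
};

static void replaceAllUses(std::vector<TypeNode **> &Uses, TypeNode *Real) {
  for (TypeNode **Slot : Uses)
    *Slot = Real;                  // every cached reference now sees the real type
}

int main() {
  TypeNode FwdDecl{"fwd.S", {}};   // placeholder created up front
  std::vector<TypeNode **> Uses;

  // Building "struct S { S *next; }": the member refers back to S itself,
  // so for now it is recorded against the placeholder.
  TypeNode PtrToS{"S*", {&FwdDecl}};
  Uses.push_back(&PtrToS.Members[0]);

  TypeNode RealDecl{"S", {&PtrToS}};
  replaceAllUses(Uses, &RealDecl);  // PtrToS now points at the real descriptor
}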
@@ -873,21 +1027,24 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
unsigned Line = getLineNumber(ID->getLocation());
unsigned RuntimeLang = TheCU.getLanguage();
+ // If this is just a forward declaration, return a special forward-declaration
+ // debug type.
+ if (ID->isForwardDecl()) {
+ llvm::DICompositeType FwdDecl =
+ DebugFactory.CreateCompositeType(Tag, Unit, ID->getName(),
+ DefUnit, Line, 0, 0, 0, 0,
+ llvm::DIType(), llvm::DIArray(),
+ RuntimeLang);
+ return FwdDecl;
+ }
+
// To handle recursive interface, we
// first generate a debug descriptor for the struct as a forward declaration.
// Then (if it is a definition) we go through and get debug info for all of
// its members. Finally, we create a descriptor for the complete type (which
// may refer to the forward decl if the struct is recursive) and replace all
// uses of the forward declaration with the final definition.
- llvm::DICompositeType FwdDecl =
- DebugFactory.CreateCompositeType(Tag, Unit, ID->getName(),
- DefUnit, Line, 0, 0, 0, 0,
- llvm::DIType(), llvm::DIArray(),
- RuntimeLang);
-
- // If this is just a forward declaration, return it.
- if (ID->isForwardDecl())
- return FwdDecl;
+ llvm::DIType FwdDecl = DebugFactory.CreateTemporaryType();
llvm::MDNode *MN = FwdDecl;
llvm::TrackingVH<llvm::MDNode> FwdDeclNode = MN;
@@ -982,7 +1139,7 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
// Now that we have a real decl for the struct, replace anything using the
// old decl with the new one. This will recursively update the debug info.
- llvm::DIDerivedType(FwdDeclNode).replaceAllUsesWith(RealDecl);
+ llvm::DIType(FwdDeclNode).replaceAllUsesWith(RealDecl);
RegionMap[ID] = llvm::WeakVH(RealDecl);
return RealDecl;
@@ -990,39 +1147,8 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
llvm::DIType CGDebugInfo::CreateType(const EnumType *Ty,
llvm::DIFile Unit) {
- EnumDecl *ED = Ty->getDecl();
-
- llvm::SmallVector<llvm::DIDescriptor, 32> Enumerators;
-
- // Create DIEnumerator elements for each enumerator.
- for (EnumDecl::enumerator_iterator
- Enum = ED->enumerator_begin(), EnumEnd = ED->enumerator_end();
- Enum != EnumEnd; ++Enum) {
- Enumerators.push_back(DebugFactory.CreateEnumerator(Enum->getName(),
- Enum->getInitVal().getZExtValue()));
- }
+ return CreateEnumType(Ty->getDecl(), Unit);
- // Return a CompositeType for the enum itself.
- llvm::DIArray EltArray =
- DebugFactory.GetOrCreateArray(Enumerators.data(), Enumerators.size());
-
- llvm::DIFile DefUnit = getOrCreateFile(ED->getLocation());
- unsigned Line = getLineNumber(ED->getLocation());
-
- // Size and align of the type.
- uint64_t Size = 0;
- unsigned Align = 0;
- if (!Ty->isIncompleteType()) {
- Size = CGM.getContext().getTypeSize(Ty);
- Align = CGM.getContext().getTypeAlign(Ty);
- }
-
- llvm::DIType DbgTy =
- DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_enumeration_type,
- Unit, ED->getName(), DefUnit, Line,
- Size, Align, 0, 0,
- llvm::DIType(), EltArray);
- return DbgTy;
}
llvm::DIType CGDebugInfo::CreateType(const TagType *Ty,
@@ -1036,7 +1162,7 @@ llvm::DIType CGDebugInfo::CreateType(const TagType *Ty,
}
llvm::DIType CGDebugInfo::CreateType(const VectorType *Ty,
- llvm::DIFile Unit) {
+ llvm::DIFile Unit) {
llvm::DIType ElementTy = getOrCreateType(Ty->getElementType(), Unit);
uint64_t NumElems = Ty->getNumElements();
if (NumElems > 0)
@@ -1052,7 +1178,7 @@ llvm::DIType CGDebugInfo::CreateType(const VectorType *Ty,
DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_vector_type,
Unit, "", Unit,
0, Size, Align, 0, 0,
- ElementTy, SubscriptArray);
+ ElementTy, SubscriptArray);
}
llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty,
@@ -1149,6 +1275,38 @@ llvm::DIType CGDebugInfo::CreateType(const MemberPointerType *Ty,
0, 0, 0, llvm::DIType(), Elements);
}
+/// CreateEnumType - get enumeration type.
+llvm::DIType CGDebugInfo::CreateEnumType(const EnumDecl *ED, llvm::DIFile Unit){
+ llvm::SmallVector<llvm::DIDescriptor, 32> Enumerators;
+
+ // Create DIEnumerator elements for each enumerator.
+ for (EnumDecl::enumerator_iterator
+ Enum = ED->enumerator_begin(), EnumEnd = ED->enumerator_end();
+ Enum != EnumEnd; ++Enum) {
+ Enumerators.push_back(DebugFactory.CreateEnumerator(Enum->getName(),
+ Enum->getInitVal().getZExtValue()));
+ }
+
+ // Return a CompositeType for the enum itself.
+ llvm::DIArray EltArray =
+ DebugFactory.GetOrCreateArray(Enumerators.data(), Enumerators.size());
+
+ llvm::DIFile DefUnit = getOrCreateFile(ED->getLocation());
+ unsigned Line = getLineNumber(ED->getLocation());
+ uint64_t Size = 0;
+ uint64_t Align = 0;
+ if (!ED->getTypeForDecl()->isIncompleteType()) {
+ Size = CGM.getContext().getTypeSize(ED->getTypeForDecl());
+ Align = CGM.getContext().getTypeAlign(ED->getTypeForDecl());
+ }
+ llvm::DIType DbgTy =
+ DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_enumeration_type,
+ Unit, ED->getName(), DefUnit, Line,
+ Size, Align, 0, 0,
+ llvm::DIType(), EltArray);
+ return DbgTy;
+}
+
static QualType UnwrapTypeForDebugInfo(QualType T) {
do {
QualType LastT = T;
@@ -1312,6 +1470,8 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
llvm::StringRef Name;
llvm::StringRef LinkageName;
+ FnBeginRegionCount.push_back(RegionStack.size());
+
const Decl *D = GD.getDecl();
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
// If there is a DISubprogram for this function available then use it.
@@ -1329,6 +1489,9 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
Name = getFunctionName(FD);
// Use mangled name as linkage name for c/c++ functions.
LinkageName = CGM.getMangledName(GD);
+ } else if (const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D)) {
+ Name = getObjCMethodName(OMD);
+ LinkageName = Name;
} else {
// Use llvm function name as linkage name.
Name = Fn->getName();
@@ -1346,16 +1509,22 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
llvm::DISubprogram SP =
DebugFactory.CreateSubprogram(Unit, Name, Name, LinkageName, Unit, LineNo,
getOrCreateType(FnType, Unit),
- Fn->hasInternalLinkage(), true/*definition*/);
+ Fn->hasInternalLinkage(), true/*definition*/,
+ 0, 0, llvm::DIType(),
+ D->isImplicit(),
+ CGM.getLangOptions().Optimize, Fn);
// Push function on region stack.
llvm::MDNode *SPN = SP;
RegionStack.push_back(SPN);
RegionMap[D] = llvm::WeakVH(SP);
+
+ // Clear stack used to keep track of #line directives.
+ LineDirectiveFiles.clear();
}
-void CGDebugInfo::EmitStopPoint(llvm::Function *Fn, CGBuilderTy &Builder) {
+void CGDebugInfo::EmitStopPoint(CGBuilderTy &Builder) {
if (CurLoc.isInvalid() || CurLoc.isMacroID()) return;
// Don't bother if things are the same as last time.
@@ -1377,13 +1546,63 @@ void CGDebugInfo::EmitStopPoint(llvm::Function *Fn, CGBuilderTy &Builder) {
Scope));
}
+/// UpdateLineDirectiveRegion - Update the region stack only if a #line
+/// directive has introduced a scope change.
+void CGDebugInfo::UpdateLineDirectiveRegion(CGBuilderTy &Builder) {
+ if (CurLoc.isInvalid() || CurLoc.isMacroID() ||
+ PrevLoc.isInvalid() || PrevLoc.isMacroID())
+ return;
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ PresumedLoc PCLoc = SM.getPresumedLoc(CurLoc);
+ PresumedLoc PPLoc = SM.getPresumedLoc(PrevLoc);
+
+ if (!strcmp(PPLoc.getFilename(), PCLoc.getFilename()))
+ return;
+
+ // If #line directive stack is empty then we are entering a new scope.
+ if (LineDirectiveFiles.empty()) {
+ EmitRegionStart(Builder);
+ LineDirectiveFiles.push_back(PCLoc.getFilename());
+ return;
+ }
+
+ assert (RegionStack.size() >= LineDirectiveFiles.size()
+ && "error handling #line regions!");
+
+ bool SeenThisFile = false;
+ for(std::vector<const char *>::iterator I = LineDirectiveFiles.begin(),
+ E = LineDirectiveFiles.end(); I != E; ++I)
+ if (!strcmp(PPLoc.getFilename(), *I)) {
+ SeenThisFile = true;
+ break;
+ }
+
+ // If #line for this file is seen earlier then pop out #line regions.
+ if (SeenThisFile) {
+ while (!LineDirectiveFiles.empty()) {
+ const char *LastFile = LineDirectiveFiles.back();
+ RegionStack.pop_back();
+ LineDirectiveFiles.pop_back();
+ if (!strcmp(PPLoc.getFilename(), LastFile))
+ break;
+ }
+ return;
+ }
+
+  // ... otherwise insert a new #line region.
+ EmitRegionStart(Builder);
+ LineDirectiveFiles.push_back(PCLoc.getFilename());
+
+ return;
+}
/// EmitRegionStart- Constructs the debug code for entering a declarative
/// region - "llvm.dbg.region.start.".
-void CGDebugInfo::EmitRegionStart(llvm::Function *Fn, CGBuilderTy &Builder) {
+void CGDebugInfo::EmitRegionStart(CGBuilderTy &Builder) {
llvm::DIDescriptor D =
DebugFactory.CreateLexicalBlock(RegionStack.empty() ?
llvm::DIDescriptor() :
llvm::DIDescriptor(RegionStack.back()),
+ getOrCreateFile(CurLoc),
getLineNumber(CurLoc),
getColumnNumber(CurLoc));
llvm::MDNode *DN = D;
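UpdateLineDirectiveRegion, added above, tracks the presumed file name across #line directives: switching to a not-yet-seen file opens a new lexical region, while returning to a file already on the stack pops regions back down to it. A standalone sketch of that bookkeeping, with strings standing in for regions (names are illustrative):

// Model of the #line scope tracking: LineDirectiveTracker plays the role of
// LineDirectiveFiles plus the region-stack depth it controls.
#include <string>
#include <vector>

struct LineDirectiveTracker {
  std::vector<std::string> Files;   // mirrors LineDirectiveFiles
  unsigned OpenRegions = 0;         // mirrors regions opened for #line scopes

  void update(const std::string &PrevFile, const std::string &CurFile) {
    if (PrevFile == CurFile)
      return;                       // no scope change

    // Have we already seen the file we are coming from?
    bool SeenPrev = false;
    for (const std::string &F : Files)
      if (F == PrevFile) { SeenPrev = true; break; }

    if (SeenPrev) {                 // pop regions back to that file
      while (!Files.empty()) {
        std::string Last = Files.back();
        Files.pop_back();
        --OpenRegions;
        if (Last == PrevFile)
          break;
      }
      return;
    }

    ++OpenRegions;                  // otherwise enter a new #line region
    Files.push_back(CurFile);
  }
};

int main() {
  LineDirectiveTracker T;
  T.update("a.c", "inc.h");         // enters a #line region for inc.h
  T.update("inc.h", "a.c");         // pops back out
  return T.OpenRegions;             // 0 again
}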
@@ -1392,15 +1611,27 @@ void CGDebugInfo::EmitRegionStart(llvm::Function *Fn, CGBuilderTy &Builder) {
/// EmitRegionEnd - Constructs the debug code for exiting a declarative
/// region - "llvm.dbg.region.end."
-void CGDebugInfo::EmitRegionEnd(llvm::Function *Fn, CGBuilderTy &Builder) {
+void CGDebugInfo::EmitRegionEnd(CGBuilderTy &Builder) {
assert(!RegionStack.empty() && "Region stack mismatch, stack empty!");
  // Provide a region stop point.
- EmitStopPoint(Fn, Builder);
+ EmitStopPoint(Builder);
RegionStack.pop_back();
}
+/// EmitFunctionEnd - Constructs the debug code for exiting a function.
+void CGDebugInfo::EmitFunctionEnd(CGBuilderTy &Builder) {
+ assert(!RegionStack.empty() && "Region stack mismatch, stack empty!");
+ unsigned RCount = FnBeginRegionCount.back();
+ assert(RCount <= RegionStack.size() && "Region stack mismatch");
+
+ // Pop all regions for this function.
+ while (RegionStack.size() != RCount)
+ EmitRegionEnd(Builder);
+ FnBeginRegionCount.pop_back();
+}
+
// EmitTypeForVarWithBlocksAttr - Build up structure info for the byref.
// See BuildByRefType.
llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const ValueDecl *VD,
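EmitFunctionEnd pairs with the new FnBeginRegionCount vector: the region-stack depth is recorded when a function starts, and any regions still open at the end (for example, ones left behind by #line scope changes) are popped back to that depth. A small model of the same idea, with integers standing in for the MDNode region stack:

#include <cassert>
#include <vector>

struct RegionTracker {
  std::vector<int> RegionStack;
  std::vector<unsigned> FnBeginRegionCount;

  void functionStart() { FnBeginRegionCount.push_back(RegionStack.size()); }
  void regionStart(int R) { RegionStack.push_back(R); }
  void regionEnd()        { RegionStack.pop_back(); }

  void functionEnd() {
    assert(!FnBeginRegionCount.empty() && "unbalanced function begin/end");
    unsigned Depth = FnBeginRegionCount.back();
    while (RegionStack.size() > Depth)   // pop regions this function left open
      regionEnd();
    FnBeginRegionCount.pop_back();
  }
};

int main() {
  RegionTracker T;
  T.functionStart();
  T.regionStart(1);                      // e.g. a region opened by a #line change
  T.functionEnd();                       // pops it back automatically
  return static_cast<int>(T.RegionStack.size());
}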
@@ -1643,6 +1874,26 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
true/*definition*/, Var);
}
+/// EmitGlobalVariable - Emit global variable's debug info.
+void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD,
+ llvm::ConstantInt *Init,
+ CGBuilderTy &Builder) {
+ // Create the descriptor for the variable.
+ llvm::DIFile Unit = getOrCreateFile(VD->getLocation());
+ llvm::StringRef Name = VD->getName();
+ llvm::DIType Ty = getOrCreateType(VD->getType(), Unit);
+ if (const EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(VD)) {
+ if (const EnumDecl *ED = dyn_cast<EnumDecl>(ECD->getDeclContext()))
+ Ty = CreateEnumType(ED, Unit);
+ }
+ // Do not use DIGlobalVariable for enums.
+ if (Ty.getTag() == llvm::dwarf::DW_TAG_enumeration_type)
+ return;
+ DebugFactory.CreateGlobalVariable(Unit, Name, Name, Name, Unit,
+ getLineNumber(VD->getLocation()),
+ Ty, true, true, Init);
+}
+
/// getOrCreateNameSpace - Return the namespace descriptor for the given
/// namespace decl.
llvm::DINameSpace
@@ -1659,7 +1910,7 @@ CGDebugInfo::getOrCreateNameSpace(const NamespaceDecl *NSDecl,
getContextDescriptor(dyn_cast<Decl>(NSDecl->getDeclContext()), Unit);
llvm::DINameSpace NS =
DebugFactory.CreateNameSpace(Context, NSDecl->getName(),
- llvm::DIFile(Unit), LineNo);
+ llvm::DIFile(Unit), LineNo);
NameSpaceCache[NSDecl] = llvm::WeakVH(NS);
return NS;
}
diff --git a/lib/CodeGen/CGDebugInfo.h b/lib/CodeGen/CGDebugInfo.h
index 620a5f2f8480..a1ad012353e5 100644
--- a/lib/CodeGen/CGDebugInfo.h
+++ b/lib/CodeGen/CGDebugInfo.h
@@ -46,9 +46,6 @@ class CGDebugInfo {
llvm::DICompileUnit TheCU;
SourceLocation CurLoc, PrevLoc;
llvm::DIType VTablePtrType;
- /// FwdDeclCount - This counter is used to ensure unique names for forward
- /// record decls.
- unsigned FwdDeclCount;
/// TypeCache - Cache of previously constructed Types.
llvm::DenseMap<void *, llvm::WeakVH> TypeCache;
@@ -58,10 +55,19 @@ class CGDebugInfo {
std::vector<llvm::TrackingVH<llvm::MDNode> > RegionStack;
llvm::DenseMap<const Decl *, llvm::WeakVH> RegionMap;
+ // FnBeginRegionCount - Keep track of RegionStack counter at the beginning
+ // of a function. This is used to pop unbalanced regions at the end of a
+ // function.
+ std::vector<unsigned> FnBeginRegionCount;
+
+ /// LineDirectiveFiles - This stack is used to keep track of
+ /// scopes introduced by #line directives.
+ std::vector<const char *> LineDirectiveFiles;
/// DebugInfoNames - This is a storage for names that are
/// constructed on demand. For example, C++ destructors, C++ operators etc..
llvm::BumpPtrAllocator DebugInfoNames;
+ llvm::StringRef CWDName;
llvm::DenseMap<const char *, llvm::WeakVH> DIFileCache;
llvm::DenseMap<const FunctionDecl *, llvm::WeakVH> SPCache;
@@ -86,6 +92,7 @@ class CGDebugInfo {
llvm::DIType CreateType(const ArrayType *Ty, llvm::DIFile F);
llvm::DIType CreateType(const LValueReferenceType *Ty, llvm::DIFile F);
llvm::DIType CreateType(const MemberPointerType *Ty, llvm::DIFile F);
+ llvm::DIType CreateEnumType(const EnumDecl *ED, llvm::DIFile Unit);
llvm::DIType getOrCreateMethodType(const CXXMethodDecl *Method,
llvm::DIFile F);
llvm::DIType getOrCreateVTablePtrType(llvm::DIFile F);
@@ -98,16 +105,22 @@ class CGDebugInfo {
llvm::DISubprogram CreateCXXMemberFunction(const CXXMethodDecl *Method,
llvm::DIFile F,
- llvm::DICompositeType &RecordTy);
+ llvm::DIType RecordTy);
void CollectCXXMemberFunctions(const CXXRecordDecl *Decl,
llvm::DIFile F,
llvm::SmallVectorImpl<llvm::DIDescriptor> &E,
- llvm::DICompositeType &T);
+ llvm::DIType T);
+
+ void CollectCXXFriends(const CXXRecordDecl *Decl,
+ llvm::DIFile F,
+ llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys,
+ llvm::DIType RecordTy);
+
void CollectCXXBases(const CXXRecordDecl *Decl,
llvm::DIFile F,
llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys,
- llvm::DICompositeType &RecordTy);
+ llvm::DIType RecordTy);
void CollectRecordFields(const RecordDecl *Decl, llvm::DIFile F,
@@ -127,20 +140,27 @@ public:
/// EmitStopPoint - Emit a call to llvm.dbg.stoppoint to indicate a change of
/// source line.
- void EmitStopPoint(llvm::Function *Fn, CGBuilderTy &Builder);
+ void EmitStopPoint(CGBuilderTy &Builder);
/// EmitFunctionStart - Emit a call to llvm.dbg.function.start to indicate
/// start of a new function.
void EmitFunctionStart(GlobalDecl GD, QualType FnType,
llvm::Function *Fn, CGBuilderTy &Builder);
+ /// EmitFunctionEnd - Constructs the debug code for exiting a function.
+ void EmitFunctionEnd(CGBuilderTy &Builder);
+
+  /// UpdateLineDirectiveRegion - Update the region stack only if a #line
+  /// directive has introduced a scope change.
+ void UpdateLineDirectiveRegion(CGBuilderTy &Builder);
+
/// EmitRegionStart - Emit a call to llvm.dbg.region.start to indicate start
/// of a new block.
- void EmitRegionStart(llvm::Function *Fn, CGBuilderTy &Builder);
+ void EmitRegionStart(CGBuilderTy &Builder);
/// EmitRegionEnd - Emit call to llvm.dbg.region.end to indicate end of a
/// block.
- void EmitRegionEnd(llvm::Function *Fn, CGBuilderTy &Builder);
+ void EmitRegionEnd(CGBuilderTy &Builder);
/// EmitDeclareOfAutoVariable - Emit call to llvm.dbg.declare for an automatic
/// variable declaration.
@@ -165,6 +185,10 @@ public:
/// EmitGlobalVariable - Emit information about an objective-c interface.
void EmitGlobalVariable(llvm::GlobalVariable *GV, ObjCInterfaceDecl *Decl);
+ /// EmitGlobalVariable - Emit global variable's debug info.
+ void EmitGlobalVariable(const ValueDecl *VD, llvm::ConstantInt *Init,
+ CGBuilderTy &Builder);
+
private:
/// EmitDeclare - Emit call to llvm.dbg.declare for a variable declaration.
void EmitDeclare(const VarDecl *decl, unsigned Tag, llvm::Value *AI,
@@ -183,6 +207,9 @@ private:
llvm::DIDescriptor getContextDescriptor(const Decl *Decl,
llvm::DIDescriptor &CU);
+ /// getCurrentDirname - Return current directory name.
+ llvm::StringRef getCurrentDirname();
+
/// CreateCompileUnit - Create new compile unit.
void CreateCompileUnit();
@@ -205,6 +232,12 @@ private:
  /// name is constructed on demand (e.g. C++ destructor) then the name
/// is stored on the side.
llvm::StringRef getFunctionName(const FunctionDecl *FD);
+ /// getObjCMethodName - Returns the unmangled name of an Objective-C method.
+ /// This is the display name for the debugging info.
+ llvm::StringRef getObjCMethodName(const ObjCMethodDecl *FD);
+
+ /// getClassName - Get class name including template argument list.
+ llvm::StringRef getClassName(RecordDecl *RD);
/// getVTableName - Get vtable name for the given Class.
llvm::StringRef getVTableName(const CXXRecordDecl *Decl);
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
index 1a62ea95555d..57e5236c67e5 100644
--- a/lib/CodeGen/CGDecl.cpp
+++ b/lib/CodeGen/CGDecl.cpp
@@ -107,11 +107,11 @@ void CodeGenFunction::EmitBlockVarDecl(const VarDecl &D) {
CGM.ErrorUnsupported(&D, "__asm__");
switch (D.getStorageClass()) {
- case VarDecl::None:
- case VarDecl::Auto:
- case VarDecl::Register:
+ case SC_None:
+ case SC_Auto:
+ case SC_Register:
return EmitLocalBlockVarDecl(D);
- case VarDecl::Static: {
+ case SC_Static: {
llvm::GlobalValue::LinkageTypes Linkage =
llvm::GlobalValue::InternalLinkage;
@@ -126,8 +126,8 @@ void CodeGenFunction::EmitBlockVarDecl(const VarDecl &D) {
return EmitStaticBlockVarDecl(D, Linkage);
}
- case VarDecl::Extern:
- case VarDecl::PrivateExtern:
+ case SC_Extern:
+ case SC_PrivateExtern:
// Don't emit it now, allow it to be emitted lazily on its first use.
return;
}
@@ -183,7 +183,7 @@ llvm::GlobalVariable *
CodeGenFunction::AddInitializerToGlobalBlockVarDecl(const VarDecl &D,
llvm::GlobalVariable *GV) {
llvm::Constant *Init = CGM.EmitConstantExpr(D.getInit(), D.getType(), this);
-
+
// If constant emission failed, then this should be a C++ static
// initializer.
if (!Init) {
@@ -198,12 +198,12 @@ CodeGenFunction::AddInitializerToGlobalBlockVarDecl(const VarDecl &D,
}
return GV;
}
-
+
// The initializer may differ in type from the global. Rewrite
// the global to match the initializer. (We have to do this
// because some types, like unions, can't be completely represented
// in the LLVM type system.)
- if (GV->getType() != Init->getType()) {
+ if (GV->getType()->getElementType() != Init->getType()) {
llvm::GlobalVariable *OldGV = GV;
GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
@@ -373,7 +373,7 @@ const llvm::Type *CodeGenFunction::BuildByRefType(const ValueDecl *D) {
}
// T x;
- Types.push_back(ConvertType(Ty));
+ Types.push_back(ConvertTypeForMem(Ty));
const llvm::Type *T = llvm::StructType::get(VMContext, Types, Packed);
@@ -389,7 +389,7 @@ const llvm::Type *CodeGenFunction::BuildByRefType(const ValueDecl *D) {
}
namespace {
- struct CallArrayDtor : EHScopeStack::LazyCleanup {
+ struct CallArrayDtor : EHScopeStack::Cleanup {
CallArrayDtor(const CXXDestructorDecl *Dtor,
const ConstantArrayType *Type,
llvm::Value *Loc)
@@ -408,7 +408,7 @@ namespace {
}
};
- struct CallVarDtor : EHScopeStack::LazyCleanup {
+ struct CallVarDtor : EHScopeStack::Cleanup {
CallVarDtor(const CXXDestructorDecl *Dtor,
llvm::Value *NRVOFlag,
llvm::Value *Loc)
@@ -440,12 +440,64 @@ namespace {
};
}
+namespace {
+ struct CallStackRestore : EHScopeStack::Cleanup {
+ llvm::Value *Stack;
+ CallStackRestore(llvm::Value *Stack) : Stack(Stack) {}
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ llvm::Value *V = CGF.Builder.CreateLoad(Stack, "tmp");
+ llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
+ CGF.Builder.CreateCall(F, V);
+ }
+ };
+
+ struct CallCleanupFunction : EHScopeStack::Cleanup {
+ llvm::Constant *CleanupFn;
+ const CGFunctionInfo &FnInfo;
+ llvm::Value *Addr;
+ const VarDecl &Var;
+
+ CallCleanupFunction(llvm::Constant *CleanupFn, const CGFunctionInfo *Info,
+ llvm::Value *Addr, const VarDecl *Var)
+ : CleanupFn(CleanupFn), FnInfo(*Info), Addr(Addr), Var(*Var) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ // In some cases, the type of the function argument will be different from
+ // the type of the pointer. An example of this is
+ // void f(void* arg);
+ // __attribute__((cleanup(f))) void *g;
+ //
+ // To fix this we insert a bitcast here.
+ QualType ArgTy = FnInfo.arg_begin()->type;
+ llvm::Value *Arg =
+ CGF.Builder.CreateBitCast(Addr, CGF.ConvertType(ArgTy));
+
+ CallArgList Args;
+ Args.push_back(std::make_pair(RValue::get(Arg),
+ CGF.getContext().getPointerType(Var.getType())));
+ CGF.EmitCall(FnInfo, CleanupFn, ReturnValueSlot(), Args);
+ }
+ };
+
+ struct CallBlockRelease : EHScopeStack::Cleanup {
+ llvm::Value *Addr;
+ CallBlockRelease(llvm::Value *Addr) : Addr(Addr) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ llvm::Value *V = CGF.Builder.CreateStructGEP(Addr, 1, "forwarding");
+ V = CGF.Builder.CreateLoad(V);
+ CGF.BuildBlockRelease(V);
+ }
+ };
+}
+
/// EmitLocalBlockVarDecl - Emit code and set up an entry in LocalDeclMap for a
/// variable declaration with auto, register, or no storage class specifier.
/// These turn into simple stack objects, or GlobalValues depending on target.
void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D,
SpecialInitFn *SpecialInit) {
QualType Ty = D.getType();
+ unsigned Alignment = getContext().getDeclAlign(&D).getQuantity();
bool isByRef = D.hasAttr<BlocksAttr>();
bool needsDispose = false;
CharUnits Align = CharUnits::Zero();
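CallCleanupFunction, added above, defers the call required by __attribute__((cleanup)) until scope exit and casts the variable's address to the callee's parameter type, since the cleanup function typically takes void* while the variable does not. A standalone model of that deferred, cast-then-call behaviour; the guard object and names below are illustrative, not Clang's machinery:

#include <cstdio>

static void releaseBuffer(void *Arg) {     // plays the role of "f" in the comment
  std::printf("cleanup got %p\n", Arg);
}

struct DeferredCleanup {
  void (*Fn)(void *);
  void *Addr;
  ~DeferredCleanup() { Fn(Addr); }         // runs when the scope ends
};

int main() {
  int Local = 42;
  // The address of 'Local' is an int*, but the cleanup expects void*, hence
  // the cast -- the same reason the codegen inserts a bitcast.
  DeferredCleanup C{&releaseBuffer, static_cast<void *>(&Local)};
  return 0;
}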
@@ -461,10 +513,10 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D,
// If this value is an array or struct, is POD, and if the initializer is
  // a statically determinable constant, try to optimize it (unless the NRVO
// is already optimizing this).
- if (D.getInit() && !isByRef &&
+ if (!NRVO && D.getInit() && !isByRef &&
(Ty->isArrayType() || Ty->isRecordType()) &&
Ty->isPODType() &&
- D.getInit()->isConstantInitializer(getContext()) && !NRVO) {
+ D.getInit()->isConstantInitializer(getContext(), false)) {
// If this variable is marked 'const', emit the value as a global.
if (CGM.getCodeGenOpts().MergeAllConstants &&
Ty.isConstant(getContext())) {
@@ -516,7 +568,7 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D,
} else {
// Targets that don't support recursion emit locals as globals.
const char *Class =
- D.getStorageClass() == VarDecl::Register ? ".reg." : ".auto.";
+ D.getStorageClass() == SC_Register ? ".reg." : ".auto.";
DeclPtr = CreateStaticBlockVarDecl(D, Class,
llvm::GlobalValue
::InternalLinkage);
@@ -540,20 +592,14 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D,
DidCallStackSave = true;
- {
- // Push a cleanup block and restore the stack there.
- CleanupBlock scope(*this, NormalCleanup);
-
- V = Builder.CreateLoad(Stack, "tmp");
- llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
- Builder.CreateCall(F, V);
- }
+ // Push a cleanup block and restore the stack there.
+ EHStack.pushCleanup<CallStackRestore>(NormalCleanup, Stack);
}
// Get the element type.
const llvm::Type *LElemTy = ConvertTypeForMem(Ty);
const llvm::Type *LElemPtrTy =
- llvm::PointerType::get(LElemTy, D.getType().getAddressSpace());
+ llvm::PointerType::get(LElemTy, Ty.getAddressSpace());
llvm::Value *VLASize = EmitVLASize(Ty);
@@ -658,13 +704,12 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D,
Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
D.getNameAsString());
- bool isVolatile =
- getContext().getCanonicalType(D.getType()).isVolatileQualified();
+ bool isVolatile = getContext().getCanonicalType(Ty).isVolatileQualified();
// If the initializer was a simple constant initializer, we can optimize it
// in various ways.
if (IsSimpleConstantInitializer) {
- llvm::Constant *Init = CGM.EmitConstantExpr(D.getInit(),D.getType(),this);
+ llvm::Constant *Init = CGM.EmitConstantExpr(D.getInit(), Ty,this);
assert(Init != 0 && "Wasn't a simple constant init?");
llvm::Value *AlignVal =
@@ -708,10 +753,10 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D,
}
} else if (Ty->isReferenceType()) {
RValue RV = EmitReferenceBindingToExpr(Init, &D);
- EmitStoreOfScalar(RV.getScalarVal(), Loc, false, Ty);
+ EmitStoreOfScalar(RV.getScalarVal(), Loc, false, Alignment, Ty);
} else if (!hasAggregateLLVMType(Init->getType())) {
llvm::Value *V = EmitScalarExpr(Init);
- EmitStoreOfScalar(V, Loc, isVolatile, D.getType());
+ EmitStoreOfScalar(V, Loc, isVolatile, Alignment, Ty);
} else if (Init->getType()->isAnyComplexType()) {
EmitComplexExprIntoAddr(Init, Loc, isVolatile);
} else {
@@ -738,11 +783,11 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D,
if (const ConstantArrayType *Array =
getContext().getAsConstantArrayType(Ty)) {
- EHStack.pushLazyCleanup<CallArrayDtor>(NormalAndEHCleanup,
- D, Array, Loc);
+ EHStack.pushCleanup<CallArrayDtor>(NormalAndEHCleanup,
+ D, Array, Loc);
} else {
- EHStack.pushLazyCleanup<CallVarDtor>(NormalAndEHCleanup,
- D, NRVOFlag, Loc);
+ EHStack.pushCleanup<CallVarDtor>(NormalAndEHCleanup,
+ D, NRVOFlag, Loc);
}
}
}
@@ -755,52 +800,14 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D,
assert(F && "Could not find function!");
const CGFunctionInfo &Info = CGM.getTypes().getFunctionInfo(FD);
-
- // In some cases, the type of the function argument will be different from
- // the type of the pointer. An example of this is
- // void f(void* arg);
- // __attribute__((cleanup(f))) void *g;
- //
- // To fix this we insert a bitcast here.
- QualType ArgTy = Info.arg_begin()->type;
-
- CleanupBlock CleanupScope(*this, NormalCleanup);
-
- // Normal cleanup.
- CallArgList Args;
- Args.push_back(std::make_pair(RValue::get(Builder.CreateBitCast(DeclPtr,
- ConvertType(ArgTy))),
- getContext().getPointerType(D.getType())));
- EmitCall(Info, F, ReturnValueSlot(), Args);
-
- // EH cleanup.
- if (Exceptions) {
- CleanupScope.beginEHCleanup();
-
- CallArgList Args;
- Args.push_back(std::make_pair(RValue::get(Builder.CreateBitCast(DeclPtr,
- ConvertType(ArgTy))),
- getContext().getPointerType(D.getType())));
- EmitCall(Info, F, ReturnValueSlot(), Args);
- }
+ EHStack.pushCleanup<CallCleanupFunction>(NormalAndEHCleanup,
+ F, &Info, DeclPtr, &D);
}
- if (needsDispose && CGM.getLangOptions().getGCMode() != LangOptions::GCOnly) {
- CleanupBlock CleanupScope(*this, NormalCleanup);
-
- llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
- V = Builder.CreateLoad(V);
- BuildBlockRelease(V);
-
- // FIXME: Turn this on and audit the codegen
- if (0 && Exceptions) {
- CleanupScope.beginEHCleanup();
-
- llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
- V = Builder.CreateLoad(V);
- BuildBlockRelease(V);
- }
- }
+ // If this is a block variable, clean it up.
+ // FIXME: this should be an EH cleanup as well. rdar://problem/8224178
+ if (needsDispose && CGM.getLangOptions().getGCMode() != LangOptions::GCOnly)
+ EHStack.pushCleanup<CallBlockRelease>(NormalCleanup, DeclPtr);
}
/// Emit an alloca (or GlobalValue depending on target)
@@ -822,7 +829,8 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg) {
DeclPtr = CreateMemTemp(Ty, D.getName() + ".addr");
// Store the initial value into the alloca.
- EmitStoreOfScalar(Arg, DeclPtr, CTy.isVolatileQualified(), Ty);
+ unsigned Alignment = getContext().getDeclAlign(&D).getQuantity();
+ EmitStoreOfScalar(Arg, DeclPtr, CTy.isVolatileQualified(), Alignment, Ty);
}
Arg->setName(D.getName());
diff --git a/lib/CodeGen/CGDeclCXX.cpp b/lib/CodeGen/CGDeclCXX.cpp
index ec3f38667b7a..e2f197522b26 100644
--- a/lib/CodeGen/CGDeclCXX.cpp
+++ b/lib/CodeGen/CGDeclCXX.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "CodeGenFunction.h"
+#include "CGCXXABI.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Intrinsics.h"
@@ -30,9 +31,10 @@ static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
QualType T = D.getType();
bool isVolatile = Context.getCanonicalType(T).isVolatileQualified();
+ unsigned Alignment = Context.getDeclAlign(&D).getQuantity();
if (!CGF.hasAggregateLLVMType(T)) {
llvm::Value *V = CGF.EmitScalarExpr(Init);
- CGF.EmitStoreOfScalar(V, DeclPtr, isVolatile, T);
+ CGF.EmitStoreOfScalar(V, DeclPtr, isVolatile, Alignment, T);
} else if (T->isAnyComplexType()) {
CGF.EmitComplexExprIntoAddr(Init, DeclPtr, isVolatile);
} else {
@@ -45,19 +47,15 @@ static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
CodeGenModule &CGM = CGF.CGM;
ASTContext &Context = CGF.getContext();
- const Expr *Init = D.getInit();
QualType T = D.getType();
- if (!CGF.hasAggregateLLVMType(T) || T->isAnyComplexType())
- return;
-
- // Avoid generating destructor(s) for initialized objects.
- if (!isa<CXXConstructExpr>(Init))
- return;
+ // Drill down past array types.
const ConstantArrayType *Array = Context.getAsConstantArrayType(T);
if (Array)
T = Context.getBaseElementType(Array);
+ /// If that's not a record, we're done.
+ /// FIXME: __attribute__((cleanup)) ?
const RecordType *RT = T->getAs<RecordType>();
if (!RT)
return;
@@ -94,8 +92,9 @@ void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
return;
}
+ unsigned Alignment = getContext().getDeclAlign(&D).getQuantity();
RValue RV = EmitReferenceBindingToExpr(Init, &D);
- EmitStoreOfScalar(RV.getScalarVal(), DeclPtr, false, T);
+ EmitStoreOfScalar(RV.getScalarVal(), DeclPtr, false, Alignment, T);
}
void
@@ -174,13 +173,26 @@ CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D) {
unsigned int order = D->getAttr<InitPriorityAttr>()->getPriority();
OrderGlobalInits Key(order, PrioritizedCXXGlobalInits.size());
PrioritizedCXXGlobalInits.push_back(std::make_pair(Key, Fn));
+ DelayedCXXInitPosition.erase(D);
+ }
+ else {
+ llvm::DenseMap<const Decl *, unsigned>::iterator I =
+ DelayedCXXInitPosition.find(D);
+ if (I == DelayedCXXInitPosition.end()) {
+ CXXGlobalInits.push_back(Fn);
+ } else {
+ assert(CXXGlobalInits[I->second] == 0);
+ CXXGlobalInits[I->second] = Fn;
+ DelayedCXXInitPosition.erase(I);
+ }
}
- else
- CXXGlobalInits.push_back(Fn);
}
void
CodeGenModule::EmitCXXGlobalInitFunc() {
+ while (!CXXGlobalInits.empty() && !CXXGlobalInits.back())
+ CXXGlobalInits.pop_back();
+
if (CXXGlobalInits.empty() && PrioritizedCXXGlobalInits.empty())
return;
@@ -200,8 +212,7 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
llvm::Function *Fn = PrioritizedCXXGlobalInits[i].second;
LocalCXXGlobalInits.push_back(Fn);
}
- for (unsigned i = 0; i < CXXGlobalInits.size(); i++)
- LocalCXXGlobalInits.push_back(CXXGlobalInits[i]);
+ LocalCXXGlobalInits.append(CXXGlobalInits.begin(), CXXGlobalInits.end());
CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn,
&LocalCXXGlobalInits[0],
LocalCXXGlobalInits.size());
@@ -247,7 +258,8 @@ void CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
SourceLocation());
for (unsigned i = 0; i != NumDecls; ++i)
- Builder.CreateCall(Decls[i]);
+ if (Decls[i])
+ Builder.CreateCall(Decls[i]);
FinishFunction();
}
@@ -316,6 +328,20 @@ static llvm::Constant *getGuardAbortFn(CodeGenFunction &CGF) {
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_guard_abort");
}
+namespace {
+ struct CallGuardAbort : EHScopeStack::Cleanup {
+ llvm::GlobalVariable *Guard;
+ CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ // It shouldn't be possible for this to throw, but if it can,
+ // this should allow for the possibility of an invoke.
+ CGF.Builder.CreateCall(getGuardAbortFn(CGF), Guard)
+ ->setDoesNotThrow();
+ }
+ };
+}
+
void
CodeGenFunction::EmitStaticCXXBlockVarDeclInit(const VarDecl &D,
llvm::GlobalVariable *GV) {
@@ -325,10 +351,10 @@ CodeGenFunction::EmitStaticCXXBlockVarDeclInit(const VarDecl &D,
bool ThreadsafeStatics = getContext().getLangOptions().ThreadsafeStatics;
llvm::SmallString<256> GuardVName;
- CGM.getMangleContext().mangleGuardVariable(&D, GuardVName);
+ CGM.getCXXABI().getMangleContext().mangleGuardVariable(&D, GuardVName);
// Create the guard variable.
- llvm::GlobalValue *GuardVariable =
+ llvm::GlobalVariable *GuardVariable =
new llvm::GlobalVariable(CGM.getModule(), Int64Ty,
false, GV->getLinkage(),
llvm::Constant::getNullValue(Int64Ty),
@@ -360,23 +386,25 @@ CodeGenFunction::EmitStaticCXXBlockVarDeclInit(const VarDecl &D,
InitBlock, EndBlock);
// Call __cxa_guard_abort along the exceptional edge.
- if (Exceptions) {
- CleanupBlock Cleanup(*this, EHCleanup);
- Builder.CreateCall(getGuardAbortFn(*this), GuardVariable);
- }
+ if (Exceptions)
+ EHStack.pushCleanup<CallGuardAbort>(EHCleanup, GuardVariable);
EmitBlock(InitBlock);
}
if (D.getType()->isReferenceType()) {
+ unsigned Alignment = getContext().getDeclAlign(&D).getQuantity();
QualType T = D.getType();
RValue RV = EmitReferenceBindingToExpr(D.getInit(), &D);
- EmitStoreOfScalar(RV.getScalarVal(), GV, /*Volatile=*/false, T);
-
+ EmitStoreOfScalar(RV.getScalarVal(), GV, /*Volatile=*/false, Alignment, T);
} else
EmitDeclInit(*this, D, GV);
if (ThreadsafeStatics) {
+ // Pop the guard-abort cleanup if we pushed one.
+ if (Exceptions)
+ PopCleanupBlock();
+
// Call __cxa_guard_release. This cannot throw.
Builder.CreateCall(getGuardReleaseFn(*this), GuardVariable);
} else {
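For a thread-safe function-local static, the code above emits the guard sequence and now pushes CallGuardAbort as an EH cleanup, so the guard is reset if the initializer throws. A source-level sketch of that control flow; the guard* helpers are simplified stand-ins, not the real __cxa_guard_* runtime entry points:

#include <cstdint>

static bool guardAcquire(std::uint64_t *G) { return *G == 0; } // fast-path check
static void guardRelease(std::uint64_t *G) { *G = 1; }
static void guardAbort(std::uint64_t *G)   { *G = 0; }         // allow a retry

static std::uint64_t Guard;
static int Value;

int getValue() {
  if (guardAcquire(&Guard)) {
    try {
      Value = 42;               // the real initializer could throw
      guardRelease(&Guard);
    } catch (...) {
      guardAbort(&Guard);       // CallGuardAbort's job on the EH path
      throw;
    }
  }
  return Value;
}

int main() { return getValue() == 42 ? 0 : 1; }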
diff --git a/lib/CodeGen/CGException.cpp b/lib/CodeGen/CGException.cpp
index 4980aad1b383..7fb616e5a150 100644
--- a/lib/CodeGen/CGException.cpp
+++ b/lib/CodeGen/CGException.cpp
@@ -16,8 +16,10 @@
#include "llvm/Intrinsics.h"
#include "llvm/Support/CallSite.h"
+#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "CGException.h"
+#include "TargetInfo.h"
using namespace clang;
using namespace CodeGen;
@@ -62,29 +64,26 @@ EHScopeStack::getEnclosingEHCleanup(iterator it) const {
return stabilize(it);
return cast<EHCleanupScope>(*it).getEnclosingEHCleanup();
}
- if (isa<EHLazyCleanupScope>(*it)) {
- if (cast<EHLazyCleanupScope>(*it).isEHCleanup())
- return stabilize(it);
- return cast<EHLazyCleanupScope>(*it).getEnclosingEHCleanup();
- }
++it;
} while (it != end());
return stable_end();
}
-void *EHScopeStack::pushLazyCleanup(CleanupKind Kind, size_t Size) {
+void *EHScopeStack::pushCleanup(CleanupKind Kind, size_t Size) {
assert(((Size % sizeof(void*)) == 0) && "cleanup type is misaligned");
- char *Buffer = allocate(EHLazyCleanupScope::getSizeForCleanupSize(Size));
- bool IsNormalCleanup = Kind != EHCleanup;
- bool IsEHCleanup = Kind != NormalCleanup;
- EHLazyCleanupScope *Scope =
- new (Buffer) EHLazyCleanupScope(IsNormalCleanup,
- IsEHCleanup,
- Size,
- BranchFixups.size(),
- InnermostNormalCleanup,
- InnermostEHCleanup);
+ char *Buffer = allocate(EHCleanupScope::getSizeForCleanupSize(Size));
+ bool IsNormalCleanup = Kind & NormalCleanup;
+ bool IsEHCleanup = Kind & EHCleanup;
+ bool IsActive = !(Kind & InactiveCleanup);
+ EHCleanupScope *Scope =
+ new (Buffer) EHCleanupScope(IsNormalCleanup,
+ IsEHCleanup,
+ IsActive,
+ Size,
+ BranchFixups.size(),
+ InnermostNormalCleanup,
+ InnermostEHCleanup);
if (IsNormalCleanup)
InnermostNormalCleanup = stable_begin();
if (IsEHCleanup)
@@ -93,36 +92,19 @@ void *EHScopeStack::pushLazyCleanup(CleanupKind Kind, size_t Size) {
return Scope->getCleanupBuffer();
}
-void EHScopeStack::pushCleanup(llvm::BasicBlock *NormalEntry,
- llvm::BasicBlock *NormalExit,
- llvm::BasicBlock *EHEntry,
- llvm::BasicBlock *EHExit) {
- char *Buffer = allocate(EHCleanupScope::getSize());
- new (Buffer) EHCleanupScope(BranchFixups.size(),
- InnermostNormalCleanup,
- InnermostEHCleanup,
- NormalEntry, NormalExit, EHEntry, EHExit);
- if (NormalEntry)
- InnermostNormalCleanup = stable_begin();
- if (EHEntry)
- InnermostEHCleanup = stable_begin();
-}
-
void EHScopeStack::popCleanup() {
assert(!empty() && "popping exception stack when not empty");
- if (isa<EHLazyCleanupScope>(*begin())) {
- EHLazyCleanupScope &Cleanup = cast<EHLazyCleanupScope>(*begin());
- InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup();
- InnermostEHCleanup = Cleanup.getEnclosingEHCleanup();
- StartOfData += Cleanup.getAllocatedSize();
- } else {
- assert(isa<EHCleanupScope>(*begin()));
- EHCleanupScope &Cleanup = cast<EHCleanupScope>(*begin());
- InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup();
- InnermostEHCleanup = Cleanup.getEnclosingEHCleanup();
- StartOfData += EHCleanupScope::getSize();
- }
+ assert(isa<EHCleanupScope>(*begin()));
+ EHCleanupScope &Cleanup = cast<EHCleanupScope>(*begin());
+ InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup();
+ InnermostEHCleanup = Cleanup.getEnclosingEHCleanup();
+ StartOfData += Cleanup.getAllocatedSize();
+
+ if (empty()) NextEHDestIndex = FirstEHDestIndex;
+
+ // Destroy the cleanup.
+ Cleanup.~EHCleanupScope();
// Check whether we can shrink the branch-fixups stack.
if (!BranchFixups.empty()) {
@@ -149,6 +131,8 @@ void EHScopeStack::popFilter() {
EHFilterScope &Filter = cast<EHFilterScope>(*begin());
StartOfData += EHFilterScope::getSizeForNumFilters(Filter.getNumFilters());
+ if (empty()) NextEHDestIndex = FirstEHDestIndex;
+
assert(CatchDepth > 0 && "mismatched filter push/pop");
CatchDepth--;
}
@@ -156,13 +140,16 @@ void EHScopeStack::popFilter() {
EHCatchScope *EHScopeStack::pushCatch(unsigned NumHandlers) {
char *Buffer = allocate(EHCatchScope::getSizeForNumHandlers(NumHandlers));
CatchDepth++;
- return new (Buffer) EHCatchScope(NumHandlers);
+ EHCatchScope *Scope = new (Buffer) EHCatchScope(NumHandlers);
+ for (unsigned I = 0; I != NumHandlers; ++I)
+ Scope->getHandlers()[I].Index = getNextEHDestIndex();
+ return Scope;
}
void EHScopeStack::pushTerminate() {
char *Buffer = allocate(EHTerminateScope::getSize());
CatchDepth++;
- new (Buffer) EHTerminateScope();
+ new (Buffer) EHTerminateScope(getNextEHDestIndex());
}
/// Remove any 'null' fixups on the stack. However, we can't pop more
@@ -176,11 +163,7 @@ void EHScopeStack::popNullFixups() {
assert(hasNormalCleanups());
EHScopeStack::iterator it = find(InnermostNormalCleanup);
- unsigned MinSize;
- if (isa<EHCleanupScope>(*it))
- MinSize = cast<EHCleanupScope>(*it).getFixupDepth();
- else
- MinSize = cast<EHLazyCleanupScope>(*it).getFixupDepth();
+ unsigned MinSize = cast<EHCleanupScope>(*it).getFixupDepth();
assert(BranchFixups.size() >= MinSize && "fixup stack out of order");
while (BranchFixups.size() > MinSize &&
@@ -188,20 +171,6 @@ void EHScopeStack::popNullFixups() {
BranchFixups.pop_back();
}
-void EHScopeStack::resolveBranchFixups(llvm::BasicBlock *Dest) {
- assert(Dest && "null block passed to resolveBranchFixups");
-
- if (BranchFixups.empty()) return;
- assert(hasNormalCleanups() &&
- "branch fixups exist with no normal cleanups on stack");
-
- for (unsigned I = 0, E = BranchFixups.size(); I != E; ++I)
- if (BranchFixups[I].Destination == Dest)
- BranchFixups[I].Destination = 0;
-
- popNullFixups();
-}
-
static llvm::Constant *getAllocateExceptionFn(CodeGenFunction &CGF) {
// void *__cxa_allocate_exception(size_t thrown_size);
const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
@@ -303,7 +272,7 @@ llvm::Constant *CodeGenFunction::getUnwindResumeOrRethrowFn() {
false);
if (CGM.getLangOptions().SjLjExceptions)
- return CGM.CreateRuntimeFunction(FTy, "_Unwind_SjLj_Resume");
+ return CGM.CreateRuntimeFunction(FTy, "_Unwind_SjLj_Resume_or_Rethrow");
return CGM.CreateRuntimeFunction(FTy, "_Unwind_Resume_or_Rethrow");
}
@@ -317,74 +286,88 @@ static llvm::Constant *getTerminateFn(CodeGenFunction &CGF) {
CGF.CGM.getLangOptions().CPlusPlus ? "_ZSt9terminatev" : "abort");
}
-static const char *getCPersonalityFn(CodeGenFunction &CGF) {
- return "__gcc_personality_v0";
+static llvm::Constant *getCatchallRethrowFn(CodeGenFunction &CGF,
+ const char *Name) {
+ const llvm::Type *Int8PtrTy =
+ llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ std::vector<const llvm::Type*> Args(1, Int8PtrTy);
+
+ const llvm::Type *VoidTy = llvm::Type::getVoidTy(CGF.getLLVMContext());
+ const llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, Args, false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, Name);
+}
+
+const EHPersonality EHPersonality::GNU_C("__gcc_personality_v0");
+const EHPersonality EHPersonality::NeXT_ObjC("__objc_personality_v0");
+const EHPersonality EHPersonality::GNU_CPlusPlus("__gxx_personality_v0");
+const EHPersonality EHPersonality::GNU_CPlusPlus_SJLJ("__gxx_personality_sj0");
+const EHPersonality EHPersonality::GNU_ObjC("__gnu_objc_personality_v0",
+ "objc_exception_throw");
+
+static const EHPersonality &getCPersonality(const LangOptions &L) {
+ return EHPersonality::GNU_C;
}
-static const char *getObjCPersonalityFn(CodeGenFunction &CGF) {
- if (CGF.CGM.getLangOptions().NeXTRuntime) {
- if (CGF.CGM.getLangOptions().ObjCNonFragileABI)
- return "__objc_personality_v0";
- else
- return getCPersonalityFn(CGF);
+static const EHPersonality &getObjCPersonality(const LangOptions &L) {
+ if (L.NeXTRuntime) {
+ if (L.ObjCNonFragileABI) return EHPersonality::NeXT_ObjC;
+ else return getCPersonality(L);
} else {
- return "__gnu_objc_personality_v0";
+ return EHPersonality::GNU_ObjC;
}
}
-static const char *getCXXPersonalityFn(CodeGenFunction &CGF) {
- if (CGF.CGM.getLangOptions().SjLjExceptions)
- return "__gxx_personality_sj0";
+static const EHPersonality &getCXXPersonality(const LangOptions &L) {
+ if (L.SjLjExceptions)
+ return EHPersonality::GNU_CPlusPlus_SJLJ;
else
- return "__gxx_personality_v0";
+ return EHPersonality::GNU_CPlusPlus;
}
/// Determines the personality function to use when both C++
/// and Objective-C exceptions are being caught.
-static const char *getObjCXXPersonalityFn(CodeGenFunction &CGF) {
+static const EHPersonality &getObjCXXPersonality(const LangOptions &L) {
// The ObjC personality defers to the C++ personality for non-ObjC
// handlers. Unlike the C++ case, we use the same personality
// function on targets using (backend-driven) SJLJ EH.
- if (CGF.CGM.getLangOptions().NeXTRuntime) {
- if (CGF.CGM.getLangOptions().ObjCNonFragileABI)
- return "__objc_personality_v0";
+ if (L.NeXTRuntime) {
+ if (L.ObjCNonFragileABI)
+ return EHPersonality::NeXT_ObjC;
// In the fragile ABI, just use C++ exception handling and hope
// they're not doing crazy exception mixing.
else
- return getCXXPersonalityFn(CGF);
+ return getCXXPersonality(L);
}
- // I'm pretty sure the GNU runtime doesn't support mixed EH.
- // TODO: we don't necessarily need mixed EH here; remember what
- // kind of exceptions we actually try to catch in this function.
- CGF.CGM.ErrorUnsupported(CGF.CurCodeDecl,
- "the GNU Objective C runtime does not support "
- "catching C++ and Objective C exceptions in the "
- "same function");
- // Use the C++ personality just to avoid returning null.
- return getCXXPersonalityFn(CGF);
+ // The GNU runtime's personality function inherently doesn't support
+ // mixed EH. Use the C++ personality just to avoid returning null.
+ return getCXXPersonality(L);
}
-static llvm::Constant *getPersonalityFn(CodeGenFunction &CGF) {
- const char *Name;
- const LangOptions &Opts = CGF.CGM.getLangOptions();
- if (Opts.CPlusPlus && Opts.ObjC1)
- Name = getObjCXXPersonalityFn(CGF);
- else if (Opts.CPlusPlus)
- Name = getCXXPersonalityFn(CGF);
- else if (Opts.ObjC1)
- Name = getObjCPersonalityFn(CGF);
+const EHPersonality &EHPersonality::get(const LangOptions &L) {
+ if (L.CPlusPlus && L.ObjC1)
+ return getObjCXXPersonality(L);
+ else if (L.CPlusPlus)
+ return getCXXPersonality(L);
+ else if (L.ObjC1)
+ return getObjCPersonality(L);
else
- Name = getCPersonalityFn(CGF);
+ return getCPersonality(L);
+}
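
For orientation, a minimal sketch of what the dispatch above resolves to (illustrative caller only; the mapping simply restates the constants and predicates defined in this hunk):

    // Sketch: which personality EHPersonality::get() selects.
    //   C                              -> __gcc_personality_v0
    //   C++                            -> __gxx_personality_v0
    //   C++ with -fsjlj-exceptions     -> __gxx_personality_sj0
    //   ObjC, NeXT non-fragile ABI     -> __objc_personality_v0
    //   ObjC, NeXT fragile ABI         -> the C personality
    //   ObjC, GNU runtime              -> __gnu_objc_personality_v0, with
    //                                     objc_exception_throw as the
    //                                     catch-all rethrow helper
    const EHPersonality &P = EHPersonality::get(CGM.getLangOptions());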
+
+static llvm::Constant *getPersonalityFn(CodeGenFunction &CGF,
+ const EHPersonality &Personality) {
+ const char *Name = Personality.getPersonalityFnName();
- llvm::Constant *Personality =
+ llvm::Constant *Fn =
CGF.CGM.CreateRuntimeFunction(llvm::FunctionType::get(
llvm::Type::getInt32Ty(
CGF.CGM.getLLVMContext()),
true),
Name);
- return llvm::ConstantExpr::getBitCast(Personality, CGF.CGM.PtrToInt8Ty);
+ return llvm::ConstantExpr::getBitCast(Fn, CGF.CGM.PtrToInt8Ty);
}
/// Returns the value to inject into a selector to indicate the
@@ -403,7 +386,7 @@ static llvm::Constant *getCleanupValue(CodeGenFunction &CGF) {
namespace {
/// A cleanup to free the exception object if its initialization
/// throws.
- struct FreeExceptionCleanup : EHScopeStack::LazyCleanup {
+ struct FreeExceptionCleanup : EHScopeStack::Cleanup {
FreeExceptionCleanup(llvm::Value *ShouldFreeVar,
llvm::Value *ExnLocVar)
: ShouldFreeVar(ShouldFreeVar), ExnLocVar(ExnLocVar) {}
@@ -453,9 +436,9 @@ static void EmitAnyExprToExn(CodeGenFunction &CGF, const Expr *E,
// exception during initialization.
// FIXME: stmt expressions might require this to be a normal
// cleanup, too.
- CGF.EHStack.pushLazyCleanup<FreeExceptionCleanup>(EHCleanup,
- ShouldFreeVar,
- ExnLocVar);
+ CGF.EHStack.pushCleanup<FreeExceptionCleanup>(EHCleanup,
+ ShouldFreeVar,
+ ExnLocVar);
EHScopeStack::stable_iterator Cleanup = CGF.EHStack.stable_begin();
CGF.Builder.CreateStore(ExnLoc, ExnLocVar);
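
As a concrete illustration of what this cleanup guards against (example code, not part of the patch):

    struct Payload { Payload(const Payload &); /* copy ctor may throw */ };

    void g(const Payload &p) {
      // __cxa_allocate_exception runs first; if copy-constructing the
      // exception object from p then throws, the FreeExceptionCleanup
      // pushed above releases the allocation instead of leaking it.
      throw p;
    }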
@@ -637,7 +620,12 @@ void CodeGenFunction::EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
// http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#388
QualType CaughtType = C->getCaughtType();
CaughtType = CaughtType.getNonReferenceType().getUnqualifiedType();
- llvm::Value *TypeInfo = CGM.GetAddrOfRTTIDescriptor(CaughtType, true);
+
+ llvm::Value *TypeInfo = 0;
+ if (CaughtType->isObjCObjectPointerType())
+ TypeInfo = CGM.getObjCRuntime().GetEHType(CaughtType);
+ else
+ TypeInfo = CGM.GetAddrOfRTTIDescriptor(CaughtType, true);
CatchScope->setHandler(I, TypeInfo, Handler);
} else {
// No exception decl indicates '...', a catch-all.
@@ -653,8 +641,6 @@ static bool isNonEHScope(const EHScope &S) {
switch (S.getKind()) {
case EHScope::Cleanup:
return !cast<EHCleanupScope>(S).isEHCleanup();
- case EHScope::LazyCleanup:
- return !cast<EHLazyCleanupScope>(S).isEHCleanup();
case EHScope::Filter:
case EHScope::Catch:
case EHScope::Terminate:
@@ -753,6 +739,9 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
// Save the current IR generation state.
CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+ const EHPersonality &Personality =
+ EHPersonality::get(CGF.CGM.getLangOptions());
+
// Create and configure the landing pad.
llvm::BasicBlock *LP = createBasicBlock("lpad");
EmitBlock(LP);
@@ -768,11 +757,11 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
// Build the selector arguments.
llvm::SmallVector<llvm::Value*, 8> EHSelector;
EHSelector.push_back(Exn);
- EHSelector.push_back(getPersonalityFn(*this));
+ EHSelector.push_back(getPersonalityFn(*this, Personality));
// Accumulate all the handlers in scope.
- llvm::DenseMap<llvm::Value*, JumpDest> EHHandlers;
- JumpDest CatchAll;
+ llvm::DenseMap<llvm::Value*, UnwindDest> EHHandlers;
+ UnwindDest CatchAll;
bool HasEHCleanup = false;
bool HasEHFilter = false;
llvm::SmallVector<llvm::Value*, 8> EHFilters;
@@ -780,12 +769,6 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
I != E; ++I) {
switch (I->getKind()) {
- case EHScope::LazyCleanup:
- if (!HasEHCleanup)
- HasEHCleanup = cast<EHLazyCleanupScope>(*I).isEHCleanup();
- // We otherwise don't care about cleanups.
- continue;
-
case EHScope::Cleanup:
if (!HasEHCleanup)
HasEHCleanup = cast<EHCleanupScope>(*I).isEHCleanup();
@@ -794,7 +777,7 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
case EHScope::Filter: {
assert(I.next() == EHStack.end() && "EH filter is not end of EH stack");
- assert(!CatchAll.Block && "EH filter reached after catch-all");
+ assert(!CatchAll.isValid() && "EH filter reached after catch-all");
// Filter scopes get added to the selector in weird ways.
EHFilterScope &Filter = cast<EHFilterScope>(*I);
@@ -812,9 +795,10 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
case EHScope::Terminate:
// Terminate scopes are basically catch-alls.
- assert(!CatchAll.Block);
- CatchAll.Block = getTerminateHandler();
- CatchAll.ScopeDepth = EHStack.getEnclosingEHCleanup(I);
+ assert(!CatchAll.isValid());
+ CatchAll = UnwindDest(getTerminateHandler(),
+ EHStack.getEnclosingEHCleanup(I),
+ cast<EHTerminateScope>(*I).getDestIndex());
goto done;
case EHScope::Catch:
@@ -827,30 +811,32 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
// Catch-all. We should only have one of these per catch.
if (!Handler.Type) {
- assert(!CatchAll.Block);
- CatchAll.Block = Handler.Block;
- CatchAll.ScopeDepth = EHStack.getEnclosingEHCleanup(I);
+ assert(!CatchAll.isValid());
+ CatchAll = UnwindDest(Handler.Block,
+ EHStack.getEnclosingEHCleanup(I),
+ Handler.Index);
continue;
}
// Check whether we already have a handler for this type.
- JumpDest &Dest = EHHandlers[Handler.Type];
- if (Dest.Block) continue;
+ UnwindDest &Dest = EHHandlers[Handler.Type];
+ if (Dest.isValid()) continue;
EHSelector.push_back(Handler.Type);
- Dest.Block = Handler.Block;
- Dest.ScopeDepth = EHStack.getEnclosingEHCleanup(I);
+ Dest = UnwindDest(Handler.Block,
+ EHStack.getEnclosingEHCleanup(I),
+ Handler.Index);
}
// Stop if we found a catch-all.
- if (CatchAll.Block) break;
+ if (CatchAll.isValid()) break;
}
done:
unsigned LastToEmitInLoop = EHSelector.size();
// If we have a catch-all, add null to the selector.
- if (CatchAll.Block) {
+ if (CatchAll.isValid()) {
EHSelector.push_back(getCatchAllValue(CGF));
// If we have an EH filter, we need to add those handlers in the
@@ -899,14 +885,15 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
// filter (possibly with a cleanup), a catch-all, or another catch).
for (unsigned I = 2; I != LastToEmitInLoop; ++I) {
llvm::Value *Type = EHSelector[I];
- JumpDest Dest = EHHandlers[Type];
- assert(Dest.Block && "no handler entry for value in selector?");
+ UnwindDest Dest = EHHandlers[Type];
+ assert(Dest.isValid() && "no handler entry for value in selector?");
// Figure out where to branch on a match. As a debug code-size
// optimization, if the scope depth matches the innermost cleanup,
// we branch directly to the catch handler.
- llvm::BasicBlock *Match = Dest.Block;
- bool MatchNeedsCleanup = Dest.ScopeDepth != EHStack.getInnermostEHCleanup();
+ llvm::BasicBlock *Match = Dest.getBlock();
+ bool MatchNeedsCleanup =
+ Dest.getScopeDepth() != EHStack.getInnermostEHCleanup();
if (MatchNeedsCleanup)
Match = createBasicBlock("eh.match");
@@ -932,7 +919,7 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
// Emit the final case in the selector.
// This might be a catch-all....
- if (CatchAll.Block) {
+ if (CatchAll.isValid()) {
assert(isa<llvm::ConstantPointerNull>(EHSelector.back()));
EmitBranchThroughEHCleanup(CatchAll);
@@ -951,7 +938,8 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
}
llvm::BasicBlock *CleanupContBB = createBasicBlock("ehspec.cleanup.cont");
- EmitBranchThroughEHCleanup(JumpDest(CleanupContBB, EHStack.stable_end()));
+ EmitBranchThroughEHCleanup(UnwindDest(CleanupContBB, EHStack.stable_end(),
+ EHStack.getNextEHDestIndex()));
EmitBlock(CleanupContBB);
if (HasEHCleanup)
@@ -996,22 +984,7 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
// ...or a cleanup.
} else {
- // We emit a jump to a notional label at the outermost unwind state.
- llvm::BasicBlock *Unwind = createBasicBlock("eh.resume");
- JumpDest Dest(Unwind, EHStack.stable_end());
- EmitBranchThroughEHCleanup(Dest);
-
- // The unwind block. We have to reload the exception here because
- // we might have unwound through arbitrary blocks, so the landing
- // pad might not dominate.
- EmitBlock(Unwind);
-
- // This can always be a call because we necessarily didn't find
- // anything on the EH stack which needs our help.
- Builder.CreateCall(getUnwindResumeOrRethrowFn(),
- Builder.CreateLoad(getExceptionSlot()))
- ->setDoesNotReturn();
- Builder.CreateUnreachable();
+ EmitBranchThroughEHCleanup(getRethrowDest());
}
// Restore the old IR generation state.
@@ -1033,7 +1006,7 @@ namespace {
/// of the caught type, so we have to assume the actual thrown
/// exception type might have a throwing destructor, even if the
/// caught type's destructor is trivial or nothrow.
- struct CallEndCatch : EHScopeStack::LazyCleanup {
+ struct CallEndCatch : EHScopeStack::Cleanup {
CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
bool MightThrow;
@@ -1058,7 +1031,7 @@ static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
llvm::CallInst *Call = CGF.Builder.CreateCall(getBeginCatchFn(CGF), Exn);
Call->setDoesNotThrow();
- CGF.EHStack.pushLazyCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);
+ CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);
return Call;
}
@@ -1078,11 +1051,57 @@ static void InitCatchParam(CodeGenFunction &CGF,
// If we're catching by reference, we can just cast the object
// pointer to the appropriate pointer.
if (isa<ReferenceType>(CatchType)) {
- bool EndCatchMightThrow = cast<ReferenceType>(CatchType)->getPointeeType()
- ->isRecordType();
+ QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
+ bool EndCatchMightThrow = CaughtType->isRecordType();
// __cxa_begin_catch returns the adjusted object pointer.
llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);
+
+ // We have no way to tell the personality function that we're
+ // catching by reference, so if we're catching a pointer,
+ // __cxa_begin_catch will actually return that pointer by value.
+ if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
+ QualType PointeeType = PT->getPointeeType();
+
+ // When catching by reference, generally we should just ignore
+ // this by-value pointer and use the exception object instead.
+ if (!PointeeType->isRecordType()) {
+
+ // Exn points to the struct _Unwind_Exception header, which
+ // we have to skip past in order to reach the exception data.
+ unsigned HeaderSize =
+ CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
+ AdjustedExn = CGF.Builder.CreateConstGEP1_32(Exn, HeaderSize);
+
+ // However, if we're catching a pointer-to-record type, that won't
+ // work, because the personality function might have adjusted
+ // the pointer. There's actually no way for us to fully satisfy
+ // the language/ABI contract here: we can't use Exn because it
+ // might have the wrong adjustment, but we can't use the by-value
+ // pointer because it's off by a level of abstraction.
+ //
+ // The current solution is to dump the adjusted pointer into an
+ // alloca, which breaks language semantics (because changing the
+ // pointer doesn't change the exception) but at least works.
+ // The better solution would be to filter out non-exact matches
+ // and rethrow them, but this is tricky because the rethrow
+ // really needs to be catchable by other sites at this landing
+ // pad. The best solution is to fix the personality function.
+ } else {
+ // Pull the pointer for the reference type off.
+ const llvm::Type *PtrTy =
+ cast<llvm::PointerType>(LLVMCatchTy)->getElementType();
+
+ // Create the temporary and write the adjusted pointer into it.
+ llvm::Value *ExnPtrTmp = CGF.CreateTempAlloca(PtrTy, "exn.byref.tmp");
+ llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
+ CGF.Builder.CreateStore(Casted, ExnPtrTmp);
+
+ // Bind the reference to the temporary.
+ AdjustedExn = ExnPtrTmp;
+ }
+ }
+
llvm::Value *ExnCast =
CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
CGF.Builder.CreateStore(ExnCast, ParamAddr);
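
A source-level illustration of the case the comment above describes (example only, assuming a record pointee):

    struct Data { int n; };

    void f() {
      try {
        throw new Data();        // the exception object is a Data*
      } catch (Data *&p) {       // caught by reference to pointer
        // Because Data is a record type, the adjusted pointer returned by
        // __cxa_begin_catch is copied into a temporary alloca and the
        // reference binds to that temporary, so this assignment does not
        // modify the exception object; that is the compromise noted above.
        p = 0;
      }
    }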
@@ -1113,8 +1132,11 @@ static void InitCatchParam(CodeGenFunction &CGF,
CGF.StoreComplexToAddr(CGF.LoadComplexFromAddr(Cast, /*volatile*/ false),
ParamAddr, /*volatile*/ false);
} else {
+ unsigned Alignment =
+ CGF.getContext().getDeclAlign(&CatchParam).getQuantity();
llvm::Value *ExnLoad = CGF.Builder.CreateLoad(Cast, "exn.scalar");
- CGF.EmitStoreOfScalar(ExnLoad, ParamAddr, /*volatile*/ false, CatchType);
+ CGF.EmitStoreOfScalar(ExnLoad, ParamAddr, /*volatile*/ false, Alignment,
+ CatchType);
}
return;
}
@@ -1203,7 +1225,7 @@ static void BeginCatch(CodeGenFunction &CGF,
}
namespace {
- struct CallRethrow : EHScopeStack::LazyCleanup {
+ struct CallRethrow : EHScopeStack::Cleanup {
void Emit(CodeGenFunction &CGF, bool IsForEH) {
CGF.EmitCallOrInvoke(getReThrowFn(CGF), 0, 0);
}
@@ -1253,7 +1275,7 @@ void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
// _cxa_rethrow. This needs to happen before __cxa_end_catch is
// called, and so it is pushed after BeginCatch.
if (ImplicitRethrow)
- EHStack.pushLazyCleanup<CallRethrow>(NormalCleanup);
+ EHStack.pushCleanup<CallRethrow>(NormalCleanup);
// Perform the body of the catch.
EmitStmt(C->getHandlerBlock());
@@ -1269,6 +1291,97 @@ void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
EmitBlock(ContBB);
}
+namespace {
+ struct CallEndCatchForFinally : EHScopeStack::Cleanup {
+ llvm::Value *ForEHVar;
+ llvm::Value *EndCatchFn;
+ CallEndCatchForFinally(llvm::Value *ForEHVar, llvm::Value *EndCatchFn)
+ : ForEHVar(ForEHVar), EndCatchFn(EndCatchFn) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ llvm::BasicBlock *EndCatchBB = CGF.createBasicBlock("finally.endcatch");
+ llvm::BasicBlock *CleanupContBB =
+ CGF.createBasicBlock("finally.cleanup.cont");
+
+ llvm::Value *ShouldEndCatch =
+ CGF.Builder.CreateLoad(ForEHVar, "finally.endcatch");
+ CGF.Builder.CreateCondBr(ShouldEndCatch, EndCatchBB, CleanupContBB);
+ CGF.EmitBlock(EndCatchBB);
+ CGF.EmitCallOrInvoke(EndCatchFn, 0, 0); // catch-all, so might throw
+ CGF.EmitBlock(CleanupContBB);
+ }
+ };
+
+ struct PerformFinally : EHScopeStack::Cleanup {
+ const Stmt *Body;
+ llvm::Value *ForEHVar;
+ llvm::Value *EndCatchFn;
+ llvm::Value *RethrowFn;
+ llvm::Value *SavedExnVar;
+
+ PerformFinally(const Stmt *Body, llvm::Value *ForEHVar,
+ llvm::Value *EndCatchFn,
+ llvm::Value *RethrowFn, llvm::Value *SavedExnVar)
+ : Body(Body), ForEHVar(ForEHVar), EndCatchFn(EndCatchFn),
+ RethrowFn(RethrowFn), SavedExnVar(SavedExnVar) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ // Enter a cleanup to call the end-catch function if one was provided.
+ if (EndCatchFn)
+ CGF.EHStack.pushCleanup<CallEndCatchForFinally>(NormalAndEHCleanup,
+ ForEHVar, EndCatchFn);
+
+ // Save the current cleanup destination in case there are
+ // cleanups in the finally block.
+ llvm::Value *SavedCleanupDest =
+ CGF.Builder.CreateLoad(CGF.getNormalCleanupDestSlot(),
+ "cleanup.dest.saved");
+
+ // Emit the finally block.
+ CGF.EmitStmt(Body);
+
+ // If the end of the finally is reachable, check whether this was
+ // for EH. If so, rethrow.
+ if (CGF.HaveInsertPoint()) {
+ llvm::BasicBlock *RethrowBB = CGF.createBasicBlock("finally.rethrow");
+ llvm::BasicBlock *ContBB = CGF.createBasicBlock("finally.cont");
+
+ llvm::Value *ShouldRethrow =
+ CGF.Builder.CreateLoad(ForEHVar, "finally.shouldthrow");
+ CGF.Builder.CreateCondBr(ShouldRethrow, RethrowBB, ContBB);
+
+ CGF.EmitBlock(RethrowBB);
+ if (SavedExnVar) {
+ llvm::Value *Args[] = { CGF.Builder.CreateLoad(SavedExnVar) };
+ CGF.EmitCallOrInvoke(RethrowFn, Args, Args+1);
+ } else {
+ CGF.EmitCallOrInvoke(RethrowFn, 0, 0);
+ }
+ CGF.Builder.CreateUnreachable();
+
+ CGF.EmitBlock(ContBB);
+
+ // Restore the cleanup destination.
+ CGF.Builder.CreateStore(SavedCleanupDest,
+ CGF.getNormalCleanupDestSlot());
+ }
+
+ // Leave the end-catch cleanup. As an optimization, pretend that
+ // the fallthrough path was inaccessible; we've dynamically proven
+ // that we're not in the EH case along that path.
+ if (EndCatchFn) {
+ CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();
+ CGF.PopCleanupBlock();
+ CGF.Builder.restoreIP(SavedIP);
+ }
+
+ // Now make sure we actually have an insertion point or the
+ // cleanup gods will hate us.
+ CGF.EnsureInsertPoint();
+ }
+ };
+}
+
/// Enters a finally block for an implementation using zero-cost
/// exceptions. This is mostly general, but hard-codes some
/// language/ABI-specific behavior in the catch-all sections.
@@ -1320,62 +1433,9 @@ CodeGenFunction::EnterFinallyBlock(const Stmt *Body,
InitTempAlloca(ForEHVar, llvm::ConstantInt::getFalse(getLLVMContext()));
// Enter a normal cleanup which will perform the @finally block.
- {
- CodeGenFunction::CleanupBlock Cleanup(*this, NormalCleanup);
-
- // Enter a cleanup to call the end-catch function if one was provided.
- if (EndCatchFn) {
- CodeGenFunction::CleanupBlock FinallyExitCleanup(CGF, NormalAndEHCleanup);
-
- llvm::BasicBlock *EndCatchBB = createBasicBlock("finally.endcatch");
- llvm::BasicBlock *CleanupContBB = createBasicBlock("finally.cleanup.cont");
-
- llvm::Value *ShouldEndCatch =
- Builder.CreateLoad(ForEHVar, "finally.endcatch");
- Builder.CreateCondBr(ShouldEndCatch, EndCatchBB, CleanupContBB);
- EmitBlock(EndCatchBB);
- EmitCallOrInvoke(EndCatchFn, 0, 0); // catch-all, so might throw
- EmitBlock(CleanupContBB);
- }
-
- // Emit the finally block.
- EmitStmt(Body);
-
- // If the end of the finally is reachable, check whether this was
- // for EH. If so, rethrow.
- if (HaveInsertPoint()) {
- llvm::BasicBlock *RethrowBB = createBasicBlock("finally.rethrow");
- llvm::BasicBlock *ContBB = createBasicBlock("finally.cont");
-
- llvm::Value *ShouldRethrow =
- Builder.CreateLoad(ForEHVar, "finally.shouldthrow");
- Builder.CreateCondBr(ShouldRethrow, RethrowBB, ContBB);
-
- EmitBlock(RethrowBB);
- if (SavedExnVar) {
- llvm::Value *Args[] = { Builder.CreateLoad(SavedExnVar) };
- EmitCallOrInvoke(RethrowFn, Args, Args+1);
- } else {
- EmitCallOrInvoke(RethrowFn, 0, 0);
- }
- Builder.CreateUnreachable();
-
- EmitBlock(ContBB);
- }
-
- // Leave the end-catch cleanup. As an optimization, pretend that
- // the fallthrough path was inaccessible; we've dynamically proven
- // that we're not in the EH case along that path.
- if (EndCatchFn) {
- CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
- PopCleanupBlock();
- Builder.restoreIP(SavedIP);
- }
-
- // Now make sure we actually have an insertion point or the
- // cleanup gods will hate us.
- EnsureInsertPoint();
- }
+ EHStack.pushCleanup<PerformFinally>(NormalCleanup, Body,
+ ForEHVar, EndCatchFn,
+ RethrowFn, SavedExnVar);
// Enter a catch-all scope.
llvm::BasicBlock *CatchAllBB = createBasicBlock("finally.catchall");
@@ -1437,10 +1497,12 @@ llvm::BasicBlock *CodeGenFunction::getTerminateLandingPad() {
llvm::CallInst *Exn =
Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::eh_exception), "exn");
Exn->setDoesNotThrow();
+
+ const EHPersonality &Personality = EHPersonality::get(CGM.getLangOptions());
// Tell the backend what the exception table should be:
// nothing but a catch-all.
- llvm::Value *Args[3] = { Exn, getPersonalityFn(*this),
+ llvm::Value *Args[3] = { Exn, getPersonalityFn(*this, Personality),
getCatchAllValue(*this) };
Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::eh_selector),
Args, Args+3, "eh.selector")
@@ -1478,69 +1540,35 @@ llvm::BasicBlock *CodeGenFunction::getTerminateHandler() {
return TerminateHandler;
}
-CodeGenFunction::CleanupBlock::CleanupBlock(CodeGenFunction &CGF,
- CleanupKind Kind)
- : CGF(CGF), SavedIP(CGF.Builder.saveIP()), NormalCleanupExitBB(0) {
- llvm::BasicBlock *EntryBB = CGF.createBasicBlock("cleanup");
- CGF.Builder.SetInsertPoint(EntryBB);
-
- switch (Kind) {
- case NormalAndEHCleanup:
- NormalCleanupEntryBB = EHCleanupEntryBB = EntryBB;
- break;
-
- case NormalCleanup:
- NormalCleanupEntryBB = EntryBB;
- EHCleanupEntryBB = 0;
- break;
-
- case EHCleanup:
- NormalCleanupEntryBB = 0;
- EHCleanupEntryBB = EntryBB;
- CGF.EHStack.pushTerminate();
- break;
- }
-}
+CodeGenFunction::UnwindDest CodeGenFunction::getRethrowDest() {
+ if (RethrowBlock.isValid()) return RethrowBlock;
-void CodeGenFunction::CleanupBlock::beginEHCleanup() {
- assert(EHCleanupEntryBB == 0 && "already started an EH cleanup");
- NormalCleanupExitBB = CGF.Builder.GetInsertBlock();
- assert(NormalCleanupExitBB && "end of normal cleanup is unreachable");
-
- EHCleanupEntryBB = CGF.createBasicBlock("eh.cleanup");
- CGF.Builder.SetInsertPoint(EHCleanupEntryBB);
- CGF.EHStack.pushTerminate();
-}
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveIP();
-CodeGenFunction::CleanupBlock::~CleanupBlock() {
- llvm::BasicBlock *EHCleanupExitBB = 0;
+ // We emit a jump to a notional label at the outermost unwind state.
+ llvm::BasicBlock *Unwind = createBasicBlock("eh.resume");
+ Builder.SetInsertPoint(Unwind);
- // If we're currently writing the EH cleanup...
- if (EHCleanupEntryBB) {
- // Set the EH cleanup exit block.
- EHCleanupExitBB = CGF.Builder.GetInsertBlock();
- assert(EHCleanupExitBB && "end of EH cleanup is unreachable");
+ const EHPersonality &Personality = EHPersonality::get(CGM.getLangOptions());
- // If we're actually writing both at once, set the normal exit, too.
- if (EHCleanupEntryBB == NormalCleanupEntryBB)
- NormalCleanupExitBB = EHCleanupExitBB;
+ // This can always be a call because we necessarily didn't find
+ // anything on the EH stack which needs our help.
+ llvm::Constant *RethrowFn;
+ if (const char *RethrowName = Personality.getCatchallRethrowFnName())
+ RethrowFn = getCatchallRethrowFn(*this, RethrowName);
+ else
+ RethrowFn = getUnwindResumeOrRethrowFn();
- // Otherwise, we must have pushed a terminate handler.
- else
- CGF.EHStack.popTerminate();
+ Builder.CreateCall(RethrowFn, Builder.CreateLoad(getExceptionSlot()))
+ ->setDoesNotReturn();
+ Builder.CreateUnreachable();
- // Otherwise, just set the normal cleanup exit block.
- } else {
- NormalCleanupExitBB = CGF.Builder.GetInsertBlock();
- assert(NormalCleanupExitBB && "end of normal cleanup is unreachable");
- }
-
- CGF.EHStack.pushCleanup(NormalCleanupEntryBB, NormalCleanupExitBB,
- EHCleanupEntryBB, EHCleanupExitBB);
+ Builder.restoreIP(SavedIP);
- CGF.Builder.restoreIP(SavedIP);
+ RethrowBlock = UnwindDest(Unwind, EHStack.stable_end(), 0);
+ return RethrowBlock;
}
-EHScopeStack::LazyCleanup::~LazyCleanup() {
- llvm_unreachable("LazyCleanup is indestructable");
+EHScopeStack::Cleanup::~Cleanup() {
+ llvm_unreachable("Cleanup is indestructable");
}
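
For readers following the LazyCleanup-to-Cleanup rename, a minimal sketch of how such a cleanup is declared and pushed. CallRelease and getReleaseFn are hypothetical placeholders, not functions in this patch; the shape mirrors FreeExceptionCleanup and CallRethrow above:

    namespace {
      struct CallRelease : EHScopeStack::Cleanup {
        llvm::Value *Obj;
        CallRelease(llvm::Value *Obj) : Obj(Obj) {}
        void Emit(CodeGenFunction &CGF, bool IsForEH) {
          llvm::Value *Args[] = { Obj };
          CGF.EmitCallOrInvoke(getReleaseFn(CGF), Args, Args + 1);
        }
      };
    }
    // ...while emitting the construct that owns Obj:
    CGF.EHStack.pushCleanup<CallRelease>(NormalAndEHCleanup, Obj);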
diff --git a/lib/CodeGen/CGException.h b/lib/CodeGen/CGException.h
index 80739cd8d73e..f1294746a42d 100644
--- a/lib/CodeGen/CGException.h
+++ b/lib/CodeGen/CGException.h
@@ -27,17 +27,43 @@ namespace llvm {
namespace clang {
namespace CodeGen {
+/// The exceptions personality for a function.
+class EHPersonality {
+ const char *PersonalityFn;
+
+ // If this is non-null, this personality requires a non-standard
+ // function for rethrowing an exception after a catchall cleanup.
+ // This function must have prototype void(void*).
+ const char *CatchallRethrowFn;
+
+ EHPersonality(const char *PersonalityFn,
+ const char *CatchallRethrowFn = 0)
+ : PersonalityFn(PersonalityFn),
+ CatchallRethrowFn(CatchallRethrowFn) {}
+
+public:
+ static const EHPersonality &get(const LangOptions &Lang);
+ static const EHPersonality GNU_C;
+ static const EHPersonality GNU_ObjC;
+ static const EHPersonality NeXT_ObjC;
+ static const EHPersonality GNU_CPlusPlus;
+ static const EHPersonality GNU_CPlusPlus_SJLJ;
+
+ const char *getPersonalityFnName() const { return PersonalityFn; }
+ const char *getCatchallRethrowFnName() const { return CatchallRethrowFn; }
+};
+
/// A protected scope for zero-cost EH handling.
class EHScope {
llvm::BasicBlock *CachedLandingPad;
- unsigned K : 3;
+ unsigned K : 2;
protected:
- enum { BitsRemaining = 29 };
+ enum { BitsRemaining = 30 };
public:
- enum Kind { Cleanup, LazyCleanup, Catch, Terminate, Filter };
+ enum Kind { Cleanup, Catch, Terminate, Filter };
EHScope(Kind K) : CachedLandingPad(0), K(K) {}
@@ -74,15 +100,13 @@ public:
/// The catch handler for this type.
llvm::BasicBlock *Block;
- static Handler make(llvm::Value *Type, llvm::BasicBlock *Block) {
- Handler Temp;
- Temp.Type = Type;
- Temp.Block = Block;
- return Temp;
- }
+ /// The unwind destination index for this handler.
+ unsigned Index;
};
private:
+ friend class EHScopeStack;
+
Handler *getHandlers() {
return reinterpret_cast<Handler*>(this+1);
}
@@ -110,7 +134,8 @@ public:
void setHandler(unsigned I, llvm::Value *Type, llvm::BasicBlock *Block) {
assert(I < getNumHandlers());
- getHandlers()[I] = Handler::make(Type, Block);
+ getHandlers()[I].Type = Type;
+ getHandlers()[I].Block = Block;
}
const Handler &getHandler(unsigned I) const {
@@ -128,21 +153,27 @@ public:
};
/// A cleanup scope which generates the cleanup blocks lazily.
-class EHLazyCleanupScope : public EHScope {
+class EHCleanupScope : public EHScope {
/// Whether this cleanup needs to be run along normal edges.
bool IsNormalCleanup : 1;
/// Whether this cleanup needs to be run along exception edges.
bool IsEHCleanup : 1;
- /// The amount of extra storage needed by the LazyCleanup.
+ /// Whether this cleanup was activated before all normal uses.
+ bool ActivatedBeforeNormalUse : 1;
+
+ /// Whether this cleanup was activated before all EH uses.
+ bool ActivatedBeforeEHUse : 1;
+
+ /// The amount of extra storage needed by the Cleanup.
/// Always a multiple of the scope-stack alignment.
unsigned CleanupSize : 12;
/// The number of fixups required by enclosing scopes (not including
/// this one). If this is the top cleanup scope, all the fixups
/// from this index onwards belong to this scope.
- unsigned FixupDepth : BitsRemaining - 14;
+ unsigned FixupDepth : BitsRemaining - 16;
/// The nearest normal cleanup scope enclosing this one.
EHScopeStack::stable_iterator EnclosingNormal;
@@ -158,27 +189,78 @@ class EHLazyCleanupScope : public EHScope {
/// created if needed before the cleanup is popped.
llvm::BasicBlock *EHBlock;
+ /// An optional i1 variable indicating whether this cleanup has been
+ /// activated yet. This has one of three states:
+ /// - it is null if the cleanup is inactive
+ /// - it is activeSentinel() if the cleanup is active and was not
+ /// required before activation
+ /// - it points to a valid variable
+ llvm::AllocaInst *ActiveVar;
+
+ /// Extra information required for cleanups that have resolved
+ /// branches through them. This has to be allocated on the side
+ /// because everything on the cleanup stack has to be trivially
+ /// movable.
+ struct ExtInfo {
+ /// The destinations of normal branch-afters and branch-throughs.
+ llvm::SmallPtrSet<llvm::BasicBlock*, 4> Branches;
+
+ /// Normal branch-afters.
+ llvm::SmallVector<std::pair<llvm::BasicBlock*,llvm::ConstantInt*>, 4>
+ BranchAfters;
+
+ /// The destinations of EH branch-afters and branch-throughs.
+ /// TODO: optimize for the extremely common case of a single
+ /// branch-through.
+ llvm::SmallPtrSet<llvm::BasicBlock*, 4> EHBranches;
+
+ /// EH branch-afters.
+ llvm::SmallVector<std::pair<llvm::BasicBlock*,llvm::ConstantInt*>, 4>
+ EHBranchAfters;
+ };
+ mutable struct ExtInfo *ExtInfo;
+
+ struct ExtInfo &getExtInfo() {
+ if (!ExtInfo) ExtInfo = new struct ExtInfo();
+ return *ExtInfo;
+ }
+
+ const struct ExtInfo &getExtInfo() const {
+ if (!ExtInfo) ExtInfo = new struct ExtInfo();
+ return *ExtInfo;
+ }
+
public:
/// Gets the size required for a lazy cleanup scope with the given
/// cleanup-data requirements.
static size_t getSizeForCleanupSize(size_t Size) {
- return sizeof(EHLazyCleanupScope) + Size;
+ return sizeof(EHCleanupScope) + Size;
}
size_t getAllocatedSize() const {
- return sizeof(EHLazyCleanupScope) + CleanupSize;
+ return sizeof(EHCleanupScope) + CleanupSize;
}
- EHLazyCleanupScope(bool IsNormal, bool IsEH, unsigned CleanupSize,
- unsigned FixupDepth,
- EHScopeStack::stable_iterator EnclosingNormal,
- EHScopeStack::stable_iterator EnclosingEH)
- : EHScope(EHScope::LazyCleanup),
+ EHCleanupScope(bool IsNormal, bool IsEH, bool IsActive,
+ unsigned CleanupSize, unsigned FixupDepth,
+ EHScopeStack::stable_iterator EnclosingNormal,
+ EHScopeStack::stable_iterator EnclosingEH)
+ : EHScope(EHScope::Cleanup),
IsNormalCleanup(IsNormal), IsEHCleanup(IsEH),
+ ActivatedBeforeNormalUse(IsActive),
+ ActivatedBeforeEHUse(IsActive),
CleanupSize(CleanupSize), FixupDepth(FixupDepth),
EnclosingNormal(EnclosingNormal), EnclosingEH(EnclosingEH),
- NormalBlock(0), EHBlock(0)
- {}
+ NormalBlock(0), EHBlock(0),
+ ActiveVar(IsActive ? activeSentinel() : 0),
+ ExtInfo(0)
+ {
+ assert(this->CleanupSize == CleanupSize && "cleanup size overflow");
+ }
+
+ ~EHCleanupScope() {
+ delete ExtInfo;
+ }
bool isNormalCleanup() const { return IsNormalCleanup; }
llvm::BasicBlock *getNormalBlock() const { return NormalBlock; }
@@ -188,6 +270,20 @@ public:
llvm::BasicBlock *getEHBlock() const { return EHBlock; }
void setEHBlock(llvm::BasicBlock *BB) { EHBlock = BB; }
+ static llvm::AllocaInst *activeSentinel() {
+ return reinterpret_cast<llvm::AllocaInst*>(1);
+ }
+
+ bool isActive() const { return ActiveVar != 0; }
+ llvm::AllocaInst *getActiveVar() const { return ActiveVar; }
+ void setActiveVar(llvm::AllocaInst *Var) { ActiveVar = Var; }
+
+ bool wasActivatedBeforeNormalUse() const { return ActivatedBeforeNormalUse; }
+ void setActivatedBeforeNormalUse(bool B) { ActivatedBeforeNormalUse = B; }
+
+ bool wasActivatedBeforeEHUse() const { return ActivatedBeforeEHUse; }
+ void setActivatedBeforeEHUse(bool B) { ActivatedBeforeEHUse = B; }
+
unsigned getFixupDepth() const { return FixupDepth; }
EHScopeStack::stable_iterator getEnclosingNormalCleanup() const {
return EnclosingNormal;
@@ -199,67 +295,108 @@ public:
size_t getCleanupSize() const { return CleanupSize; }
void *getCleanupBuffer() { return this + 1; }
- EHScopeStack::LazyCleanup *getCleanup() {
- return reinterpret_cast<EHScopeStack::LazyCleanup*>(getCleanupBuffer());
+ EHScopeStack::Cleanup *getCleanup() {
+ return reinterpret_cast<EHScopeStack::Cleanup*>(getCleanupBuffer());
}
- static bool classof(const EHScope *Scope) {
- return (Scope->getKind() == LazyCleanup);
+ /// True if this cleanup scope has any branch-afters or branch-throughs.
+ bool hasBranches() const { return ExtInfo && !ExtInfo->Branches.empty(); }
+
+ /// Add a branch-after to this cleanup scope. A branch-after is a
+ /// branch from a point protected by this (normal) cleanup to a
+ /// point in the normal cleanup scope immediately containing it.
+ /// For example,
+ /// for (;;) { A a; break; }
+ /// contains a branch-after.
+ ///
+ /// Branch-afters each have their own destination out of the
+ /// cleanup, guaranteed distinct from anything else threaded through
+ /// it. Therefore branch-afters usually force a switch after the
+ /// cleanup.
+ void addBranchAfter(llvm::ConstantInt *Index,
+ llvm::BasicBlock *Block) {
+ struct ExtInfo &ExtInfo = getExtInfo();
+ if (ExtInfo.Branches.insert(Block))
+ ExtInfo.BranchAfters.push_back(std::make_pair(Block, Index));
}
-};
-/// A scope which needs to execute some code if we try to unwind ---
-/// either normally, via the EH mechanism, or both --- through it.
-class EHCleanupScope : public EHScope {
- /// The number of fixups required by enclosing scopes (not including
- /// this one). If this is the top cleanup scope, all the fixups
- /// from this index onwards belong to this scope.
- unsigned FixupDepth : BitsRemaining;
+ /// Return the number of unique branch-afters on this scope.
+ unsigned getNumBranchAfters() const {
+ return ExtInfo ? ExtInfo->BranchAfters.size() : 0;
+ }
- /// The nearest normal cleanup scope enclosing this one.
- EHScopeStack::stable_iterator EnclosingNormal;
+ llvm::BasicBlock *getBranchAfterBlock(unsigned I) const {
+ assert(I < getNumBranchAfters());
+ return ExtInfo->BranchAfters[I].first;
+ }
- /// The nearest EH cleanup scope enclosing this one.
- EHScopeStack::stable_iterator EnclosingEH;
+ llvm::ConstantInt *getBranchAfterIndex(unsigned I) const {
+ assert(I < getNumBranchAfters());
+ return ExtInfo->BranchAfters[I].second;
+ }
- llvm::BasicBlock *NormalEntry;
- llvm::BasicBlock *NormalExit;
- llvm::BasicBlock *EHEntry;
- llvm::BasicBlock *EHExit;
+ /// Add a branch-through to this cleanup scope. A branch-through is
+ /// a branch from a scope protected by this (normal) cleanup to an
+ /// enclosing scope other than the immediately-enclosing normal
+ /// cleanup scope.
+ ///
+ /// In the following example, the branch through B's scope is a
+ /// branch-through, while the branch through A's scope is a
+ /// branch-after:
+ /// for (;;) { A a; B b; break; }
+ ///
+ /// All branch-throughs have a common destination out of the
+ /// cleanup, one possibly shared with the fall-through. Therefore
+ /// branch-throughs usually don't force a switch after the cleanup.
+ ///
+ /// \return true if the branch-through was new to this scope
+ bool addBranchThrough(llvm::BasicBlock *Block) {
+ return getExtInfo().Branches.insert(Block);
+ }
-public:
- static size_t getSize() { return sizeof(EHCleanupScope); }
+ /// Determines if this cleanup scope has any branch throughs.
+ bool hasBranchThroughs() const {
+ if (!ExtInfo) return false;
+ return (ExtInfo->BranchAfters.size() != ExtInfo->Branches.size());
+ }
- EHCleanupScope(unsigned FixupDepth,
- EHScopeStack::stable_iterator EnclosingNormal,
- EHScopeStack::stable_iterator EnclosingEH,
- llvm::BasicBlock *NormalEntry, llvm::BasicBlock *NormalExit,
- llvm::BasicBlock *EHEntry, llvm::BasicBlock *EHExit)
- : EHScope(Cleanup), FixupDepth(FixupDepth),
- EnclosingNormal(EnclosingNormal), EnclosingEH(EnclosingEH),
- NormalEntry(NormalEntry), NormalExit(NormalExit),
- EHEntry(EHEntry), EHExit(EHExit) {
- assert((NormalEntry != 0) == (NormalExit != 0));
- assert((EHEntry != 0) == (EHExit != 0));
+ // Same stuff, only for EH branches instead of normal branches.
+ // It's quite possible that we could find a better representation
+ // for this.
+
+ bool hasEHBranches() const { return ExtInfo && !ExtInfo->EHBranches.empty(); }
+ void addEHBranchAfter(llvm::ConstantInt *Index,
+ llvm::BasicBlock *Block) {
+ struct ExtInfo &ExtInfo = getExtInfo();
+ if (ExtInfo.EHBranches.insert(Block))
+ ExtInfo.EHBranchAfters.push_back(std::make_pair(Block, Index));
}
- bool isNormalCleanup() const { return NormalEntry != 0; }
- bool isEHCleanup() const { return EHEntry != 0; }
+ unsigned getNumEHBranchAfters() const {
+ return ExtInfo ? ExtInfo->EHBranchAfters.size() : 0;
+ }
- llvm::BasicBlock *getNormalEntry() const { return NormalEntry; }
- llvm::BasicBlock *getNormalExit() const { return NormalExit; }
- llvm::BasicBlock *getEHEntry() const { return EHEntry; }
- llvm::BasicBlock *getEHExit() const { return EHExit; }
- unsigned getFixupDepth() const { return FixupDepth; }
- EHScopeStack::stable_iterator getEnclosingNormalCleanup() const {
- return EnclosingNormal;
+ llvm::BasicBlock *getEHBranchAfterBlock(unsigned I) const {
+ assert(I < getNumEHBranchAfters());
+ return ExtInfo->EHBranchAfters[I].first;
}
- EHScopeStack::stable_iterator getEnclosingEHCleanup() const {
- return EnclosingEH;
+
+ llvm::ConstantInt *getEHBranchAfterIndex(unsigned I) const {
+ assert(I < getNumEHBranchAfters());
+ return ExtInfo->EHBranchAfters[I].second;
+ }
+
+ bool addEHBranchThrough(llvm::BasicBlock *Block) {
+ return getExtInfo().EHBranches.insert(Block);
+ }
+
+ bool hasEHBranchThroughs() const {
+ if (!ExtInfo) return false;
+ return (ExtInfo->EHBranchAfters.size() != ExtInfo->EHBranches.size());
}
static bool classof(const EHScope *Scope) {
- return Scope->getKind() == Cleanup;
+ return (Scope->getKind() == Cleanup);
}
};
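
To make the branch-after and branch-through comments above concrete, a small illustration (A and B are placeholder types with destructors):

    void example() {
      for (;;) {
        A a;      // a's destructor is the cleanup for the loop body
        B b;      // b's destructor is nested inside it
        break;    // leaving the loop:
                  //  - for b's cleanup this is a branch-through, since the
                  //    destination lies beyond the enclosing cleanup scope
                  //  - for a's cleanup it is a branch-after, since the
                  //    destination is in the scope immediately containing it
      }
    }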
@@ -310,10 +447,13 @@ public:
/// An exceptions scope which calls std::terminate if any exception
/// reaches it.
class EHTerminateScope : public EHScope {
+ unsigned DestIndex : BitsRemaining;
public:
- EHTerminateScope() : EHScope(Terminate) {}
+ EHTerminateScope(unsigned Index) : EHScope(Terminate), DestIndex(Index) {}
static size_t getSize() { return sizeof(EHTerminateScope); }
+ unsigned getDestIndex() const { return DestIndex; }
+
static bool classof(const EHScope *Scope) {
return Scope->getKind() == Terminate;
}
@@ -348,13 +488,9 @@ public:
static_cast<const EHFilterScope*>(get())->getNumFilters());
break;
- case EHScope::LazyCleanup:
- Ptr += static_cast<const EHLazyCleanupScope*>(get())
- ->getAllocatedSize();
- break;
-
case EHScope::Cleanup:
- Ptr += EHCleanupScope::getSize();
+ Ptr += static_cast<const EHCleanupScope*>(get())
+ ->getAllocatedSize();
break;
case EHScope::Terminate:
@@ -377,6 +513,9 @@ public:
return copy;
}
+ bool encloses(iterator other) const { return Ptr >= other.Ptr; }
+ bool strictlyEncloses(iterator other) const { return Ptr > other.Ptr; }
+
bool operator==(iterator other) const { return Ptr == other.Ptr; }
bool operator!=(iterator other) const { return Ptr != other.Ptr; }
};
@@ -396,6 +535,8 @@ inline void EHScopeStack::popCatch() {
StartOfData += EHCatchScope::getSizeForNumHandlers(
cast<EHCatchScope>(*begin()).getNumHandlers());
+ if (empty()) NextEHDestIndex = FirstEHDestIndex;
+
assert(CatchDepth > 0 && "mismatched catch/terminate push/pop");
CatchDepth--;
}
@@ -406,6 +547,8 @@ inline void EHScopeStack::popTerminate() {
assert(isa<EHTerminateScope>(*begin()));
StartOfData += EHTerminateScope::getSize();
+ if (empty()) NextEHDestIndex = FirstEHDestIndex;
+
assert(CatchDepth > 0 && "mismatched catch/terminate push/pop");
CatchDepth--;
}
@@ -422,6 +565,28 @@ EHScopeStack::stabilize(iterator ir) const {
return stable_iterator(EndOfBuffer - ir.Ptr);
}
+inline EHScopeStack::stable_iterator
+EHScopeStack::getInnermostActiveNormalCleanup() const {
+ for (EHScopeStack::stable_iterator
+ I = getInnermostNormalCleanup(), E = stable_end(); I != E; ) {
+ EHCleanupScope &S = cast<EHCleanupScope>(*find(I));
+ if (S.isActive()) return I;
+ I = S.getEnclosingNormalCleanup();
+ }
+ return stable_end();
+}
+
+inline EHScopeStack::stable_iterator
+EHScopeStack::getInnermostActiveEHCleanup() const {
+ for (EHScopeStack::stable_iterator
+ I = getInnermostEHCleanup(), E = stable_end(); I != E; ) {
+ EHCleanupScope &S = cast<EHCleanupScope>(*find(I));
+ if (S.isActive()) return I;
+ I = S.getEnclosingEHCleanup();
+ }
+ return stable_end();
+}
+
}
}
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index 43bab9fece6e..3750ab80c3fc 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -14,6 +14,7 @@
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGCall.h"
+#include "CGCXXABI.h"
#include "CGRecordLayout.h"
#include "CGObjCRuntime.h"
#include "clang/AST/ASTContext.h"
@@ -65,22 +66,12 @@ llvm::AllocaInst *CodeGenFunction::CreateMemTemp(QualType Ty,
/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
- QualType BoolTy = getContext().BoolTy;
- if (E->getType()->isMemberFunctionPointerType()) {
- LValue LV = EmitAggExprToLValue(E);
-
- // Get the pointer.
- llvm::Value *FuncPtr = Builder.CreateStructGEP(LV.getAddress(), 0,
- "src.ptr");
- FuncPtr = Builder.CreateLoad(FuncPtr);
-
- llvm::Value *IsNotNull =
- Builder.CreateICmpNE(FuncPtr,
- llvm::Constant::getNullValue(FuncPtr->getType()),
- "tobool");
-
- return IsNotNull;
+ if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
+ llvm::Value *MemPtr = EmitScalarExpr(E);
+ return CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
}
+
+ QualType BoolTy = getContext().BoolTy;
if (!E->getType()->isAnyComplexType())
return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy);
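
An illustrative source construct that now takes this path (example only):

    struct S { void f(); int x; };

    bool test(void (S::*mp)(), int S::*dp) {
      // Each operand's conversion to bool is delegated to the C++ ABI's
      // EmitMemberPointerIsNotNull hook instead of a hand-rolled
      // struct-GEP-and-compare sequence.
      return mp && dp;
    }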
@@ -130,7 +121,7 @@ void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
EmitAggExpr(E, Location, IsLocationVolatile, /*Ignore*/ false, IsInit);
else {
RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
- LValue LV = LValue::MakeAddr(Location, MakeQualifiers(E->getType()));
+ LValue LV = MakeAddrLValue(Location, E->getType());
EmitStoreThroughLValue(RV, LV, E->getType());
}
}
@@ -142,17 +133,14 @@ struct SubobjectAdjustment {
union {
struct {
- const CXXBaseSpecifierArray *BasePath;
+ const CastExpr *BasePath;
const CXXRecordDecl *DerivedClass;
} DerivedToBase;
- struct {
- FieldDecl *Field;
- unsigned CVRQualifiers;
- } Field;
+ FieldDecl *Field;
};
- SubobjectAdjustment(const CXXBaseSpecifierArray *BasePath,
+ SubobjectAdjustment(const CastExpr *BasePath,
const CXXRecordDecl *DerivedClass)
: Kind(DerivedToBaseAdjustment)
{
@@ -160,11 +148,10 @@ struct SubobjectAdjustment {
DerivedToBase.DerivedClass = DerivedClass;
}
- SubobjectAdjustment(FieldDecl *Field, unsigned CVRQualifiers)
- : Kind(FieldAdjustment)
+ SubobjectAdjustment(FieldDecl *Field)
+ : Kind(FieldAdjustment)
{
- this->Field.Field = Field;
- this->Field.CVRQualifiers = CVRQualifiers;
+ this->Field = Field;
}
};
@@ -174,7 +161,7 @@ CreateReferenceTemporary(CodeGenFunction& CGF, QualType Type,
if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
if (VD->hasGlobalStorage()) {
llvm::SmallString<256> Name;
- CGF.CGM.getMangleContext().mangleReferenceTemporary(VD, Name);
+ CGF.CGM.getCXXABI().getMangleContext().mangleReferenceTemporary(VD, Name);
const llvm::Type *RefTempTy = CGF.ConvertTypeForMem(Type);
@@ -230,18 +217,17 @@ EmitExprForReferenceBinding(CodeGenFunction& CGF, const Expr* E,
}
if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
- if ((CE->getCastKind() == CastExpr::CK_DerivedToBase ||
- CE->getCastKind() == CastExpr::CK_UncheckedDerivedToBase) &&
+ if ((CE->getCastKind() == CK_DerivedToBase ||
+ CE->getCastKind() == CK_UncheckedDerivedToBase) &&
E->getType()->isRecordType()) {
E = CE->getSubExpr();
CXXRecordDecl *Derived
= cast<CXXRecordDecl>(E->getType()->getAs<RecordType>()->getDecl());
- Adjustments.push_back(SubobjectAdjustment(&CE->getBasePath(),
- Derived));
+ Adjustments.push_back(SubobjectAdjustment(CE, Derived));
continue;
}
- if (CE->getCastKind() == CastExpr::CK_NoOp) {
+ if (CE->getCastKind() == CK_NoOp) {
E = CE->getSubExpr();
continue;
}
@@ -250,8 +236,7 @@ EmitExprForReferenceBinding(CodeGenFunction& CGF, const Expr* E,
ME->getBase()->getType()->isRecordType()) {
if (FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
E = ME->getBase();
- Adjustments.push_back(SubobjectAdjustment(Field,
- E->getType().getCVRQualifiers()));
+ Adjustments.push_back(SubobjectAdjustment(Field));
continue;
}
}
@@ -291,14 +276,14 @@ EmitExprForReferenceBinding(CodeGenFunction& CGF, const Expr* E,
Object =
CGF.GetAddressOfBaseClass(Object,
Adjustment.DerivedToBase.DerivedClass,
- *Adjustment.DerivedToBase.BasePath,
+ Adjustment.DerivedToBase.BasePath->path_begin(),
+ Adjustment.DerivedToBase.BasePath->path_end(),
/*NullCheckValue=*/false);
break;
case SubobjectAdjustment::FieldAdjustment: {
- unsigned CVR = Adjustment.Field.CVRQualifiers;
LValue LV =
- CGF.EmitLValueForField(Object, Adjustment.Field.Field, CVR);
+ CGF.EmitLValueForField(Object, Adjustment.Field, 0);
if (LV.isSimple()) {
Object = LV.getAddress();
break;
@@ -306,11 +291,11 @@ EmitExprForReferenceBinding(CodeGenFunction& CGF, const Expr* E,
// For non-simple lvalues, we actually have to create a copy of
// the object we're binding to.
- QualType T = Adjustment.Field.Field->getType().getNonReferenceType()
- .getUnqualifiedType();
+ QualType T = Adjustment.Field->getType().getNonReferenceType()
+ .getUnqualifiedType();
Object = CreateReferenceTemporary(CGF, T, InitializedDecl);
- LValue TempLV = LValue::MakeAddr(Object,
- Qualifiers::fromCVRMask(CVR));
+ LValue TempLV = CGF.MakeAddrLValue(Object,
+ Adjustment.Field->getType());
CGF.EmitStoreThroughLValue(CGF.EmitLoadOfLValue(LV, T), TempLV, T);
break;
}
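
Illustrative reference bindings that exercise the two adjustment kinds handled here (example only):

    struct Base { int i; };
    struct Derived : Base { };
    Derived make();

    const Base &br = make();     // derived-to-base adjustment on the temporary
    const int  &ir = make().i;   // field adjustment on the temporary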
@@ -330,9 +315,12 @@ EmitExprForReferenceBinding(CodeGenFunction& CGF, const Expr* E,
ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
InitializedDecl);
+
+ unsigned Alignment =
+ CGF.getContext().getTypeAlignInChars(E->getType()).getQuantity();
if (RV.isScalar())
CGF.EmitStoreOfScalar(RV.getScalarVal(), ReferenceTemporary,
- /*Volatile=*/false, E->getType());
+ /*Volatile=*/false, Alignment, E->getType());
else
CGF.StoreComplexToAddr(RV.getComplexVal(), ReferenceTemporary,
/*Volatile=*/false);
@@ -347,7 +335,6 @@ CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
llvm::Value *Value = EmitExprForReferenceBinding(*this, E, ReferenceTemporary,
ReferenceTemporaryDtor,
InitializedDecl);
-
if (!ReferenceTemporaryDtor)
return RValue::get(Value);
@@ -362,16 +349,8 @@ CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
return RValue::get(Value);
}
}
-
- CleanupBlock Cleanup(*this, NormalCleanup);
- EmitCXXDestructorCall(ReferenceTemporaryDtor, Dtor_Complete,
- /*ForVirtualBase=*/false, ReferenceTemporary);
-
- if (Exceptions) {
- Cleanup.beginEHCleanup();
- EmitCXXDestructorCall(ReferenceTemporaryDtor, Dtor_Complete,
- /*ForVirtualBase=*/false, ReferenceTemporary);
- }
+
+ PushDestructorCleanup(ReferenceTemporaryDtor, ReferenceTemporary);
return RValue::get(Value);
}
@@ -462,9 +441,12 @@ RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
return RValue::getComplex(std::make_pair(U, U));
}
+ // If this is a use of an undefined aggregate type, the aggregate must have an
+ // identifiable address. Just because the contents of the value are undefined
+ // doesn't mean that the address can't be taken and compared.
if (hasAggregateLLVMType(Ty)) {
- const llvm::Type *LTy = llvm::PointerType::getUnqual(ConvertType(Ty));
- return RValue::getAggregate(llvm::UndefValue::get(LTy));
+ llvm::Value *DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
+ return RValue::getAggregate(DestPtr);
}
return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
@@ -480,8 +462,7 @@ LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
const char *Name) {
ErrorUnsupported(E, Name);
llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
- return LValue::MakeAddr(llvm::UndefValue::get(Ty),
- MakeQualifiers(E->getType()));
+ return MakeAddrLValue(llvm::UndefValue::get(Ty), E->getType());
}
LValue CodeGenFunction::EmitCheckedLValue(const Expr *E) {
@@ -590,10 +571,12 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
}
llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
- QualType Ty) {
+ unsigned Alignment, QualType Ty) {
llvm::LoadInst *Load = Builder.CreateLoad(Addr, "tmp");
if (Volatile)
Load->setVolatile(true);
+ if (Alignment)
+ Load->setAlignment(Alignment);
// Bool can have different representation in memory than in registers.
llvm::Value *V = Load;
@@ -605,14 +588,18 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
}
void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
- bool Volatile, QualType Ty) {
+ bool Volatile, unsigned Alignment,
+ QualType Ty) {
if (Ty->isBooleanType()) {
// Bool can have different representation in memory than in registers.
const llvm::PointerType *DstPtr = cast<llvm::PointerType>(Addr->getType());
Value = Builder.CreateIntCast(Value, DstPtr->getElementType(), false);
}
- Builder.CreateStore(Value, Addr, Volatile);
+
+ llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
+ if (Alignment)
+ Store->setAlignment(Alignment);
}
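
A sketch of a caller using the new alignment parameters, following the pattern used elsewhere in this patch (D is assumed to be some declaration in scope):

    unsigned Alignment = getContext().getDeclAlign(&D).getQuantity();
    EmitStoreOfScalar(Value, Addr, /*Volatile=*/false, Alignment, Ty);
    llvm::Value *V = EmitLoadOfScalar(Addr, /*Volatile=*/false, Alignment, Ty);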
/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
@@ -628,18 +615,15 @@ RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, QualType ExprType) {
if (LV.isSimple()) {
llvm::Value *Ptr = LV.getAddress();
- const llvm::Type *EltTy =
- cast<llvm::PointerType>(Ptr->getType())->getElementType();
- // Simple scalar l-value.
- //
- // FIXME: We shouldn't have to use isSingleValueType here.
- if (EltTy->isSingleValueType())
- return RValue::get(EmitLoadOfScalar(Ptr, LV.isVolatileQualified(),
- ExprType));
+ // Functions are l-values that don't require loading.
+ if (ExprType->isFunctionType())
+ return RValue::get(Ptr);
+
+ // Everything needs a load.
+ return RValue::get(EmitLoadOfScalar(Ptr, LV.isVolatileQualified(),
+ LV.getAlignment(), ExprType));
- assert(ExprType->isFunctionType() && "Unknown scalar value");
- return RValue::get(Ptr);
}
if (LV.isVectorElt()) {
@@ -836,8 +820,10 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
BytesBetween);
- } else if (Dst.isGlobalObjCRef())
- CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst);
+ } else if (Dst.isGlobalObjCRef()) {
+ CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
+ Dst.isThreadLocalRef());
+ }
else
CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
return;
@@ -845,7 +831,7 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
assert(Src.isScalar() && "Can't emit an agg store with this method");
EmitStoreOfScalar(Src.getScalarVal(), Dst.getAddress(),
- Dst.isVolatileQualified(), Ty);
+ Dst.isVolatileQualified(), Dst.getAlignment(), Ty);
}
void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
@@ -1045,20 +1031,22 @@ static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
return;
if (isa<ObjCIvarRefExpr>(E)) {
- LV.SetObjCIvar(LV, true);
+ LV.setObjCIvar(true);
ObjCIvarRefExpr *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr*>(E));
LV.setBaseIvarExp(Exp->getBase());
- LV.SetObjCArray(LV, E->getType()->isArrayType());
+ LV.setObjCArray(E->getType()->isArrayType());
return;
}
if (const DeclRefExpr *Exp = dyn_cast<DeclRefExpr>(E)) {
if (const VarDecl *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
if ((VD->isBlockVarDecl() && !VD->hasLocalStorage()) ||
- VD->isFileVarDecl())
- LV.SetGlobalObjCRef(LV, true);
+ VD->isFileVarDecl()) {
+ LV.setGlobalObjCRef(true);
+ LV.setThreadLocalRef(VD->isThreadSpecified());
+ }
}
- LV.SetObjCArray(LV, E->getType()->isArrayType());
+ LV.setObjCArray(E->getType()->isArrayType());
return;
}
@@ -1076,7 +1064,7 @@ static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
if (ExpTy->isPointerType())
ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
if (ExpTy->isRecordType())
- LV.SetObjCIvar(LV, false);
+ LV.setObjCIvar(false);
}
return;
}
@@ -1095,11 +1083,11 @@ static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
if (LV.isObjCIvar() && !LV.isObjCArray())
// Using array syntax to assign to what an ivar points to is not the
// same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
- LV.SetObjCIvar(LV, false);
+ LV.setObjCIvar(false);
else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
// Using array syntax to assign to what a global points to is not the
// same as assigning to the global itself. {id *G;} G[i] = 0;
- LV.SetGlobalObjCRef(LV, false);
+ LV.setGlobalObjCRef(false);
return;
}
@@ -1107,7 +1095,7 @@ static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
// We don't know if member is an 'ivar', but this flag is looked at
// only in the context of LV.isObjCIvar().
- LV.SetObjCArray(LV, E->getType()->isArrayType());
+ LV.setObjCArray(E->getType()->isArrayType());
return;
}
}
@@ -1120,7 +1108,8 @@ static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
if (VD->getType()->isReferenceType())
V = CGF.Builder.CreateLoad(V, "tmp");
- LValue LV = LValue::MakeAddr(V, CGF.MakeQualifiers(E->getType()));
+ unsigned Alignment = CGF.getContext().getDeclAlign(VD).getQuantity();
+ LValue LV = CGF.MakeAddrLValue(V, E->getType(), Alignment);
setObjCGCLValueClass(CGF.getContext(), E, LV);
return LV;
}
@@ -1140,20 +1129,18 @@ static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType), "tmp");
}
}
- return LValue::MakeAddr(V, CGF.MakeQualifiers(E->getType()));
+ unsigned Alignment = CGF.getContext().getDeclAlign(FD).getQuantity();
+ return CGF.MakeAddrLValue(V, E->getType(), Alignment);
}
LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
const NamedDecl *ND = E->getDecl();
+ unsigned Alignment = CGF.getContext().getDeclAlign(ND).getQuantity();
if (ND->hasAttr<WeakRefAttr>()) {
const ValueDecl* VD = cast<ValueDecl>(ND);
llvm::Constant *Aliasee = CGM.GetWeakRefReference(VD);
-
- Qualifiers Quals = MakeQualifiers(E->getType());
- LValue LV = LValue::MakeAddr(Aliasee, Quals);
-
- return LV;
+ return MakeAddrLValue(Aliasee, E->getType(), Alignment);
}
if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
@@ -1170,11 +1157,6 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
V = CGM.getStaticLocalDeclAddress(VD);
assert(V && "DeclRefExpr not entered in LocalDeclMap?");
- Qualifiers Quals = MakeQualifiers(E->getType());
- // local variables do not get their gc attribute set.
- // local static?
- if (NonGCable) Quals.removeObjCGCAttr();
-
if (VD->hasAttr<BlocksAttr>()) {
V = Builder.CreateStructGEP(V, 1, "forwarding");
V = Builder.CreateLoad(V);
@@ -1183,21 +1165,31 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
}
if (VD->getType()->isReferenceType())
V = Builder.CreateLoad(V, "tmp");
- LValue LV = LValue::MakeAddr(V, Quals);
- LValue::SetObjCNonGC(LV, NonGCable);
+
+ LValue LV = MakeAddrLValue(V, E->getType(), Alignment);
+ if (NonGCable) {
+ LV.getQuals().removeObjCGCAttr();
+ LV.setNonGC(true);
+ }
setObjCGCLValueClass(getContext(), E, LV);
return LV;
}
+ // If we're emitting an instance method as an independent lvalue,
+ // we're actually emitting a member pointer.
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND))
+ if (MD->isInstance()) {
+ llvm::Value *V = CGM.getCXXABI().EmitMemberPointer(MD);
+ return MakeAddrLValue(V, MD->getType(), Alignment);
+ }
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
return EmitFunctionDeclLValue(*this, E, FD);
- // FIXME: the qualifier check does not seem sufficient here
- if (E->getQualifier()) {
- const FieldDecl *FD = cast<FieldDecl>(ND);
- llvm::Value *V = CGM.EmitPointerToDataMember(FD);
-
- return LValue::MakeAddr(V, MakeQualifiers(FD->getType()));
+ // If we're emitting a field as an independent lvalue, we're
+ // actually emitting a member pointer.
+ if (const FieldDecl *FD = dyn_cast<FieldDecl>(ND)) {
+ llvm::Value *V = CGM.getCXXABI().EmitMemberPointer(FD);
+ return MakeAddrLValue(V, FD->getType(), Alignment);
}
assert(false && "Unhandled DeclRefExpr");
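[Illustrative sketch, not part of the patch; the struct and member names are invented.] The two new branches above fire when a member is named as an independent lvalue, which at the source level is the formation of a member pointer rather than an ordinary reference to the member:

    struct S {
      int field;
      int method(int x);
    };

    int  S::*pField        = &S::field;   // FieldDecl branch: data member pointer
    int (S::*pMethod)(int) = &S::method;  // CXXMethodDecl branch: member function pointer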
@@ -1208,25 +1200,26 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
}
LValue CodeGenFunction::EmitBlockDeclRefLValue(const BlockDeclRefExpr *E) {
- return LValue::MakeAddr(GetAddrOfBlockDecl(E), MakeQualifiers(E->getType()));
+ unsigned Alignment =
+ getContext().getDeclAlign(E->getDecl()).getQuantity();
+ return MakeAddrLValue(GetAddrOfBlockDecl(E), E->getType(), Alignment);
}
LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
// __extension__ doesn't affect lvalue-ness.
- if (E->getOpcode() == UnaryOperator::Extension)
+ if (E->getOpcode() == UO_Extension)
return EmitLValue(E->getSubExpr());
QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
switch (E->getOpcode()) {
default: assert(0 && "Unknown unary operator lvalue!");
- case UnaryOperator::Deref: {
+ case UO_Deref: {
QualType T = E->getSubExpr()->getType()->getPointeeType();
assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
- Qualifiers Quals = MakeQualifiers(T);
- Quals.setAddressSpace(ExprTy.getAddressSpace());
+ LValue LV = MakeAddrLValue(EmitScalarExpr(E->getSubExpr()), T);
+ LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());
- LValue LV = LValue::MakeAddr(EmitScalarExpr(E->getSubExpr()), Quals);
// We should not generate __weak write barrier on indirect reference
// of a pointer to object; as in void foo (__weak id *param); *param = 0;
// But, we continue to generate __strong write barrier on indirect write
@@ -1234,21 +1227,21 @@ LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
if (getContext().getLangOptions().ObjC1 &&
getContext().getLangOptions().getGCMode() != LangOptions::NonGC &&
LV.isObjCWeak())
- LValue::SetObjCNonGC(LV, !E->isOBJCGCCandidate(getContext()));
+ LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
return LV;
}
- case UnaryOperator::Real:
- case UnaryOperator::Imag: {
+ case UO_Real:
+ case UO_Imag: {
LValue LV = EmitLValue(E->getSubExpr());
- unsigned Idx = E->getOpcode() == UnaryOperator::Imag;
- return LValue::MakeAddr(Builder.CreateStructGEP(LV.getAddress(),
+ unsigned Idx = E->getOpcode() == UO_Imag;
+ return MakeAddrLValue(Builder.CreateStructGEP(LV.getAddress(),
Idx, "idx"),
- MakeQualifiers(ExprTy));
+ ExprTy);
}
- case UnaryOperator::PreInc:
- case UnaryOperator::PreDec: {
+ case UO_PreInc:
+ case UO_PreDec: {
LValue LV = EmitLValue(E->getSubExpr());
- bool isInc = E->getOpcode() == UnaryOperator::PreInc;
+ bool isInc = E->getOpcode() == UO_PreInc;
if (E->getType()->isAnyComplexType())
EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
@@ -1260,53 +1253,56 @@ LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
}
LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
- return LValue::MakeAddr(CGM.GetAddrOfConstantStringFromLiteral(E),
- Qualifiers());
+ return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
+ E->getType());
}
LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
- return LValue::MakeAddr(CGM.GetAddrOfConstantStringFromObjCEncode(E),
- Qualifiers());
+ return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
+ E->getType());
}
-LValue CodeGenFunction::EmitPredefinedFunctionName(unsigned Type) {
- std::string GlobalVarName;
+LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
+ switch (E->getIdentType()) {
+ default:
+ return EmitUnsupportedLValue(E, "predefined expression");
- switch (Type) {
- default: assert(0 && "Invalid type");
case PredefinedExpr::Func:
- GlobalVarName = "__func__.";
- break;
case PredefinedExpr::Function:
- GlobalVarName = "__FUNCTION__.";
- break;
- case PredefinedExpr::PrettyFunction:
- GlobalVarName = "__PRETTY_FUNCTION__.";
- break;
- }
+ case PredefinedExpr::PrettyFunction: {
+ unsigned Type = E->getIdentType();
+ std::string GlobalVarName;
+
+ switch (Type) {
+ default: assert(0 && "Invalid type");
+ case PredefinedExpr::Func:
+ GlobalVarName = "__func__.";
+ break;
+ case PredefinedExpr::Function:
+ GlobalVarName = "__FUNCTION__.";
+ break;
+ case PredefinedExpr::PrettyFunction:
+ GlobalVarName = "__PRETTY_FUNCTION__.";
+ break;
+ }
- llvm::StringRef FnName = CurFn->getName();
- if (FnName.startswith("\01"))
- FnName = FnName.substr(1);
- GlobalVarName += FnName;
+ llvm::StringRef FnName = CurFn->getName();
+ if (FnName.startswith("\01"))
+ FnName = FnName.substr(1);
+ GlobalVarName += FnName;
- std::string FunctionName =
- PredefinedExpr::ComputeName((PredefinedExpr::IdentType)Type, CurCodeDecl);
+ const Decl *CurDecl = CurCodeDecl;
+ if (CurDecl == 0)
+ CurDecl = getContext().getTranslationUnitDecl();
- llvm::Constant *C =
- CGM.GetAddrOfConstantCString(FunctionName, GlobalVarName.c_str());
- return LValue::MakeAddr(C, Qualifiers());
-}
+ std::string FunctionName =
+ PredefinedExpr::ComputeName((PredefinedExpr::IdentType)Type, CurDecl);
-LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
- switch (E->getIdentType()) {
- default:
- return EmitUnsupportedLValue(E, "predefined expression");
- case PredefinedExpr::Func:
- case PredefinedExpr::Function:
- case PredefinedExpr::PrettyFunction:
- return EmitPredefinedFunctionName(E->getIdentType());
+ llvm::Constant *C =
+ CGM.GetAddrOfConstantCString(FunctionName, GlobalVarName.c_str());
+ return MakeAddrLValue(C, E->getType());
+ }
}
}
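[Illustrative sketch; function name invented.] The rewritten EmitPredefinedLValue covers the three predefined identifiers, each lowered to a private constant string named after the identifier and the enclosing function (e.g. __func__.report):

    void report() {
      const char *a = __func__;              // roughly "report"
      const char *b = __FUNCTION__;          // roughly "report"
      const char *c = __PRETTY_FUNCTION__;   // roughly "void report()"
    }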
@@ -1315,10 +1311,9 @@ llvm::BasicBlock *CodeGenFunction::getTrapBB() {
// If we are not optimizing, don't collapse all calls to trap in the function
// to the same call, that way, in the debugger they can see which operation
- // did in fact fail. If we are optimizing, we collpase all call to trap down
+ // did in fact fail. If we are optimizing, we collapse all calls to trap down
// to just one per function to save on codesize.
- if (GCO.OptimizationLevel
- && TrapBB)
+ if (GCO.OptimizationLevel && TrapBB)
return TrapBB;
llvm::BasicBlock *Cont = 0;
@@ -1345,7 +1340,7 @@ llvm::BasicBlock *CodeGenFunction::getTrapBB() {
static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
// If this isn't just an array->pointer decay, bail out.
const CastExpr *CE = dyn_cast<CastExpr>(E);
- if (CE == 0 || CE->getCastKind() != CastExpr::CK_ArrayToPointerDecay)
+ if (CE == 0 || CE->getCastKind() != CK_ArrayToPointerDecay)
return 0;
// If this is a decay from variable width array, bail out.
@@ -1382,7 +1377,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
if (CatchUndefined) {
if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E->getBase())){
if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr())) {
- if (ICE->getCastKind() == CastExpr::CK_ArrayToPointerDecay) {
+ if (ICE->getCastKind() == CK_ArrayToPointerDecay) {
if (const ConstantArrayType *CAT
= getContext().getAsConstantArrayType(DRE->getType())) {
llvm::APInt Size = CAT->getSize();
@@ -1454,13 +1449,12 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
assert(!T.isNull() &&
"CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type");
- Qualifiers Quals = MakeQualifiers(T);
- Quals.setAddressSpace(E->getBase()->getType().getAddressSpace());
+ LValue LV = MakeAddrLValue(Address, T);
+ LV.getQuals().setAddressSpace(E->getBase()->getType().getAddressSpace());
- LValue LV = LValue::MakeAddr(Address, Quals);
if (getContext().getLangOptions().ObjC1 &&
getContext().getLangOptions().getGCMode() != LangOptions::NonGC) {
- LValue::SetObjCNonGC(LV, !E->isOBJCGCCandidate(getContext()));
+ LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
setObjCGCLValueClass(getContext(), E, LV);
}
return LV;
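[Illustrative sketch; names invented.] The CatchUndefined block above guards direct indexing of a named array of constant size: when the bounds-checking option behind CatchUndefined is enabled, an out-of-range index branches to the trap block.

    int buf[16];

    int get(unsigned i) {
      return buf[i];   // i >= 16 reaches the trap basic block instead of loading
    }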
@@ -1489,9 +1483,8 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
// it.
llvm::Value *Ptr = EmitScalarExpr(E->getBase());
const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
- Qualifiers Quals = MakeQualifiers(PT->getPointeeType());
- Quals.removeObjCGCAttr();
- Base = LValue::MakeAddr(Ptr, Quals);
+ Base = MakeAddrLValue(Ptr, PT->getPointeeType());
+ Base.getQuals().removeObjCGCAttr();
} else if (E->getBase()->isLvalue(getContext()) == Expr::LV_Valid) {
// Otherwise, if the base is an lvalue ( as in the case of foo.x.x),
// emit the base as an lvalue.
@@ -1506,7 +1499,7 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
// Store the vector to memory (because LValue wants an address).
llvm::Value *VecMem = CreateMemTemp(E->getBase()->getType());
Builder.CreateStore(Vec, VecMem);
- Base = LValue::MakeAddr(VecMem, Qualifiers());
+ Base = MakeAddrLValue(VecMem, E->getBase()->getType());
}
// Encode the element access list into a vector of unsigned indices.
@@ -1566,7 +1559,7 @@ LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
if (FieldDecl *Field = dyn_cast<FieldDecl>(ND)) {
LValue LV = EmitLValueForField(BaseValue, Field,
BaseQuals.getCVRQualifiers());
- LValue::SetObjCNonGC(LV, isNonGC);
+ LV.setNonGC(isNonGC);
setObjCGCLValueClass(getContext(), E, LV);
return LV;
}
@@ -1645,13 +1638,15 @@ LValue CodeGenFunction::EmitLValueForField(llvm::Value* BaseValue,
if (Field->getType()->isReferenceType())
V = Builder.CreateLoad(V, "tmp");
- Qualifiers Quals = MakeQualifiers(Field->getType());
- Quals.addCVRQualifiers(CVRQualifiers);
+ unsigned Alignment = getContext().getDeclAlign(Field).getQuantity();
+ LValue LV = MakeAddrLValue(V, Field->getType(), Alignment);
+ LV.getQuals().addCVRQualifiers(CVRQualifiers);
+
// __weak attribute on a field is ignored.
- if (Quals.getObjCGCAttr() == Qualifiers::Weak)
- Quals.removeObjCGCAttr();
+ if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
+ LV.getQuals().removeObjCGCAttr();
- return LValue::MakeAddr(V, Quals);
+ return LV;
}
LValue
@@ -1670,13 +1665,14 @@ CodeGenFunction::EmitLValueForFieldInitialization(llvm::Value* BaseValue,
assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs");
- return LValue::MakeAddr(V, MakeQualifiers(FieldType));
+ unsigned Alignment = getContext().getDeclAlign(Field).getQuantity();
+ return MakeAddrLValue(V, FieldType, Alignment);
}
LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr* E){
llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
const Expr* InitExpr = E->getInitializer();
- LValue Result = LValue::MakeAddr(DeclPtr, MakeQualifiers(E->getType()));
+ LValue Result = MakeAddrLValue(DeclPtr, E->getType());
EmitAnyExprToMem(InitExpr, DeclPtr, /*Volatile*/ false);
@@ -1729,7 +1725,7 @@ CodeGenFunction::EmitConditionalOperatorLValue(const ConditionalOperator* E) {
EmitBlock(ContBlock);
Temp = Builder.CreateLoad(Temp, "lv");
- return LValue::MakeAddr(Temp, MakeQualifiers(E->getType()));
+ return MakeAddrLValue(Temp, E->getType());
}
// ?: here should be an aggregate.
@@ -1749,35 +1745,65 @@ CodeGenFunction::EmitConditionalOperatorLValue(const ConditionalOperator* E) {
/// cast from scalar to union.
LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
switch (E->getCastKind()) {
- default:
+ case CK_ToVoid:
return EmitUnsupportedLValue(E, "unexpected cast lvalue");
-
- case CastExpr::CK_Dynamic: {
+
+ case CK_NoOp:
+ if (E->getSubExpr()->Classify(getContext()).getKind()
+ != Expr::Classification::CL_PRValue) {
+ LValue LV = EmitLValue(E->getSubExpr());
+ if (LV.isPropertyRef() || LV.isKVCRef()) {
+ QualType QT = E->getSubExpr()->getType();
+ RValue RV =
+ LV.isPropertyRef() ? EmitLoadOfPropertyRefLValue(LV, QT)
+ : EmitLoadOfKVCRefLValue(LV, QT);
+ assert(!RV.isScalar() && "EmitCastLValue-scalar cast of property ref");
+ llvm::Value *V = RV.getAggregateAddr();
+ return MakeAddrLValue(V, QT);
+ }
+ return LV;
+ }
+ // Fall through to synthesize a temporary.
+
+ case CK_Unknown:
+ case CK_BitCast:
+ case CK_ArrayToPointerDecay:
+ case CK_FunctionToPointerDecay:
+ case CK_NullToMemberPointer:
+ case CK_IntegralToPointer:
+ case CK_PointerToIntegral:
+ case CK_VectorSplat:
+ case CK_IntegralCast:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingCast:
+ case CK_DerivedToBaseMemberPointer:
+ case CK_BaseToDerivedMemberPointer:
+ case CK_MemberPointerToBoolean:
+ case CK_AnyPointerToBlockPointerCast: {
+ // These casts only produce lvalues when we're binding a reference to a
+ // temporary realized from a (converted) pure rvalue. Emit the expression
+ // as a value, copy it into a temporary, and return an lvalue referring to
+ // that temporary.
+ llvm::Value *V = CreateMemTemp(E->getType(), "ref.temp");
+ EmitAnyExprToMem(E, V, false, false);
+ return MakeAddrLValue(V, E->getType());
+ }
+
+ case CK_Dynamic: {
LValue LV = EmitLValue(E->getSubExpr());
llvm::Value *V = LV.getAddress();
const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(E);
- return LValue::MakeAddr(EmitDynamicCast(V, DCE),
- MakeQualifiers(E->getType()));
+ return MakeAddrLValue(EmitDynamicCast(V, DCE), E->getType());
}
- case CastExpr::CK_NoOp: {
- LValue LV = EmitLValue(E->getSubExpr());
- if (LV.isPropertyRef()) {
- QualType QT = E->getSubExpr()->getType();
- RValue RV = EmitLoadOfPropertyRefLValue(LV, QT);
- assert(!RV.isScalar() && "EmitCastLValue - scalar cast of property ref");
- llvm::Value *V = RV.getAggregateAddr();
- return LValue::MakeAddr(V, MakeQualifiers(QT));
- }
- return LV;
- }
- case CastExpr::CK_ConstructorConversion:
- case CastExpr::CK_UserDefinedConversion:
- case CastExpr::CK_AnyPointerToObjCPointerCast:
+ case CK_ConstructorConversion:
+ case CK_UserDefinedConversion:
+ case CK_AnyPointerToObjCPointerCast:
return EmitLValue(E->getSubExpr());
- case CastExpr::CK_UncheckedDerivedToBase:
- case CastExpr::CK_DerivedToBase: {
+ case CK_UncheckedDerivedToBase:
+ case CK_DerivedToBase: {
const RecordType *DerivedClassTy =
E->getSubExpr()->getType()->getAs<RecordType>();
CXXRecordDecl *DerivedClassDecl =
@@ -1785,8 +1811,11 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
LValue LV = EmitLValue(E->getSubExpr());
llvm::Value *This;
- if (LV.isPropertyRef()) {
- RValue RV = EmitLoadOfPropertyRefLValue(LV, E->getSubExpr()->getType());
+ if (LV.isPropertyRef() || LV.isKVCRef()) {
+ QualType QT = E->getSubExpr()->getType();
+ RValue RV =
+ LV.isPropertyRef() ? EmitLoadOfPropertyRefLValue(LV, QT)
+ : EmitLoadOfKVCRefLValue(LV, QT);
assert (!RV.isScalar() && "EmitCastLValue");
This = RV.getAggregateAddr();
}
@@ -1796,13 +1825,14 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
// Perform the derived-to-base conversion
llvm::Value *Base =
GetAddressOfBaseClass(This, DerivedClassDecl,
- E->getBasePath(), /*NullCheckValue=*/false);
+ E->path_begin(), E->path_end(),
+ /*NullCheckValue=*/false);
- return LValue::MakeAddr(Base, MakeQualifiers(E->getType()));
+ return MakeAddrLValue(Base, E->getType());
}
- case CastExpr::CK_ToUnion:
+ case CK_ToUnion:
return EmitAggExprToLValue(E);
- case CastExpr::CK_BaseToDerived: {
+ case CK_BaseToDerived: {
const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>();
CXXRecordDecl *DerivedClassDecl =
cast<CXXRecordDecl>(DerivedClassTy->getDecl());
@@ -1812,26 +1842,36 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
// Perform the base-to-derived conversion
llvm::Value *Derived =
GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl,
- E->getBasePath(),/*NullCheckValue=*/false);
+ E->path_begin(), E->path_end(),
+ /*NullCheckValue=*/false);
- return LValue::MakeAddr(Derived, MakeQualifiers(E->getType()));
+ return MakeAddrLValue(Derived, E->getType());
}
- case CastExpr::CK_LValueBitCast: {
+ case CK_LValueBitCast: {
// This must be a reinterpret_cast (or c-style equivalent).
const ExplicitCastExpr *CE = cast<ExplicitCastExpr>(E);
LValue LV = EmitLValue(E->getSubExpr());
llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
ConvertType(CE->getTypeAsWritten()));
- return LValue::MakeAddr(V, MakeQualifiers(E->getType()));
+ return MakeAddrLValue(V, E->getType());
}
+ case CK_ObjCObjectLValueCast: {
+ LValue LV = EmitLValue(E->getSubExpr());
+ QualType ToType = getContext().getLValueReferenceType(E->getType());
+ llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
+ ConvertType(ToType));
+ return MakeAddrLValue(V, E->getType());
}
+ }
+
+ llvm_unreachable("Unhandled lvalue cast kind?");
}
LValue CodeGenFunction::EmitNullInitializationLValue(
const CXXScalarValueInitExpr *E) {
QualType Ty = E->getType();
- LValue LV = LValue::MakeAddr(CreateMemTemp(Ty), MakeQualifiers(Ty));
+ LValue LV = MakeAddrLValue(CreateMemTemp(Ty), Ty);
EmitNullInitialization(LV.getAddress(), Ty);
return LV;
}
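[Illustrative sketch; names invented.] The long list of scalar cast kinds above describes the situation where a reference is bound to a temporary realized from a converted rvalue; at the source level that is the kind of binding the "ref.temp" path is meant to cover:

    void bindRef() {
      int i = 3;
      const double &d = i;   // the converted value is materialized into a
                             // temporary (the "ref.temp" above); d binds to it
      (void)d;
    }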
@@ -1881,28 +1921,26 @@ RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
// Comma expressions just emit their LHS then their RHS as an l-value.
- if (E->getOpcode() == BinaryOperator::Comma) {
+ if (E->getOpcode() == BO_Comma) {
EmitAnyExpr(E->getLHS());
EnsureInsertPoint();
return EmitLValue(E->getRHS());
}
- if (E->getOpcode() == BinaryOperator::PtrMemD ||
- E->getOpcode() == BinaryOperator::PtrMemI)
+ if (E->getOpcode() == BO_PtrMemD ||
+ E->getOpcode() == BO_PtrMemI)
return EmitPointerToDataMemberBinaryExpr(E);
// Can only get l-value for binary operator expressions which are a
// simple assignment of aggregate type.
- if (E->getOpcode() != BinaryOperator::Assign)
+ if (E->getOpcode() != BO_Assign)
return EmitUnsupportedLValue(E, "binary l-value expression");
if (!hasAggregateLLVMType(E->getType())) {
// Emit the LHS as an l-value.
LValue LV = EmitLValue(E->getLHS());
-
- llvm::Value *RHS = EmitScalarExpr(E->getRHS());
- EmitStoreOfScalar(RHS, LV.getAddress(), LV.isVolatileQualified(),
- E->getType());
+ // Store the value through the l-value.
+ EmitStoreThroughLValue(EmitAnyExpr(E->getRHS()), LV, E->getType());
return LV;
}
@@ -1913,13 +1951,13 @@ LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
RValue RV = EmitCallExpr(E);
if (!RV.isScalar())
- return LValue::MakeAddr(RV.getAggregateAddr(),MakeQualifiers(E->getType()));
+ return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
assert(E->getCallReturnType()->isReferenceType() &&
"Can't have a scalar return unless the return type is a "
"reference type!");
- return LValue::MakeAddr(RV.getScalarVal(), MakeQualifiers(E->getType()));
+ return MakeAddrLValue(RV.getScalarVal(), E->getType());
}
LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
@@ -1930,13 +1968,12 @@ LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
llvm::Value *Temp = CreateMemTemp(E->getType(), "tmp");
EmitCXXConstructExpr(Temp, E);
- return LValue::MakeAddr(Temp, MakeQualifiers(E->getType()));
+ return MakeAddrLValue(Temp, E->getType());
}
LValue
CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
- llvm::Value *Temp = EmitCXXTypeidExpr(E);
- return LValue::MakeAddr(Temp, MakeQualifiers(E->getType()));
+ return MakeAddrLValue(EmitCXXTypeidExpr(E), E->getType());
}
LValue
@@ -1950,20 +1987,19 @@ LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
RValue RV = EmitObjCMessageExpr(E);
if (!RV.isScalar())
- return LValue::MakeAddr(RV.getAggregateAddr(),
- MakeQualifiers(E->getType()));
+ return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
assert(E->getMethodDecl()->getResultType()->isReferenceType() &&
"Can't have a scalar return unless the return type is a "
"reference type!");
- return LValue::MakeAddr(RV.getScalarVal(), MakeQualifiers(E->getType()));
+ return MakeAddrLValue(RV.getScalarVal(), E->getType());
}
LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
llvm::Value *V =
CGM.getObjCRuntime().GetSelector(Builder, E->getSelector(), true);
- return LValue::MakeAddr(V, MakeQualifiers(E->getType()));
+ return MakeAddrLValue(V, E->getType());
}
llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
@@ -2025,7 +2061,7 @@ LValue CodeGenFunction::EmitObjCSuperExprLValue(const ObjCSuperExpr *E) {
LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
// Can only get l-value for message expression returning aggregate type
RValue RV = EmitAnyExprToTemp(E);
- return LValue::MakeAddr(RV.getAggregateAddr(), MakeQualifiers(E->getType()));
+ return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
}
RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
@@ -2054,20 +2090,19 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
LValue CodeGenFunction::
EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
llvm::Value *BaseV;
- if (E->getOpcode() == BinaryOperator::PtrMemI)
+ if (E->getOpcode() == BO_PtrMemI)
BaseV = EmitScalarExpr(E->getLHS());
else
BaseV = EmitLValue(E->getLHS()).getAddress();
- const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(getLLVMContext());
- BaseV = Builder.CreateBitCast(BaseV, i8Ty);
+
llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
- llvm::Value *AddV = Builder.CreateInBoundsGEP(BaseV, OffsetV, "add.ptr");
- QualType Ty = E->getRHS()->getType();
- Ty = Ty->getAs<MemberPointerType>()->getPointeeType();
-
- const llvm::Type *PType = ConvertType(getContext().getPointerType(Ty));
- AddV = Builder.CreateBitCast(AddV, PType);
- return LValue::MakeAddr(AddV, MakeQualifiers(Ty));
+ const MemberPointerType *MPT
+ = E->getRHS()->getType()->getAs<MemberPointerType>();
+
+ llvm::Value *AddV =
+ CGM.getCXXABI().EmitMemberDataPointerAddress(*this, BaseV, OffsetV, MPT);
+
+ return MakeAddrLValue(AddV, MPT->getPointeeType());
}
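[Illustrative sketch; types and names invented.] The rewritten EmitPointerToDataMemberBinaryExpr above, and the member-function counterpart in CGExprCXX.cpp below, handle the .* and ->* operators; the address and callee computations are now delegated to the CGCXXABI object instead of hard-coding the Itanium layout:

    struct S { int n; int get() const { return n; } };

    int readField(S &s, int S::*pm)             { return s.*pm; }      // data member
    int callMethod(S *s, int (S::*pf)() const)  { return (s->*pf)(); } // member function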
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index 219a5f915329..28c8b3545b38 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -108,7 +108,6 @@ public:
void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
void VisitBinAssign(const BinaryOperator *E);
void VisitBinComma(const BinaryOperator *E);
- void VisitUnaryAddrOf(const UnaryOperator *E);
void VisitObjCMessageExpr(ObjCMessageExpr *E);
void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
@@ -193,10 +192,18 @@ void AggExprEmitter::EmitGCMove(const Expr *E, RValue Src) {
void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore) {
assert(Src.isAggregate() && "value must be aggregate value!");
- // If the result is ignored, don't copy from the value.
+ // If DestPtr is null, then we're evaluating an aggregate expression
+ // in a context (like an expression statement) that doesn't care
+ // about the result. C says that an lvalue-to-rvalue conversion is
+ // performed in these cases; C++ says that it is not. In either
+ // case, we don't actually need to do anything unless the value is
+ // volatile.
if (DestPtr == 0) {
- if (!Src.isVolatileQualified() || (IgnoreResult && Ignore))
+ if (!Src.isVolatileQualified() ||
+ CGF.CGM.getLangOptions().CPlusPlus ||
+ (IgnoreResult && Ignore))
return;
+
// If the source is volatile, we must read from it; to do that, we need
// some place to put it.
DestPtr = CGF.CreateMemTemp(E->getType(), "agg.tmp");
@@ -235,7 +242,7 @@ void AggExprEmitter::EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore) {
//===----------------------------------------------------------------------===//
void AggExprEmitter::VisitCastExpr(CastExpr *E) {
- if (!DestPtr && E->getCastKind() != CastExpr::CK_Dynamic) {
+ if (!DestPtr && E->getCastKind() != CK_Dynamic) {
Visit(E->getSubExpr());
return;
}
@@ -243,7 +250,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
switch (E->getCastKind()) {
default: assert(0 && "Unhandled cast kind!");
- case CastExpr::CK_Dynamic: {
+ case CK_Dynamic: {
assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
LValue LV = CGF.EmitCheckedLValue(E->getSubExpr());
// FIXME: Do we also need to handle property references here?
@@ -257,105 +264,39 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
break;
}
- case CastExpr::CK_ToUnion: {
+ case CK_ToUnion: {
// GCC union extension
- QualType PtrTy =
- CGF.getContext().getPointerType(E->getSubExpr()->getType());
+ QualType Ty = E->getSubExpr()->getType();
+ QualType PtrTy = CGF.getContext().getPointerType(Ty);
llvm::Value *CastPtr = Builder.CreateBitCast(DestPtr,
CGF.ConvertType(PtrTy));
- EmitInitializationToLValue(E->getSubExpr(),
- LValue::MakeAddr(CastPtr, Qualifiers()),
- E->getSubExpr()->getType());
+ EmitInitializationToLValue(E->getSubExpr(), CGF.MakeAddrLValue(CastPtr, Ty),
+ Ty);
break;
}
- case CastExpr::CK_DerivedToBase:
- case CastExpr::CK_BaseToDerived:
- case CastExpr::CK_UncheckedDerivedToBase: {
+ case CK_DerivedToBase:
+ case CK_BaseToDerived:
+ case CK_UncheckedDerivedToBase: {
assert(0 && "cannot perform hierarchy conversion in EmitAggExpr: "
"should have been unpacked before we got here");
break;
}
// FIXME: Remove the CK_Unknown check here.
- case CastExpr::CK_Unknown:
- case CastExpr::CK_NoOp:
- case CastExpr::CK_UserDefinedConversion:
- case CastExpr::CK_ConstructorConversion:
+ case CK_Unknown:
+ case CK_NoOp:
+ case CK_UserDefinedConversion:
+ case CK_ConstructorConversion:
assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
E->getType()) &&
"Implicit cast types must be compatible");
Visit(E->getSubExpr());
break;
- case CastExpr::CK_NullToMemberPointer: {
- // If the subexpression's type is the C++0x nullptr_t, emit the
- // subexpression, which may have side effects.
- if (E->getSubExpr()->getType()->isNullPtrType())
- Visit(E->getSubExpr());
-
- const llvm::Type *PtrDiffTy =
- CGF.ConvertType(CGF.getContext().getPointerDiffType());
-
- llvm::Value *NullValue = llvm::Constant::getNullValue(PtrDiffTy);
- llvm::Value *Ptr = Builder.CreateStructGEP(DestPtr, 0, "ptr");
- Builder.CreateStore(NullValue, Ptr, VolatileDest);
-
- llvm::Value *Adj = Builder.CreateStructGEP(DestPtr, 1, "adj");
- Builder.CreateStore(NullValue, Adj, VolatileDest);
-
- break;
- }
-
- case CastExpr::CK_LValueBitCast:
+ case CK_LValueBitCast:
llvm_unreachable("there are no lvalue bit-casts on aggregates");
break;
-
- case CastExpr::CK_BitCast: {
- // This must be a member function pointer cast.
- Visit(E->getSubExpr());
- break;
- }
-
- case CastExpr::CK_DerivedToBaseMemberPointer:
- case CastExpr::CK_BaseToDerivedMemberPointer: {
- QualType SrcType = E->getSubExpr()->getType();
-
- llvm::Value *Src = CGF.CreateMemTemp(SrcType, "tmp");
- CGF.EmitAggExpr(E->getSubExpr(), Src, SrcType.isVolatileQualified());
-
- llvm::Value *SrcPtr = Builder.CreateStructGEP(Src, 0, "src.ptr");
- SrcPtr = Builder.CreateLoad(SrcPtr);
-
- llvm::Value *SrcAdj = Builder.CreateStructGEP(Src, 1, "src.adj");
- SrcAdj = Builder.CreateLoad(SrcAdj);
-
- llvm::Value *DstPtr = Builder.CreateStructGEP(DestPtr, 0, "dst.ptr");
- Builder.CreateStore(SrcPtr, DstPtr, VolatileDest);
-
- llvm::Value *DstAdj = Builder.CreateStructGEP(DestPtr, 1, "dst.adj");
-
- // Now See if we need to update the adjustment.
- const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(SrcType->getAs<MemberPointerType>()->
- getClass()->getAs<RecordType>()->getDecl());
- const CXXRecordDecl *DerivedDecl =
- cast<CXXRecordDecl>(E->getType()->getAs<MemberPointerType>()->
- getClass()->getAs<RecordType>()->getDecl());
- if (E->getCastKind() == CastExpr::CK_DerivedToBaseMemberPointer)
- std::swap(DerivedDecl, BaseDecl);
-
- if (llvm::Constant *Adj =
- CGF.CGM.GetNonVirtualBaseClassOffset(DerivedDecl, E->getBasePath())) {
- if (E->getCastKind() == CastExpr::CK_DerivedToBaseMemberPointer)
- SrcAdj = Builder.CreateSub(SrcAdj, Adj, "adj");
- else
- SrcAdj = Builder.CreateAdd(SrcAdj, Adj, "adj");
- }
-
- Builder.CreateStore(SrcAdj, DstAdj, VolatileDest);
- break;
- }
}
}
@@ -391,42 +332,12 @@ void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
/*IgnoreResult=*/false, IsInitializer);
}
-void AggExprEmitter::VisitUnaryAddrOf(const UnaryOperator *E) {
- // We have a member function pointer.
- const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
- (void) MPT;
- assert(MPT->getPointeeType()->isFunctionProtoType() &&
- "Unexpected member pointer type!");
-
- // The creation of member function pointers has no side effects; if
- // there is no destination pointer, we have nothing to do.
- if (!DestPtr)
- return;
-
- const DeclRefExpr *DRE = cast<DeclRefExpr>(E->getSubExpr());
- const CXXMethodDecl *MD =
- cast<CXXMethodDecl>(DRE->getDecl())->getCanonicalDecl();
-
- const llvm::Type *PtrDiffTy =
- CGF.ConvertType(CGF.getContext().getPointerDiffType());
-
- llvm::Value *DstPtr = Builder.CreateStructGEP(DestPtr, 0, "dst.ptr");
- llvm::Value *FuncPtr = CGF.CGM.GetCXXMemberFunctionPointerValue(MD);
- Builder.CreateStore(FuncPtr, DstPtr, VolatileDest);
-
- llvm::Value *AdjPtr = Builder.CreateStructGEP(DestPtr, 1, "dst.adj");
- // The adjustment will always be 0.
- Builder.CreateStore(llvm::ConstantInt::get(PtrDiffTy, 0), AdjPtr,
- VolatileDest);
-}
-
void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
CGF.EmitCompoundStmt(*E->getSubStmt(), true, DestPtr, VolatileDest);
}
void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
- if (E->getOpcode() == BinaryOperator::PtrMemD ||
- E->getOpcode() == BinaryOperator::PtrMemI)
+ if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
VisitPointerToDataMemberBinaryOperator(E);
else
CGF.ErrorUnsupported(E, "aggregate binary expression");
@@ -519,7 +430,7 @@ void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
return;
}
- EmitFinalDestCopy(VE, LValue::MakeAddr(ArgPtr, Qualifiers()));
+ EmitFinalDestCopy(VE, CGF.MakeAddrLValue(ArgPtr, VE->getType()));
}
void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
@@ -546,12 +457,6 @@ AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
if (!Val) // Create a temporary variable.
Val = CGF.CreateMemTemp(E->getType(), "tmp");
- if (E->requiresZeroInitialization())
- EmitNullInitializationToLValue(LValue::MakeAddr(Val,
- // FIXME: Qualifiers()?
- E->getType().getQualifiers()),
- E->getType());
-
CGF.EmitCXXConstructExpr(Val, E);
}
@@ -568,8 +473,8 @@ void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
// Create a temporary variable.
Val = CGF.CreateMemTemp(E->getType(), "tmp");
}
- LValue LV = LValue::MakeAddr(Val, Qualifiers());
- EmitNullInitializationToLValue(LV, E->getType());
+ EmitNullInitializationToLValue(CGF.MakeAddrLValue(Val, E->getType()),
+ E->getType());
}
void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
@@ -579,8 +484,8 @@ void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
// Create a temporary variable.
Val = CGF.CreateMemTemp(E->getType(), "tmp");
}
- LValue LV = LValue::MakeAddr(Val, Qualifiers());
- EmitNullInitializationToLValue(LV, E->getType());
+ EmitNullInitializationToLValue(CGF.MakeAddrLValue(Val, E->getType()),
+ E->getType());
}
void
@@ -625,7 +530,7 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
llvm::GlobalVariable* GV =
new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
llvm::GlobalValue::InternalLinkage, C, "");
- EmitFinalDestCopy(E, LValue::MakeAddr(GV, Qualifiers()));
+ EmitFinalDestCopy(E, CGF.MakeAddrLValue(GV, E->getType()));
return;
}
#endif
@@ -656,17 +561,15 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
ElementType = CGF.getContext().getAsArrayType(ElementType)->getElementType();
// FIXME: were we intentionally ignoring address spaces and GC attributes?
- Qualifiers Quals = CGF.MakeQualifiers(ElementType);
for (uint64_t i = 0; i != NumArrayElements; ++i) {
llvm::Value *NextVal = Builder.CreateStructGEP(DestPtr, i, ".array");
+ LValue LV = CGF.MakeAddrLValue(NextVal, ElementType);
if (i < NumInitElements)
- EmitInitializationToLValue(E->getInit(i),
- LValue::MakeAddr(NextVal, Quals),
- ElementType);
+ EmitInitializationToLValue(E->getInit(i), LV, ElementType);
+
else
- EmitNullInitializationToLValue(LValue::MakeAddr(NextVal, Quals),
- ElementType);
+ EmitNullInitializationToLValue(LV, ElementType);
}
return;
}
@@ -679,8 +582,17 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
// the optimizer, especially with bitfields.
unsigned NumInitElements = E->getNumInits();
RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
- unsigned CurInitVal = 0;
-
+
+ // If we're initializing the whole aggregate, just do it in place.
+ // FIXME: This is a hack around an AST bug (PR6537).
+ if (NumInitElements == 1 && E->getType() == E->getInit(0)->getType()) {
+ EmitInitializationToLValue(E->getInit(0),
+ CGF.MakeAddrLValue(DestPtr, E->getType()),
+ E->getType());
+ return;
+ }
+
+
if (E->getType()->isUnionType()) {
// Only initialize one field of a union. The field itself is
// specified by the initializer list.
@@ -712,19 +624,10 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
return;
}
-
- // If we're initializing the whole aggregate, just do it in place.
- // FIXME: This is a hack around an AST bug (PR6537).
- if (NumInitElements == 1 && E->getType() == E->getInit(0)->getType()) {
- EmitInitializationToLValue(E->getInit(0),
- LValue::MakeAddr(DestPtr, Qualifiers()),
- E->getType());
- return;
- }
-
// Here we iterate over the fields; this makes it simpler to both
// default-initialize fields and skip over unnamed fields.
+ unsigned CurInitVal = 0;
for (RecordDecl::field_iterator Field = SD->field_begin(),
FieldEnd = SD->field_end();
Field != FieldEnd; ++Field) {
@@ -738,7 +641,7 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
// FIXME: volatility
LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, *Field, 0);
// We never generate write-barriers for initialized fields.
- LValue::SetObjCNonGC(FieldLoc, true);
+ FieldLoc.setNonGC(true);
if (CurInitVal < NumInitElements) {
// Store the initializer into the field.
EmitInitializationToLValue(E->getInit(CurInitVal++), FieldLoc,
@@ -776,10 +679,10 @@ void CodeGenFunction::EmitAggExpr(const Expr *E, llvm::Value *DestPtr,
LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
assert(hasAggregateLLVMType(E->getType()) && "Invalid argument!");
- Qualifiers Q = MakeQualifiers(E->getType());
llvm::Value *Temp = CreateMemTemp(E->getType());
- EmitAggExpr(E, Temp, Q.hasVolatile());
- return LValue::MakeAddr(Temp, Q);
+ LValue LV = MakeAddrLValue(Temp, E->getType());
+ EmitAggExpr(E, Temp, LV.isVolatileQualified());
+ return LV;
}
void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
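[Illustrative sketch; names invented, valid as both C and C++.] The DestPtr == 0 path in EmitFinalDestCopy above concerns statements that discard an aggregate value; the C/C++ difference it mentions looks like this at the source level:

    struct Pair { int a, b; };
    volatile struct Pair p;

    void discard(void) {
      p;   /* C: the discarded volatile lvalue undergoes lvalue-to-rvalue
              conversion, so p must be read; C++: no conversion is performed,
              so no load need be emitted */
    }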
diff --git a/lib/CodeGen/CGExprCXX.cpp b/lib/CodeGen/CGExprCXX.cpp
index 69e5f0ef4be8..9a98281771fa 100644
--- a/lib/CodeGen/CGExprCXX.cpp
+++ b/lib/CodeGen/CGExprCXX.cpp
@@ -12,7 +12,9 @@
//===----------------------------------------------------------------------===//
#include "CodeGenFunction.h"
+#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
+#include "llvm/Intrinsics.h"
using namespace clang;
using namespace CodeGen;
@@ -91,14 +93,9 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
return EmitCall(getContext().getPointerType(MD->getType()), Callee,
ReturnValue, CE->arg_begin(), CE->arg_end());
}
-
- const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
- const llvm::Type *Ty =
- CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
- FPT->isVariadic());
+ // Compute the object pointer.
llvm::Value *This;
-
if (ME->isArrow())
This = EmitScalarExpr(ME->getBase());
else {
@@ -106,7 +103,10 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
This = BaseLV.getAddress();
}
- if (MD->isCopyAssignment() && MD->isTrivial()) {
+ if (MD->isTrivial()) {
+ if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);
+
+ assert(MD->isCopyAssignment() && "unknown trivial member function");
// We don't like to generate the trivial copy assignment operator when
// it isn't necessary; just produce the proper effect here.
llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
@@ -114,25 +114,34 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
return RValue::get(This);
}
+ // Compute the function type we're calling.
+ const CGFunctionInfo &FInfo =
+ (isa<CXXDestructorDecl>(MD)
+ ? CGM.getTypes().getFunctionInfo(cast<CXXDestructorDecl>(MD),
+ Dtor_Complete)
+ : CGM.getTypes().getFunctionInfo(MD));
+
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+ const llvm::Type *Ty
+ = CGM.getTypes().GetFunctionType(FInfo, FPT->isVariadic());
+
// C++ [class.virtual]p12:
// Explicit qualification with the scope operator (5.1) suppresses the
// virtual call mechanism.
//
// We also don't emit a virtual call if the base expression has a record type
// because then we know what the type is.
+ bool UseVirtualCall = MD->isVirtual() && !ME->hasQualifier()
+ && !canDevirtualizeMemberFunctionCalls(ME->getBase());
+
llvm::Value *Callee;
- if (const CXXDestructorDecl *Destructor
- = dyn_cast<CXXDestructorDecl>(MD)) {
- if (Destructor->isTrivial())
- return RValue::get(0);
- if (MD->isVirtual() && !ME->hasQualifier() &&
- !canDevirtualizeMemberFunctionCalls(ME->getBase())) {
- Callee = BuildVirtualCall(Destructor, Dtor_Complete, This, Ty);
+ if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
+ if (UseVirtualCall) {
+ Callee = BuildVirtualCall(Dtor, Dtor_Complete, This, Ty);
} else {
- Callee = CGM.GetAddrOfFunction(GlobalDecl(Destructor, Dtor_Complete), Ty);
+ Callee = CGM.GetAddrOfFunction(GlobalDecl(Dtor, Dtor_Complete), Ty);
}
- } else if (MD->isVirtual() && !ME->hasQualifier() &&
- !canDevirtualizeMemberFunctionCalls(ME->getBase())) {
+ } else if (UseVirtualCall) {
Callee = BuildVirtualCall(MD, This, Ty);
} else {
Callee = CGM.GetAddrOfFunction(MD, Ty);
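[Illustrative sketch; types invented.] UseVirtualCall above captures the two suppression cases spelled out in the comment: explicit qualification, and a base that is a variable of class type (so its dynamic type is known):

    struct Base    { virtual void f() {} };
    struct Derived : Base { void f() {} };

    void test(Derived *p) {
      Derived d;
      d.f();          // base is a variable of record type: direct call to Derived::f
      p->Base::f();   // explicit qualification suppresses the virtual mechanism
      p->f();         // otherwise: virtual dispatch through the vtable
    }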
@@ -152,89 +161,27 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
const MemberPointerType *MPT =
MemFnExpr->getType()->getAs<MemberPointerType>();
+
const FunctionProtoType *FPT =
MPT->getPointeeType()->getAs<FunctionProtoType>();
const CXXRecordDecl *RD =
cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
- const llvm::FunctionType *FTy =
- CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(RD, FPT),
- FPT->isVariadic());
-
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
-
// Get the member function pointer.
- llvm::Value *MemFnPtr = CreateMemTemp(MemFnExpr->getType(), "mem.fn");
- EmitAggExpr(MemFnExpr, MemFnPtr, /*VolatileDest=*/false);
+ llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);
// Emit the 'this' pointer.
llvm::Value *This;
- if (BO->getOpcode() == BinaryOperator::PtrMemI)
+ if (BO->getOpcode() == BO_PtrMemI)
This = EmitScalarExpr(BaseExpr);
else
This = EmitLValue(BaseExpr).getAddress();
-
- // Adjust it.
- llvm::Value *Adj = Builder.CreateStructGEP(MemFnPtr, 1);
- Adj = Builder.CreateLoad(Adj, "mem.fn.adj");
-
- llvm::Value *Ptr = Builder.CreateBitCast(This, Int8PtrTy, "ptr");
- Ptr = Builder.CreateGEP(Ptr, Adj, "adj");
-
- This = Builder.CreateBitCast(Ptr, This->getType(), "this");
-
- llvm::Value *FnPtr = Builder.CreateStructGEP(MemFnPtr, 0, "mem.fn.ptr");
-
- const llvm::Type *PtrDiffTy = ConvertType(getContext().getPointerDiffType());
- llvm::Value *FnAsInt = Builder.CreateLoad(FnPtr, "fn");
-
- // If the LSB in the function pointer is 1, the function pointer points to
- // a virtual function.
- llvm::Value *IsVirtual
- = Builder.CreateAnd(FnAsInt, llvm::ConstantInt::get(PtrDiffTy, 1),
- "and");
-
- IsVirtual = Builder.CreateTrunc(IsVirtual,
- llvm::Type::getInt1Ty(VMContext));
+ // Ask the ABI to load the callee. Note that This is modified.
+ llvm::Value *Callee =
+ CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr, MPT);
- llvm::BasicBlock *FnVirtual = createBasicBlock("fn.virtual");
- llvm::BasicBlock *FnNonVirtual = createBasicBlock("fn.nonvirtual");
- llvm::BasicBlock *FnEnd = createBasicBlock("fn.end");
-
- Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
- EmitBlock(FnVirtual);
-
- const llvm::Type *VTableTy =
- FTy->getPointerTo()->getPointerTo();
-
- llvm::Value *VTable = Builder.CreateBitCast(This, VTableTy->getPointerTo());
- VTable = Builder.CreateLoad(VTable);
-
- VTable = Builder.CreateBitCast(VTable, Int8PtrTy);
- llvm::Value *VTableOffset =
- Builder.CreateSub(FnAsInt, llvm::ConstantInt::get(PtrDiffTy, 1));
-
- VTable = Builder.CreateGEP(VTable, VTableOffset, "fn");
- VTable = Builder.CreateBitCast(VTable, VTableTy);
-
- llvm::Value *VirtualFn = Builder.CreateLoad(VTable, "virtualfn");
-
- EmitBranch(FnEnd);
- EmitBlock(FnNonVirtual);
-
- // If the function is not virtual, just load the pointer.
- llvm::Value *NonVirtualFn = Builder.CreateLoad(FnPtr, "fn");
- NonVirtualFn = Builder.CreateIntToPtr(NonVirtualFn, FTy->getPointerTo());
-
- EmitBlock(FnEnd);
-
- llvm::PHINode *Callee = Builder.CreatePHI(FTy->getPointerTo());
- Callee->reserveOperandSpace(2);
- Callee->addIncoming(VirtualFn, FnVirtual);
- Callee->addIncoming(NonVirtualFn, FnNonVirtual);
-
CallArgList Args;
QualType ThisType =
@@ -263,11 +210,17 @@ CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
"EmitCXXOperatorMemberCallExpr - user declared copy assignment");
LValue LV = EmitLValue(E->getArg(0));
llvm::Value *This;
- if (LV.isPropertyRef()) {
+ if (LV.isPropertyRef() || LV.isKVCRef()) {
llvm::Value *AggLoc = CreateMemTemp(E->getArg(1)->getType());
EmitAggExpr(E->getArg(1), AggLoc, false /*VolatileDest*/);
- EmitObjCPropertySet(LV.getPropertyRefExpr(),
- RValue::getAggregate(AggLoc, false /*VolatileDest*/));
+ if (LV.isPropertyRef())
+ EmitObjCPropertySet(LV.getPropertyRefExpr(),
+ RValue::getAggregate(AggLoc,
+ false /*VolatileDest*/));
+ else
+ EmitObjCPropertySet(LV.getKVCRefExpr(),
+ RValue::getAggregate(AggLoc,
+ false /*VolatileDest*/));
return RValue::getAggregate(0, false);
}
else
@@ -286,8 +239,11 @@ CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
FPT->isVariadic());
LValue LV = EmitLValue(E->getArg(0));
llvm::Value *This;
- if (LV.isPropertyRef()) {
- RValue RV = EmitLoadOfPropertyRefLValue(LV, E->getArg(0)->getType());
+ if (LV.isPropertyRef() || LV.isKVCRef()) {
+ QualType QT = E->getArg(0)->getType();
+ RValue RV =
+ LV.isPropertyRef() ? EmitLoadOfPropertyRefLValue(LV, QT)
+ : EmitLoadOfKVCRefLValue(LV, QT);
assert (!RV.isScalar() && "EmitCXXOperatorMemberCallExpr");
This = RV.getAggregateAddr();
}
@@ -309,19 +265,18 @@ CodeGenFunction::EmitCXXConstructExpr(llvm::Value *Dest,
const CXXConstructExpr *E) {
assert(Dest && "Must have a destination!");
const CXXConstructorDecl *CD = E->getConstructor();
- const ConstantArrayType *Array =
- getContext().getAsConstantArrayType(E->getType());
- // For a copy constructor, even if it is trivial, must fall thru so
- // its argument is code-gen'ed.
- if (!CD->isCopyConstructor()) {
- QualType InitType = E->getType();
- if (Array)
- InitType = getContext().getBaseElementType(Array);
- const CXXRecordDecl *RD =
- cast<CXXRecordDecl>(InitType->getAs<RecordType>()->getDecl());
- if (RD->hasTrivialConstructor())
- return;
- }
+
+ // If we require zero initialization before (or instead of) calling the
+ // constructor, as can be the case with a non-user-provided default
+ // constructor, emit the zero initialization now.
+ if (E->requiresZeroInitialization())
+ EmitNullInitialization(Dest, E->getType());
+
+
+ // If this is a call to a trivial default constructor, do nothing.
+ if (CD->isTrivial() && CD->isDefaultConstructor())
+ return;
+
// Code gen optimization to eliminate copy constructor and return
// its first argument instead, if in fact that argument is a temporary
// object.
@@ -331,6 +286,9 @@ CodeGenFunction::EmitCXXConstructExpr(llvm::Value *Dest,
return;
}
}
+
+ const ConstantArrayType *Array
+ = getContext().getAsConstantArrayType(E->getType());
if (Array) {
QualType BaseElementTy = getContext().getBaseElementType(Array);
const llvm::Type *BasePtr = ConvertType(BaseElementTy);
@@ -354,131 +312,205 @@ CodeGenFunction::EmitCXXConstructExpr(llvm::Value *Dest,
}
}
-static CharUnits CalculateCookiePadding(ASTContext &Ctx, QualType ElementType) {
- const RecordType *RT = ElementType->getAs<RecordType>();
- if (!RT)
- return CharUnits::Zero();
-
- const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
- if (!RD)
- return CharUnits::Zero();
-
- // Check if the class has a trivial destructor.
- if (RD->hasTrivialDestructor()) {
- // Check if the usual deallocation function takes two arguments.
- const CXXMethodDecl *UsualDeallocationFunction = 0;
-
- DeclarationName OpName =
- Ctx.DeclarationNames.getCXXOperatorName(OO_Array_Delete);
- DeclContext::lookup_const_iterator Op, OpEnd;
- for (llvm::tie(Op, OpEnd) = RD->lookup(OpName);
- Op != OpEnd; ++Op) {
- const CXXMethodDecl *Delete = cast<CXXMethodDecl>(*Op);
-
- if (Delete->isUsualDeallocationFunction()) {
- UsualDeallocationFunction = Delete;
- break;
- }
- }
-
- // No usual deallocation function, we don't need a cookie.
- if (!UsualDeallocationFunction)
- return CharUnits::Zero();
-
- // The usual deallocation function doesn't take a size_t argument, so we
- // don't need a cookie.
- if (UsualDeallocationFunction->getNumParams() == 1)
- return CharUnits::Zero();
-
- assert(UsualDeallocationFunction->getNumParams() == 2 &&
- "Unexpected deallocation function type!");
- }
-
- // Padding is the maximum of sizeof(size_t) and alignof(ElementType)
- return std::max(Ctx.getTypeSizeInChars(Ctx.getSizeType()),
- Ctx.getTypeAlignInChars(ElementType));
+/// Check whether the given operator new[] is the global placement
+/// operator new[].
+static bool IsPlacementOperatorNewArray(ASTContext &Ctx,
+ const FunctionDecl *Fn) {
+ // Must be in global scope. Note that allocation functions can't be
+ // declared in namespaces.
+ if (!Fn->getDeclContext()->getRedeclContext()->isFileContext())
+ return false;
+
+ // Signature must be void *operator new[](size_t, void*).
+ // The size_t is common to all operator new[]s.
+ if (Fn->getNumParams() != 2)
+ return false;
+
+ CanQualType ParamType = Ctx.getCanonicalType(Fn->getParamDecl(1)->getType());
+ return (ParamType == Ctx.VoidPtrTy);
}
-static CharUnits CalculateCookiePadding(ASTContext &Ctx, const CXXNewExpr *E) {
+static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
+ const CXXNewExpr *E) {
if (!E->isArray())
return CharUnits::Zero();
// No cookie is required if the new operator being used is
// ::operator new[](size_t, void*).
const FunctionDecl *OperatorNew = E->getOperatorNew();
- if (OperatorNew->getDeclContext()->getLookupContext()->isFileContext()) {
- if (OperatorNew->getNumParams() == 2) {
- CanQualType ParamType =
- Ctx.getCanonicalType(OperatorNew->getParamDecl(1)->getType());
-
- if (ParamType == Ctx.VoidPtrTy)
- return CharUnits::Zero();
- }
- }
-
- return CalculateCookiePadding(Ctx, E->getAllocatedType());
+ if (IsPlacementOperatorNewArray(CGF.getContext(), OperatorNew))
+ return CharUnits::Zero();
+
+ return CGF.CGM.getCXXABI().GetArrayCookieSize(E->getAllocatedType());
}
static llvm::Value *EmitCXXNewAllocSize(ASTContext &Context,
- CodeGenFunction &CGF,
+ CodeGenFunction &CGF,
const CXXNewExpr *E,
- llvm::Value *& NumElements) {
- QualType Type = E->getAllocatedType();
- CharUnits TypeSize = CGF.getContext().getTypeSizeInChars(Type);
- const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
-
- if (!E->isArray())
- return llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());
+ llvm::Value *&NumElements,
+ llvm::Value *&SizeWithoutCookie) {
+ QualType ElemType = E->getAllocatedType();
- CharUnits CookiePadding = CalculateCookiePadding(CGF.getContext(), E);
+ const llvm::IntegerType *SizeTy =
+ cast<llvm::IntegerType>(CGF.ConvertType(CGF.getContext().getSizeType()));
- Expr::EvalResult Result;
- if (E->getArraySize()->Evaluate(Result, CGF.getContext()) &&
- !Result.HasSideEffects && Result.Val.isInt()) {
+ CharUnits TypeSize = CGF.getContext().getTypeSizeInChars(ElemType);
- CharUnits AllocSize =
- Result.Val.getInt().getZExtValue() * TypeSize + CookiePadding;
-
- NumElements =
- llvm::ConstantInt::get(SizeTy, Result.Val.getInt().getZExtValue());
- while (const ArrayType *AType = Context.getAsArrayType(Type)) {
- const llvm::ArrayType *llvmAType =
- cast<llvm::ArrayType>(CGF.ConvertType(Type));
- NumElements =
- CGF.Builder.CreateMul(NumElements,
- llvm::ConstantInt::get(
- SizeTy, llvmAType->getNumElements()));
- Type = AType->getElementType();
- }
-
- return llvm::ConstantInt::get(SizeTy, AllocSize.getQuantity());
+ if (!E->isArray()) {
+ SizeWithoutCookie = llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());
+ return SizeWithoutCookie;
}
-
+
+ // Figure out the cookie size.
+ CharUnits CookieSize = CalculateCookiePadding(CGF, E);
+
// Emit the array size expression.
+ // NumElements is the product of the extents of all array dimensions;
+ // e.g. for 'int[2][3]', ElemType is 'int' and NumElements is 6.
NumElements = CGF.EmitScalarExpr(E->getArraySize());
-
- // Multiply with the type size.
- llvm::Value *V =
- CGF.Builder.CreateMul(NumElements,
- llvm::ConstantInt::get(SizeTy,
- TypeSize.getQuantity()));
-
- while (const ArrayType *AType = Context.getAsArrayType(Type)) {
- const llvm::ArrayType *llvmAType =
- cast<llvm::ArrayType>(CGF.ConvertType(Type));
- NumElements =
- CGF.Builder.CreateMul(NumElements,
- llvm::ConstantInt::get(
- SizeTy, llvmAType->getNumElements()));
- Type = AType->getElementType();
+ assert(NumElements->getType() == SizeTy && "element count not a size_t");
+
+ uint64_t ArraySizeMultiplier = 1;
+ while (const ConstantArrayType *CAT
+ = CGF.getContext().getAsConstantArrayType(ElemType)) {
+ ElemType = CAT->getElementType();
+ ArraySizeMultiplier *= CAT->getSize().getZExtValue();
}
- // And add the cookie padding if necessary.
- if (!CookiePadding.isZero())
- V = CGF.Builder.CreateAdd(V,
- llvm::ConstantInt::get(SizeTy, CookiePadding.getQuantity()));
-
- return V;
+ llvm::Value *Size;
+
+ // If someone is doing 'new int[42]' there is no need to do a dynamic check.
+ // Don't bloat the -O0 code.
+ if (llvm::ConstantInt *NumElementsC =
+ dyn_cast<llvm::ConstantInt>(NumElements)) {
+ llvm::APInt NEC = NumElementsC->getValue();
+ unsigned SizeWidth = NEC.getBitWidth();
+
+ // Determine if there is an overflow here by doing an extended multiply.
+ NEC.zext(SizeWidth*2);
+ llvm::APInt SC(SizeWidth*2, TypeSize.getQuantity());
+ SC *= NEC;
+
+ if (!CookieSize.isZero()) {
+ // Save the current size without a cookie. We don't care if an
+ // overflow's already happened because SizeWithoutCookie isn't
+ // used if the allocator returns null or throws, as it should
+ // always do on an overflow.
+ llvm::APInt SWC = SC;
+ SWC.trunc(SizeWidth);
+ SizeWithoutCookie = llvm::ConstantInt::get(SizeTy, SWC);
+
+ // Add the cookie size.
+ SC += llvm::APInt(SizeWidth*2, CookieSize.getQuantity());
+ }
+
+ if (SC.countLeadingZeros() >= SizeWidth) {
+ SC.trunc(SizeWidth);
+ Size = llvm::ConstantInt::get(SizeTy, SC);
+ } else {
+ // On overflow, produce a -1 so operator new throws.
+ Size = llvm::Constant::getAllOnesValue(SizeTy);
+ }
+
+ // Scale NumElements while we're at it.
+ uint64_t N = NEC.getZExtValue() * ArraySizeMultiplier;
+ NumElements = llvm::ConstantInt::get(SizeTy, N);
+
+ // Otherwise, we don't need to do an overflow-checked multiplication if
+ // we're multiplying by one.
+ } else if (TypeSize.isOne()) {
+ assert(ArraySizeMultiplier == 1);
+
+ Size = NumElements;
+
+ // If we need a cookie, add its size in with an overflow check.
+ // This is maybe a little paranoid.
+ if (!CookieSize.isZero()) {
+ SizeWithoutCookie = Size;
+
+ llvm::Value *CookieSizeV
+ = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
+
+ const llvm::Type *Types[] = { SizeTy };
+ llvm::Value *UAddF
+ = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, Types, 1);
+ llvm::Value *AddRes
+ = CGF.Builder.CreateCall2(UAddF, Size, CookieSizeV);
+
+ Size = CGF.Builder.CreateExtractValue(AddRes, 0);
+ llvm::Value *DidOverflow = CGF.Builder.CreateExtractValue(AddRes, 1);
+ Size = CGF.Builder.CreateSelect(DidOverflow,
+ llvm::ConstantInt::get(SizeTy, -1),
+ Size);
+ }
+
+ // Otherwise use the llvm.umul.with.overflow intrinsic.
+ } else {
+ llvm::Value *OutermostElementSize
+ = llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());
+
+ llvm::Value *NumOutermostElements = NumElements;
+
+ // Scale NumElements by the array size multiplier. This might
+ // overflow, but only if the multiplication below also overflows,
+ // in which case this multiplication isn't used.
+ if (ArraySizeMultiplier != 1)
+ NumElements = CGF.Builder.CreateMul(NumElements,
+ llvm::ConstantInt::get(SizeTy, ArraySizeMultiplier));
+
+ // The requested size of the outermost array is non-constant.
+ // Multiply that by the static size of the elements of that array;
+ // on unsigned overflow, set the size to -1 to trigger an
+ // exception from the allocation routine. This is sufficient to
+ // prevent buffer overruns from the allocator returning a
+ // seemingly valid pointer to insufficient space. This idea comes
+ // originally from MSVC, and GCC has an open bug requesting
+ // similar behavior:
+ // http://gcc.gnu.org/bugzilla/show_bug.cgi?id=19351
+ //
+ // This will not be sufficient for C++0x, which requires a
+ // specific exception class (std::bad_array_new_length).
+ // That will require ABI support that has not yet been specified.
+ const llvm::Type *Types[] = { SizeTy };
+ llvm::Value *UMulF
+ = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, Types, 1);
+ llvm::Value *MulRes = CGF.Builder.CreateCall2(UMulF, NumOutermostElements,
+ OutermostElementSize);
+
+ // The overflow bit.
+ llvm::Value *DidOverflow = CGF.Builder.CreateExtractValue(MulRes, 1);
+
+ // The result of the multiplication.
+ Size = CGF.Builder.CreateExtractValue(MulRes, 0);
+
+ // If we have a cookie, we need to add that size in, too.
+ if (!CookieSize.isZero()) {
+ SizeWithoutCookie = Size;
+
+ llvm::Value *CookieSizeV
+ = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
+ llvm::Value *UAddF
+ = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, Types, 1);
+ llvm::Value *AddRes
+ = CGF.Builder.CreateCall2(UAddF, SizeWithoutCookie, CookieSizeV);
+
+ Size = CGF.Builder.CreateExtractValue(AddRes, 0);
+
+ llvm::Value *AddDidOverflow = CGF.Builder.CreateExtractValue(AddRes, 1);
+ DidOverflow = CGF.Builder.CreateAnd(DidOverflow, AddDidOverflow);
+ }
+
+ Size = CGF.Builder.CreateSelect(DidOverflow,
+ llvm::ConstantInt::get(SizeTy, -1),
+ Size);
+ }
+
+ if (CookieSize.isZero())
+ SizeWithoutCookie = Size;
+ else
+ assert(SizeWithoutCookie && "didn't set SizeWithoutCookie?");
+
+ return Size;
}
static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const CXXNewExpr *E,
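[Illustrative sketch; the function and parameter names are invented, and the checked-arithmetic builtins are the ones available in current Clang/GCC, which lower to the same umul/uadd "with overflow" intrinsics used above.] The net effect of the size computation is: on overflow the requested size becomes (size_t)-1, so operator new[] fails instead of returning a too-small allocation.

    #include <cstddef>

    size_t allocationSize(size_t numElements, size_t elemSize, size_t cookieSize) {
      size_t bytes;
      bool overflow = __builtin_mul_overflow(numElements, elemSize, &bytes);
      overflow |= __builtin_add_overflow(bytes, cookieSize, &bytes);
      return overflow ? (size_t)-1 : bytes;   // -1 makes the allocator throw
    }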
@@ -489,10 +521,13 @@ static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const CXXNewExpr *E,
const Expr *Init = E->getConstructorArg(0);
QualType AllocType = E->getAllocatedType();
-
+
+ unsigned Alignment =
+ CGF.getContext().getTypeAlignInChars(AllocType).getQuantity();
if (!CGF.hasAggregateLLVMType(AllocType))
CGF.EmitStoreOfScalar(CGF.EmitScalarExpr(Init), NewPtr,
- AllocType.isVolatileQualified(), AllocType);
+ AllocType.isVolatileQualified(), Alignment,
+ AllocType);
else if (AllocType->isAnyComplexType())
CGF.EmitComplexExprIntoAddr(Init, NewPtr,
AllocType.isVolatileQualified());
@@ -554,18 +589,59 @@ CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
EmitBlock(AfterFor, true);
}
+static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
+ llvm::Value *NewPtr, llvm::Value *Size) {
+ llvm::LLVMContext &VMContext = CGF.CGM.getLLVMContext();
+ const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
+ if (NewPtr->getType() != BP)
+ NewPtr = CGF.Builder.CreateBitCast(NewPtr, BP, "tmp");
+
+ CGF.Builder.CreateCall5(CGF.CGM.getMemSetFn(BP, CGF.IntPtrTy), NewPtr,
+ llvm::Constant::getNullValue(llvm::Type::getInt8Ty(VMContext)),
+ Size,
+ llvm::ConstantInt::get(CGF.Int32Ty,
+ CGF.getContext().getTypeAlign(T)/8),
+ llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext),
+ 0));
+}
+
static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
llvm::Value *NewPtr,
- llvm::Value *NumElements) {
+ llvm::Value *NumElements,
+ llvm::Value *AllocSizeWithoutCookie) {
if (E->isArray()) {
if (CXXConstructorDecl *Ctor = E->getConstructor()) {
- if (!Ctor->getParent()->hasTrivialConstructor())
- CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
- E->constructor_arg_begin(),
- E->constructor_arg_end());
+ bool RequiresZeroInitialization = false;
+ if (Ctor->getParent()->hasTrivialConstructor()) {
+ // If the new-expression did not specify value-initialization, then there
+ // is no initialization.
+ if (!E->hasInitializer() || Ctor->getParent()->isEmpty())
+ return;
+
+ if (CGF.CGM.getTypes().isZeroInitializable(E->getAllocatedType())) {
+ // Optimization: since zero initialization will just set the memory
+ // to all zeroes, generate a single memset to do it in one shot.
+ EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
+ AllocSizeWithoutCookie);
+ return;
+ }
+
+ RequiresZeroInitialization = true;
+ }
+
+ CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
+ E->constructor_arg_begin(),
+ E->constructor_arg_end(),
+ RequiresZeroInitialization);
return;
- }
- else {
+ } else if (E->getNumConstructorArgs() == 1 &&
+ isa<ImplicitValueInitExpr>(E->getConstructorArg(0))) {
+ // Optimization: since zero initialization will just set the memory
+ // to all zeroes, generate a single memset to do it in one shot.
+ EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
+ AllocSizeWithoutCookie);
+ return;
+ } else {
CGF.EmitNewArrayInitializer(E, NewPtr, NumElements);
return;
}
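The zero-initialization shortcut above, restated as a plain C++ sketch; it assumes a trivially zero-initializable element type, and the helper name is illustrative.

#include <cstring>
#include <new>

// For "new int[n]()", one memset over the whole allocation is equivalent to
// value-initializing every element -- this is what EmitZeroMemSet emits.
int *zeroInitArray(std::size_t n) {
  void *raw = ::operator new[](n * sizeof(int));
  std::memset(raw, 0, n * sizeof(int));
  return static_cast<int *>(raw);
}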
@@ -595,6 +671,10 @@ static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
QualType AllocType = E->getAllocatedType();
+ if (AllocType->isArrayType())
+ while (const ArrayType *AType = getContext().getAsArrayType(AllocType))
+ AllocType = AType->getElementType();
+
FunctionDecl *NewFD = E->getOperatorNew();
const FunctionProtoType *NewFTy = NewFD->getType()->getAs<FunctionProtoType>();
@@ -604,8 +684,10 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
QualType SizeTy = getContext().getSizeType();
llvm::Value *NumElements = 0;
+ llvm::Value *AllocSizeWithoutCookie = 0;
llvm::Value *AllocSize = EmitCXXNewAllocSize(getContext(),
- *this, E, NumElements);
+ *this, E, NumElements,
+ AllocSizeWithoutCookie);
NewArgs.push_back(std::make_pair(RValue::get(AllocSize), SizeTy));
@@ -654,112 +736,69 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
bool NullCheckResult = NewFTy->hasEmptyExceptionSpec() &&
!(AllocType->isPODType() && !E->hasInitializer());
- llvm::BasicBlock *NewNull = 0;
+ llvm::BasicBlock *NullCheckSource = 0;
llvm::BasicBlock *NewNotNull = 0;
llvm::BasicBlock *NewEnd = 0;
llvm::Value *NewPtr = RV.getScalarVal();
+ unsigned AS = cast<llvm::PointerType>(NewPtr->getType())->getAddressSpace();
if (NullCheckResult) {
- NewNull = createBasicBlock("new.null");
+ NullCheckSource = Builder.GetInsertBlock();
NewNotNull = createBasicBlock("new.notnull");
NewEnd = createBasicBlock("new.end");
- llvm::Value *IsNull =
- Builder.CreateICmpEQ(NewPtr,
- llvm::Constant::getNullValue(NewPtr->getType()),
- "isnull");
-
- Builder.CreateCondBr(IsNull, NewNull, NewNotNull);
+ llvm::Value *IsNull = Builder.CreateIsNull(NewPtr, "new.isnull");
+ Builder.CreateCondBr(IsNull, NewEnd, NewNotNull);
EmitBlock(NewNotNull);
}
- CharUnits CookiePadding = CalculateCookiePadding(getContext(), E);
- if (!CookiePadding.isZero()) {
- CharUnits CookieOffset =
- CookiePadding - getContext().getTypeSizeInChars(SizeTy);
-
- llvm::Value *NumElementsPtr =
- Builder.CreateConstInBoundsGEP1_64(NewPtr, CookieOffset.getQuantity());
-
- NumElementsPtr = Builder.CreateBitCast(NumElementsPtr,
- ConvertType(SizeTy)->getPointerTo());
- Builder.CreateStore(NumElements, NumElementsPtr);
-
- // Now add the padding to the new ptr.
- NewPtr = Builder.CreateConstInBoundsGEP1_64(NewPtr,
- CookiePadding.getQuantity());
+ assert((AllocSize == AllocSizeWithoutCookie) ==
+ CalculateCookiePadding(*this, E).isZero());
+ if (AllocSize != AllocSizeWithoutCookie) {
+ assert(E->isArray());
+ NewPtr = CGM.getCXXABI().InitializeArrayCookie(*this, NewPtr, NumElements,
+ AllocType);
}
-
- if (AllocType->isArrayType()) {
- while (const ArrayType *AType = getContext().getAsArrayType(AllocType))
- AllocType = AType->getElementType();
- NewPtr =
- Builder.CreateBitCast(NewPtr,
- ConvertType(getContext().getPointerType(AllocType)));
- EmitNewInitializer(*this, E, NewPtr, NumElements);
- NewPtr = Builder.CreateBitCast(NewPtr, ConvertType(E->getType()));
- }
- else {
- NewPtr = Builder.CreateBitCast(NewPtr, ConvertType(E->getType()));
- EmitNewInitializer(*this, E, NewPtr, NumElements);
+
+ const llvm::Type *ElementPtrTy
+ = ConvertTypeForMem(AllocType)->getPointerTo(AS);
+ NewPtr = Builder.CreateBitCast(NewPtr, ElementPtrTy);
+ if (E->isArray()) {
+ EmitNewInitializer(*this, E, NewPtr, NumElements, AllocSizeWithoutCookie);
+
+ // NewPtr is a pointer to the base element type. If we're
+ // allocating an array of arrays, we'll need to cast back to the
+ // array pointer type.
+ const llvm::Type *ResultTy = ConvertTypeForMem(E->getType());
+ if (NewPtr->getType() != ResultTy)
+ NewPtr = Builder.CreateBitCast(NewPtr, ResultTy);
+ } else {
+ EmitNewInitializer(*this, E, NewPtr, NumElements, AllocSizeWithoutCookie);
}
if (NullCheckResult) {
Builder.CreateBr(NewEnd);
- NewNotNull = Builder.GetInsertBlock();
- EmitBlock(NewNull);
- Builder.CreateBr(NewEnd);
+ llvm::BasicBlock *NotNullSource = Builder.GetInsertBlock();
EmitBlock(NewEnd);
llvm::PHINode *PHI = Builder.CreatePHI(NewPtr->getType());
PHI->reserveOperandSpace(2);
- PHI->addIncoming(NewPtr, NewNotNull);
- PHI->addIncoming(llvm::Constant::getNullValue(NewPtr->getType()), NewNull);
+ PHI->addIncoming(NewPtr, NotNullSource);
+ PHI->addIncoming(llvm::Constant::getNullValue(NewPtr->getType()),
+ NullCheckSource);
NewPtr = PHI;
}
-
- return NewPtr;
-}
-
-static std::pair<llvm::Value *, llvm::Value *>
-GetAllocatedObjectPtrAndNumElements(CodeGenFunction &CGF,
- llvm::Value *Ptr, QualType DeleteTy) {
- QualType SizeTy = CGF.getContext().getSizeType();
- const llvm::Type *SizeLTy = CGF.ConvertType(SizeTy);
-
- CharUnits DeleteTypeAlign = CGF.getContext().getTypeAlignInChars(DeleteTy);
- CharUnits CookiePadding =
- std::max(CGF.getContext().getTypeSizeInChars(SizeTy),
- DeleteTypeAlign);
- assert(!CookiePadding.isZero() && "CookiePadding should not be 0.");
-
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
- CharUnits CookieOffset =
- CookiePadding - CGF.getContext().getTypeSizeInChars(SizeTy);
-
- llvm::Value *AllocatedObjectPtr = CGF.Builder.CreateBitCast(Ptr, Int8PtrTy);
- AllocatedObjectPtr =
- CGF.Builder.CreateConstInBoundsGEP1_64(AllocatedObjectPtr,
- -CookiePadding.getQuantity());
-
- llvm::Value *NumElementsPtr =
- CGF.Builder.CreateConstInBoundsGEP1_64(AllocatedObjectPtr,
- CookieOffset.getQuantity());
- NumElementsPtr =
- CGF.Builder.CreateBitCast(NumElementsPtr, SizeLTy->getPointerTo());
-
- llvm::Value *NumElements = CGF.Builder.CreateLoad(NumElementsPtr);
- NumElements =
- CGF.Builder.CreateIntCast(NumElements, SizeLTy, /*isSigned=*/false);
- return std::make_pair(AllocatedObjectPtr, NumElements);
+ return NewPtr;
}
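For orientation, the Itanium-style cookie bookkeeping that the deleted GetAllocatedObjectPtrAndNumElements hand-coded, and that the CGCXXABI hooks (InitializeArrayCookie / ReadArrayCookie) now own, sketched in ordinary C++; the helper names are hypothetical.

#include <algorithm>
#include <cstddef>

// new T[n]: the allocator returned 'raw'; store n just before the first
// element and hand back raw + padding.
void *initializeCookie(void *raw, std::size_t n, std::size_t align) {
  std::size_t padding = std::max(sizeof(std::size_t), align);
  char *base = static_cast<char *>(raw);
  *reinterpret_cast<std::size_t *>(base + padding - sizeof(std::size_t)) = n;
  return base + padding;
}

// delete[] p: recover the element count and the pointer that was actually
// returned by operator new[] (the one that must be passed to operator delete[]).
void *readCookie(void *p, std::size_t align, std::size_t &n) {
  std::size_t padding = std::max(sizeof(std::size_t), align);
  char *base = static_cast<char *>(p) - padding;
  n = *reinterpret_cast<std::size_t *>(base + padding - sizeof(std::size_t));
  return base;
}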
void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
llvm::Value *Ptr,
QualType DeleteTy) {
+ assert(DeleteFD->getOverloadedOperator() == OO_Delete);
+
const FunctionProtoType *DeleteFTy =
DeleteFD->getType()->getAs<FunctionProtoType>();
@@ -775,21 +814,6 @@ void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
DeleteTypeSize.getQuantity());
}
- if (DeleteFD->getOverloadedOperator() == OO_Array_Delete &&
- !CalculateCookiePadding(getContext(), DeleteTy).isZero()) {
- // We need to get the number of elements in the array from the cookie.
- llvm::Value *AllocatedObjectPtr;
- llvm::Value *NumElements;
- llvm::tie(AllocatedObjectPtr, NumElements) =
- GetAllocatedObjectPtrAndNumElements(*this, Ptr, DeleteTy);
-
- // Multiply the size with the number of elements.
- if (Size)
- Size = Builder.CreateMul(NumElements, Size);
-
- Ptr = AllocatedObjectPtr;
- }
-
QualType ArgTy = DeleteFTy->getArgType(0);
llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
DeleteArgs.push_back(std::make_pair(RValue::get(DeletePtr), ArgTy));
@@ -803,20 +827,169 @@ void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
DeleteArgs, DeleteFD);
}
+namespace {
+ /// Calls the given 'operator delete' on a single object.
+ struct CallObjectDelete : EHScopeStack::Cleanup {
+ llvm::Value *Ptr;
+ const FunctionDecl *OperatorDelete;
+ QualType ElementType;
+
+ CallObjectDelete(llvm::Value *Ptr,
+ const FunctionDecl *OperatorDelete,
+ QualType ElementType)
+ : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
+ }
+ };
+}
+
+/// Emit the code for deleting a single object.
+static void EmitObjectDelete(CodeGenFunction &CGF,
+ const FunctionDecl *OperatorDelete,
+ llvm::Value *Ptr,
+ QualType ElementType) {
+ // Find the destructor for the type, if applicable. If the
+ // destructor is virtual, we'll just emit the vcall and return.
+ const CXXDestructorDecl *Dtor = 0;
+ if (const RecordType *RT = ElementType->getAs<RecordType>()) {
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ if (!RD->hasTrivialDestructor()) {
+ Dtor = RD->getDestructor();
+
+ if (Dtor->isVirtual()) {
+ const llvm::Type *Ty =
+ CGF.getTypes().GetFunctionType(CGF.getTypes().getFunctionInfo(Dtor,
+ Dtor_Complete),
+ /*isVariadic=*/false);
+
+ llvm::Value *Callee
+ = CGF.BuildVirtualCall(Dtor, Dtor_Deleting, Ptr, Ty);
+ CGF.EmitCXXMemberCall(Dtor, Callee, ReturnValueSlot(), Ptr, /*VTT=*/0,
+ 0, 0);
+
+ // The dtor took care of deleting the object.
+ return;
+ }
+ }
+ }
+
+ // Make sure that we call delete even if the dtor throws.
+ CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
+ Ptr, OperatorDelete, ElementType);
+
+ if (Dtor)
+ CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
+ /*ForVirtualBase=*/false, Ptr);
+
+ CGF.PopCleanupBlock();
+}
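Why EmitObjectDelete can return right after the virtual call, shown at the source level; the types here are illustrative.

struct Base { virtual ~Base() {} };
struct Derived : Base {
  // Class-specific deallocation function.
  static void operator delete(void *p) { ::operator delete(p); }
};

void destroy(Base *b) {
  // Virtual dispatch reaches the "deleting destructor" vtable entry, which
  // runs ~Derived() and then calls Derived::operator delete, so codegen has
  // nothing left to do after emitting the call.
  delete b;
}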
+
+namespace {
+ /// Calls the given 'operator delete' on an array of objects.
+ struct CallArrayDelete : EHScopeStack::Cleanup {
+ llvm::Value *Ptr;
+ const FunctionDecl *OperatorDelete;
+ llvm::Value *NumElements;
+ QualType ElementType;
+ CharUnits CookieSize;
+
+ CallArrayDelete(llvm::Value *Ptr,
+ const FunctionDecl *OperatorDelete,
+ llvm::Value *NumElements,
+ QualType ElementType,
+ CharUnits CookieSize)
+ : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
+ ElementType(ElementType), CookieSize(CookieSize) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ const FunctionProtoType *DeleteFTy =
+ OperatorDelete->getType()->getAs<FunctionProtoType>();
+ assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);
+
+ CallArgList Args;
+
+ // Pass the pointer as the first argument.
+ QualType VoidPtrTy = DeleteFTy->getArgType(0);
+ llvm::Value *DeletePtr
+ = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
+ Args.push_back(std::make_pair(RValue::get(DeletePtr), VoidPtrTy));
+
+ // Pass the original requested size as the second argument.
+ if (DeleteFTy->getNumArgs() == 2) {
+ QualType size_t = DeleteFTy->getArgType(1);
+ const llvm::IntegerType *SizeTy
+ = cast<llvm::IntegerType>(CGF.ConvertType(size_t));
+
+ CharUnits ElementTypeSize =
+ CGF.CGM.getContext().getTypeSizeInChars(ElementType);
+
+ // The size of an element, multiplied by the number of elements.
+ llvm::Value *Size
+ = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
+ Size = CGF.Builder.CreateMul(Size, NumElements);
+
+ // Plus the size of the cookie if applicable.
+ if (!CookieSize.isZero()) {
+ llvm::Value *CookieSizeV
+ = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
+ Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
+ }
+
+ Args.push_back(std::make_pair(RValue::get(Size), size_t));
+ }
+
+ // Emit the call to delete.
+ CGF.EmitCall(CGF.getTypes().getFunctionInfo(Args, DeleteFTy),
+ CGF.CGM.GetAddrOfFunction(OperatorDelete),
+ ReturnValueSlot(), Args, OperatorDelete);
+ }
+ };
+}
+
+/// Emit the code for deleting an array of objects.
+static void EmitArrayDelete(CodeGenFunction &CGF,
+ const FunctionDecl *OperatorDelete,
+ llvm::Value *Ptr,
+ QualType ElementType) {
+ llvm::Value *NumElements = 0;
+ llvm::Value *AllocatedPtr = 0;
+ CharUnits CookieSize;
+ CGF.CGM.getCXXABI().ReadArrayCookie(CGF, Ptr, ElementType,
+ NumElements, AllocatedPtr, CookieSize);
+
+ assert(AllocatedPtr && "ReadArrayCookie didn't set AllocatedPtr");
+
+ // Make sure that we call delete even if one of the dtors throws.
+ CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
+ AllocatedPtr, OperatorDelete,
+ NumElements, ElementType,
+ CookieSize);
+
+ if (const CXXRecordDecl *RD = ElementType->getAsCXXRecordDecl()) {
+ if (!RD->hasTrivialDestructor()) {
+ assert(NumElements && "ReadArrayCookie didn't find element count"
+ " for a class with destructor");
+ CGF.EmitCXXAggrDestructorCall(RD->getDestructor(), NumElements, Ptr);
+ }
+ }
+
+ CGF.PopCleanupBlock();
+}
+
void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
// Get at the argument before we performed the implicit conversion
// to void*.
const Expr *Arg = E->getArgument();
while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
- if (ICE->getCastKind() != CastExpr::CK_UserDefinedConversion &&
+ if (ICE->getCastKind() != CK_UserDefinedConversion &&
ICE->getType()->isVoidPointerType())
Arg = ICE->getSubExpr();
else
break;
}
-
- QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
llvm::Value *Ptr = EmitScalarExpr(Arg);
@@ -830,41 +1003,38 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
EmitBlock(DeleteNotNull);
-
- bool ShouldCallDelete = true;
-
- // Call the destructor if necessary.
- if (const RecordType *RT = DeleteTy->getAs<RecordType>()) {
- if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
- if (!RD->hasTrivialDestructor()) {
- const CXXDestructorDecl *Dtor = RD->getDestructor();
- if (E->isArrayForm()) {
- llvm::Value *AllocatedObjectPtr;
- llvm::Value *NumElements;
- llvm::tie(AllocatedObjectPtr, NumElements) =
- GetAllocatedObjectPtrAndNumElements(*this, Ptr, DeleteTy);
-
- EmitCXXAggrDestructorCall(Dtor, NumElements, Ptr);
- } else if (Dtor->isVirtual()) {
- const llvm::Type *Ty =
- CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(Dtor),
- /*isVariadic=*/false);
-
- llvm::Value *Callee = BuildVirtualCall(Dtor, Dtor_Deleting, Ptr, Ty);
- EmitCXXMemberCall(Dtor, Callee, ReturnValueSlot(), Ptr, /*VTT=*/0,
- 0, 0);
-
- // The dtor took care of deleting the object.
- ShouldCallDelete = false;
- } else
- EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
- Ptr);
- }
+
+ // We might be deleting a pointer to array. If so, GEP down to the
+ // first non-array element.
+ // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
+ QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
+ if (DeleteTy->isConstantArrayType()) {
+ llvm::Value *Zero = Builder.getInt32(0);
+ llvm::SmallVector<llvm::Value*,8> GEP;
+
+ GEP.push_back(Zero); // point at the outermost array
+
+ // For each layer of array type we're pointing at:
+ while (const ConstantArrayType *Arr
+ = getContext().getAsConstantArrayType(DeleteTy)) {
+ // 1. Unpeel the array type.
+ DeleteTy = Arr->getElementType();
+
+ // 2. GEP to the first element of the array.
+ GEP.push_back(Zero);
}
+
+ Ptr = Builder.CreateInBoundsGEP(Ptr, GEP.begin(), GEP.end(), "del.first");
}
- if (ShouldCallDelete)
- EmitDeleteCall(E->getOperatorDelete(), Ptr, DeleteTy);
+ assert(ConvertTypeForMem(DeleteTy) ==
+ cast<llvm::PointerType>(Ptr->getType())->getElementType());
+
+ if (E->isArrayForm()) {
+ EmitArrayDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy);
+ } else {
+ EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy);
+ }
EmitBlock(DeleteEnd);
}
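The pointer-to-array case the GEP loop above handles, at the source level (illustrative types):

struct A { ~A() {} };

void f(unsigned n) {
  A (*p)[3][7] = new A[n][3][7];  // allocated type is A[3][7]; element type is A
  delete[] p;                     // must destroy n*3*7 objects of type A
}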
@@ -895,7 +1065,7 @@ llvm::Value * CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
// FIXME: PointerType->hasAttr<NonNullAttr>()
bool CanBeZero = false;
if (UnaryOperator *UO = dyn_cast<UnaryOperator>(subE->IgnoreParens()))
- if (UO->getOpcode() == UnaryOperator::Deref)
+ if (UO->getOpcode() == UO_Deref)
CanBeZero = true;
if (CanBeZero) {
llvm::BasicBlock *NonZeroBlock = createBasicBlock();
diff --git a/lib/CodeGen/CGExprComplex.cpp b/lib/CodeGen/CGExprComplex.cpp
index 0927319db776..79e9dd42ee28 100644
--- a/lib/CodeGen/CGExprComplex.cpp
+++ b/lib/CodeGen/CGExprComplex.cpp
@@ -347,7 +347,7 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastExpr::CastKind CK, Expr *Op,
// FIXME: We should be looking at all of the cast kinds here, not
// cherry-picking the ones we have test cases for.
- if (CK == CastExpr::CK_LValueBitCast) {
+ if (CK == CK_LValueBitCast) {
llvm::Value *V = CGF.EmitLValue(Op).getAddress();
V = Builder.CreateBitCast(V,
CGF.ConvertType(CGF.getContext().getPointerType(DestTy)));
@@ -532,7 +532,7 @@ EmitCompoundAssign(const CompoundAssignOperator *E,
// improve codegen a little. It is possible for the RHS to be complex or
// scalar.
OpInfo.Ty = E->getComputationResultType();
- OpInfo.RHS = EmitCast(CastExpr::CK_Unknown, E->getRHS(), OpInfo.Ty);
+ OpInfo.RHS = EmitCast(CK_Unknown, E->getRHS(), OpInfo.Ty);
LValue LHS = CGF.EmitLValue(E->getLHS());
// We know the LHS is a complex lvalue.
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp
index bbd256c4f1fa..9c31c2a3d538 100644
--- a/lib/CodeGen/CGExprConstant.cpp
+++ b/lib/CodeGen/CGExprConstant.cpp
@@ -13,6 +13,7 @@
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
+#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGRecordLayout.h"
#include "clang/AST/APValue.h"
@@ -81,10 +82,6 @@ AppendField(const FieldDecl *Field, uint64_t FieldOffset,
assert(NextFieldOffsetInBytes <= FieldOffsetInBytes
&& "Field offset mismatch!");
- // Emit the field.
- if (!InitCst)
- return false;
-
unsigned FieldAlignment = getAlignment(InitCst);
// Round up the field offset to the alignment of the field type.
@@ -360,6 +357,9 @@ bool ConstStructBuilder::Build(InitListExpr *ILE) {
Field->getType(), CGF);
else
EltInit = CGM.EmitNullConstant(Field->getType());
+
+ if (!EltInit)
+ return false;
if (!Field->isBitField()) {
// Handle non-bitfield members.
@@ -455,37 +455,15 @@ public:
return Visit(E->getInitializer());
}
- llvm::Constant *EmitMemberFunctionPointer(CXXMethodDecl *MD) {
- assert(MD->isInstance() && "Member function must not be static!");
-
- MD = MD->getCanonicalDecl();
-
- const llvm::Type *PtrDiffTy =
- CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
-
- llvm::Constant *Values[2];
-
- Values[0] = CGM.GetCXXMemberFunctionPointerValue(MD);
-
- // The adjustment will always be 0.
- Values[1] = llvm::ConstantInt::get(PtrDiffTy, 0);
-
- return llvm::ConstantStruct::get(CGM.getLLVMContext(),
- Values, 2, /*Packed=*/false);
- }
-
llvm::Constant *VisitUnaryAddrOf(UnaryOperator *E) {
if (const MemberPointerType *MPT =
- E->getType()->getAs<MemberPointerType>()) {
- QualType T = MPT->getPointeeType();
+ E->getType()->getAs<MemberPointerType>()) {
DeclRefExpr *DRE = cast<DeclRefExpr>(E->getSubExpr());
-
NamedDecl *ND = DRE->getDecl();
- if (T->isFunctionProtoType())
- return EmitMemberFunctionPointer(cast<CXXMethodDecl>(ND));
-
- // We have a pointer to data member.
- return CGM.EmitPointerToDataMember(cast<FieldDecl>(ND));
+ if (MPT->isMemberFunctionPointer())
+ return CGM.getCXXABI().EmitMemberPointer(cast<CXXMethodDecl>(ND));
+ else
+ return CGM.getCXXABI().EmitMemberPointer(cast<FieldDecl>(ND));
}
return 0;
@@ -514,7 +492,7 @@ public:
llvm::Constant *VisitCastExpr(CastExpr* E) {
switch (E->getCastKind()) {
- case CastExpr::CK_ToUnion: {
+ case CK_ToUnion: {
// GCC cast to union extension
assert(E->getType()->isUnionType() &&
"Destination type is not union type!");
@@ -549,44 +527,21 @@ public:
llvm::StructType::get(C->getType()->getContext(), Types, false);
return llvm::ConstantStruct::get(STy, Elts);
}
- case CastExpr::CK_NullToMemberPointer:
- return CGM.EmitNullConstant(E->getType());
+ case CK_NullToMemberPointer: {
+ const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
+ return CGM.getCXXABI().EmitNullMemberPointer(MPT);
+ }
- case CastExpr::CK_BaseToDerivedMemberPointer: {
+ case CK_BaseToDerivedMemberPointer: {
Expr *SubExpr = E->getSubExpr();
+ llvm::Constant *C =
+ CGM.EmitConstantExpr(SubExpr, SubExpr->getType(), CGF);
+ if (!C) return 0;
- const MemberPointerType *SrcTy =
- SubExpr->getType()->getAs<MemberPointerType>();
- const MemberPointerType *DestTy =
- E->getType()->getAs<MemberPointerType>();
-
- const CXXRecordDecl *DerivedClass =
- cast<CXXRecordDecl>(cast<RecordType>(DestTy->getClass())->getDecl());
-
- if (SrcTy->getPointeeType()->isFunctionProtoType()) {
- llvm::Constant *C =
- CGM.EmitConstantExpr(SubExpr, SubExpr->getType(), CGF);
- if (!C)
- return 0;
-
- llvm::ConstantStruct *CS = cast<llvm::ConstantStruct>(C);
-
- // Check if we need to update the adjustment.
- if (llvm::Constant *Offset =
- CGM.GetNonVirtualBaseClassOffset(DerivedClass, E->getBasePath())) {
- llvm::Constant *Values[2];
-
- Values[0] = CS->getOperand(0);
- Values[1] = llvm::ConstantExpr::getAdd(CS->getOperand(1), Offset);
- return llvm::ConstantStruct::get(CGM.getLLVMContext(), Values, 2,
- /*Packed=*/false);
- }
-
- return CS;
- }
+ return CGM.getCXXABI().EmitMemberPointerConversion(C, E);
}
- case CastExpr::CK_BitCast:
+ case CK_BitCast:
// This must be a member function pointer cast.
return Visit(E->getSubExpr());
@@ -792,7 +747,7 @@ public:
case Expr::DeclRefExprClass: {
ValueDecl *Decl = cast<DeclRefExpr>(E)->getDecl();
if (Decl->hasAttr<WeakRefAttr>())
- return CGM.GetWeakRefReference(Decl);
+ return CGM.GetWeakRefReference(Decl);
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Decl))
return CGM.GetAddrOfFunction(FD);
if (const VarDecl* VD = dyn_cast<VarDecl>(Decl)) {
@@ -821,7 +776,7 @@ public:
case Expr::PredefinedExprClass: {
unsigned Type = cast<PredefinedExpr>(E)->getIdentType();
if (CGF) {
- LValue Res = CGF->EmitPredefinedFunctionName(Type);
+ LValue Res = CGF->EmitPredefinedLValue(cast<PredefinedExpr>(E));
return cast<llvm::Constant>(Res.getAddress());
} else if (Type == PredefinedExpr::PrettyFunction) {
return CGM.GetAddrOfConstantCString("top level", ".tmp");
@@ -989,7 +944,7 @@ FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
uint64_t StartOffset) {
assert(StartOffset % 8 == 0 && "StartOffset not byte aligned!");
- if (!CGM.getTypes().ContainsPointerToDataMember(T))
+ if (CGM.getTypes().isZeroInitializable(T))
return;
if (const ConstantArrayType *CAT =
@@ -1022,7 +977,7 @@ FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
continue;
// Ignore bases that don't have any pointer to data members.
- if (!CGM.getTypes().ContainsPointerToDataMember(BaseDecl))
+ if (CGM.getTypes().isZeroInitializable(BaseDecl))
continue;
uint64_t BaseOffset = Layout.getBaseClassOffset(BaseDecl);
@@ -1036,7 +991,7 @@ FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
E = RD->field_end(); I != E; ++I, ++FieldNo) {
QualType FieldType = I->getType();
- if (!CGM.getTypes().ContainsPointerToDataMember(FieldType))
+ if (CGM.getTypes().isZeroInitializable(FieldType))
continue;
uint64_t FieldOffset = StartOffset + Layout.getFieldOffset(FieldNo);
@@ -1061,7 +1016,7 @@ FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
}
llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
- if (!getTypes().ContainsPointerToDataMember(T))
+ if (getTypes().isZeroInitializable(T))
return llvm::Constant::getNullValue(getTypes().ConvertTypeForMem(T));
if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(T)) {
@@ -1105,7 +1060,7 @@ llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
continue;
// Ignore bases that don't have any pointer to data members.
- if (!getTypes().ContainsPointerToDataMember(BaseDecl))
+ if (getTypes().isZeroInitializable(BaseDecl))
continue;
// Currently, all bases are arrays of i8. Figure out how many elements
@@ -1165,29 +1120,3 @@ llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
return llvm::ConstantInt::get(getTypes().ConvertTypeForMem(T), -1ULL,
/*isSigned=*/true);
}
-
-llvm::Constant *
-CodeGenModule::EmitPointerToDataMember(const FieldDecl *FD) {
-
- // Itanium C++ ABI 2.3:
- // A pointer to data member is an offset from the base address of the class
- // object containing it, represented as a ptrdiff_t
-
- const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(FD->getParent());
- QualType ClassType =
- getContext().getTypeDeclType(const_cast<CXXRecordDecl *>(ClassDecl));
-
- const llvm::StructType *ClassLTy =
- cast<llvm::StructType>(getTypes().ConvertType(ClassType));
-
- const CGRecordLayout &RL =
- getTypes().getCGRecordLayout(FD->getParent());
- unsigned FieldNo = RL.getLLVMFieldNo(FD);
- uint64_t Offset =
- getTargetData().getStructLayout(ClassLTy)->getElementOffset(FieldNo);
-
- const llvm::Type *PtrDiffTy =
- getTypes().ConvertType(getContext().getPointerDiffType());
-
- return llvm::ConstantInt::get(PtrDiffTy, Offset);
-}
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index ef38209e1eaf..2318cc4e9aeb 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "CodeGenFunction.h"
+#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
@@ -137,7 +138,7 @@ public:
CGF.getContext().typesAreCompatible(
E->getArgType1(), E->getArgType2()));
}
- Value *VisitOffsetOfExpr(const OffsetOfExpr *E);
+ Value *VisitOffsetOfExpr(OffsetOfExpr *E);
Value *VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E);
Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
@@ -149,7 +150,10 @@ public:
Expr::EvalResult Result;
if (E->Evaluate(Result, CGF.getContext()) && Result.Val.isInt()) {
assert(!Result.HasSideEffects && "Constant declref with side-effect?!");
- return llvm::ConstantInt::get(VMContext, Result.Val.getInt());
+ llvm::ConstantInt *CI
+ = llvm::ConstantInt::get(VMContext, Result.Val.getInt());
+ CGF.EmitDeclRefExprDbgValue(E, CI);
+ return CI;
}
return EmitLoadOfLValue(E);
}
@@ -235,6 +239,9 @@ public:
Value *VisitUnaryAddrOf(const UnaryOperator *E) {
+ // If the sub-expression is an instance member reference,
+ // EmitDeclRefLValue will magically emit it with the appropriate
+ // value as the "address".
return EmitLValue(E->getSubExpr()).getAddress();
}
Value *VisitUnaryDeref(const Expr *E) { return EmitLoadOfLValue(E); }
@@ -251,7 +258,6 @@ public:
Value *VisitUnaryExtension(const UnaryOperator *E) {
return Visit(E->getSubExpr());
}
- Value *VisitUnaryOffsetOf(const UnaryOperator *E);
// C++
Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
@@ -297,7 +303,7 @@ public:
// Binary Operators.
Value *EmitMul(const BinOpInfo &Ops) {
- if (Ops.Ty->isSignedIntegerType()) {
+ if (Ops.Ty->hasSignedIntegerRepresentation()) {
switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
case LangOptions::SOB_Undefined:
return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
@@ -409,11 +415,8 @@ Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
return Builder.CreateFCmpUNE(Src, Zero, "tobool");
}
- if (SrcType->isMemberPointerType()) {
- // Compare against -1.
- llvm::Value *NegativeOne = llvm::Constant::getAllOnesValue(Src->getType());
- return Builder.CreateICmpNE(Src, NegativeOne, "tobool");
- }
+ if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
+ return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);
assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
"Unknown scalar type to convert");
@@ -562,17 +565,10 @@ EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
}
Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
- const llvm::Type *LTy = ConvertType(Ty);
-
- if (!Ty->isMemberPointerType())
- return llvm::Constant::getNullValue(LTy);
-
- assert(!Ty->isMemberFunctionPointerType() &&
- "member function pointers are not scalar!");
+ if (const MemberPointerType *MPT = Ty->getAs<MemberPointerType>())
+ return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
- // Itanium C++ ABI 2.3:
- // A NULL pointer is represented as -1.
- return llvm::ConstantInt::get(LTy, -1ULL, /*isSigned=*/true);
+ return llvm::Constant::getNullValue(ConvertType(Ty));
}
//===----------------------------------------------------------------------===//
@@ -888,7 +884,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
static bool ShouldNullCheckClassCastValue(const CastExpr *CE) {
const Expr *E = CE->getSubExpr();
- if (CE->getCastKind() == CastExpr::CK_UncheckedDerivedToBase)
+ if (CE->getCastKind() == CK_UncheckedDerivedToBase)
return false;
if (isa<CXXThisExpr>(E)) {
@@ -897,8 +893,8 @@ static bool ShouldNullCheckClassCastValue(const CastExpr *CE) {
}
if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
- // And that lvalue casts are never null.
- if (ICE->isLvalueCast())
+ // And that glvalue casts are never null.
+ if (ICE->getValueKind() != VK_RValue)
return false;
}
@@ -911,7 +907,7 @@ static bool ShouldNullCheckClassCastValue(const CastExpr *CE) {
Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
Expr *E = CE->getSubExpr();
QualType DestTy = CE->getType();
- CastExpr::CastKind Kind = CE->getCastKind();
+ CastKind Kind = CE->getCastKind();
if (!DestTy->isVoidType())
TestAndClearIgnoreResultAssign();
@@ -920,59 +916,58 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
// a default case, so the compiler will warn on a missing case. The cases
// are in the same order as in the CastKind enum.
switch (Kind) {
- case CastExpr::CK_Unknown:
+ case CK_Unknown:
// FIXME: All casts should have a known kind!
//assert(0 && "Unknown cast kind!");
break;
- case CastExpr::CK_LValueBitCast: {
+ case CK_LValueBitCast:
+ case CK_ObjCObjectLValueCast: {
Value *V = EmitLValue(E).getAddress();
V = Builder.CreateBitCast(V,
ConvertType(CGF.getContext().getPointerType(DestTy)));
- // FIXME: Are the qualifiers correct here?
- return EmitLoadOfLValue(LValue::MakeAddr(V, CGF.MakeQualifiers(DestTy)),
- DestTy);
+ return EmitLoadOfLValue(CGF.MakeAddrLValue(V, DestTy), DestTy);
}
- case CastExpr::CK_AnyPointerToObjCPointerCast:
- case CastExpr::CK_AnyPointerToBlockPointerCast:
- case CastExpr::CK_BitCast: {
+ case CK_AnyPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_BitCast: {
Value *Src = Visit(const_cast<Expr*>(E));
return Builder.CreateBitCast(Src, ConvertType(DestTy));
}
- case CastExpr::CK_NoOp:
- case CastExpr::CK_UserDefinedConversion:
+ case CK_NoOp:
+ case CK_UserDefinedConversion:
return Visit(const_cast<Expr*>(E));
- case CastExpr::CK_BaseToDerived: {
+ case CK_BaseToDerived: {
const CXXRecordDecl *DerivedClassDecl =
DestTy->getCXXRecordDeclForPointerType();
return CGF.GetAddressOfDerivedClass(Visit(E), DerivedClassDecl,
- CE->getBasePath(),
+ CE->path_begin(), CE->path_end(),
ShouldNullCheckClassCastValue(CE));
}
- case CastExpr::CK_UncheckedDerivedToBase:
- case CastExpr::CK_DerivedToBase: {
+ case CK_UncheckedDerivedToBase:
+ case CK_DerivedToBase: {
const RecordType *DerivedClassTy =
E->getType()->getAs<PointerType>()->getPointeeType()->getAs<RecordType>();
CXXRecordDecl *DerivedClassDecl =
cast<CXXRecordDecl>(DerivedClassTy->getDecl());
return CGF.GetAddressOfBaseClass(Visit(E), DerivedClassDecl,
- CE->getBasePath(),
+ CE->path_begin(), CE->path_end(),
ShouldNullCheckClassCastValue(CE));
}
- case CastExpr::CK_Dynamic: {
+ case CK_Dynamic: {
Value *V = Visit(const_cast<Expr*>(E));
const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
return CGF.EmitDynamicCast(V, DCE);
}
- case CastExpr::CK_ToUnion:
+ case CK_ToUnion:
assert(0 && "Should be unreachable!");
break;
- case CastExpr::CK_ArrayToPointerDecay: {
+ case CK_ArrayToPointerDecay: {
assert(E->getType()->isArrayType() &&
"Array to pointer decay must have array source type!");
@@ -990,62 +985,66 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
return V;
}
- case CastExpr::CK_FunctionToPointerDecay:
+ case CK_FunctionToPointerDecay:
return EmitLValue(E).getAddress();
- case CastExpr::CK_NullToMemberPointer:
- return CGF.CGM.EmitNullConstant(DestTy);
+ case CK_NullToMemberPointer: {
+ // If the subexpression's type is the C++0x nullptr_t, emit the
+ // subexpression, which may have side effects.
+ if (E->getType()->isNullPtrType())
+ (void) Visit(E);
- case CastExpr::CK_BaseToDerivedMemberPointer:
- case CastExpr::CK_DerivedToBaseMemberPointer: {
- Value *Src = Visit(E);
+ const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
+ return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
+ }
- // See if we need to adjust the pointer.
- const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(E->getType()->getAs<MemberPointerType>()->
- getClass()->getAs<RecordType>()->getDecl());
- const CXXRecordDecl *DerivedDecl =
- cast<CXXRecordDecl>(CE->getType()->getAs<MemberPointerType>()->
- getClass()->getAs<RecordType>()->getDecl());
- if (CE->getCastKind() == CastExpr::CK_DerivedToBaseMemberPointer)
- std::swap(DerivedDecl, BaseDecl);
-
- if (llvm::Constant *Adj =
- CGF.CGM.GetNonVirtualBaseClassOffset(DerivedDecl, CE->getBasePath())){
- if (CE->getCastKind() == CastExpr::CK_DerivedToBaseMemberPointer)
- Src = Builder.CreateNSWSub(Src, Adj, "adj");
- else
- Src = Builder.CreateNSWAdd(Src, Adj, "adj");
- }
+ case CK_BaseToDerivedMemberPointer:
+ case CK_DerivedToBaseMemberPointer: {
+ Value *Src = Visit(E);
- return Src;
+ // Note that the AST doesn't distinguish between checked and
+ // unchecked member pointer conversions, so we always have to
+ // implement checked conversions here. This is inefficient when
+ // actual control flow may be required in order to perform the
+ // check, which it is for data member pointers (but not member
+ // function pointers on Itanium and ARM).
+ return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
}
+
- case CastExpr::CK_ConstructorConversion:
+ case CK_ConstructorConversion:
assert(0 && "Should be unreachable!");
break;
- case CastExpr::CK_IntegralToPointer: {
+ case CK_IntegralToPointer: {
Value *Src = Visit(const_cast<Expr*>(E));
-
+
// First, convert to the correct width so that we control the kind of
// extension.
const llvm::Type *MiddleTy = CGF.IntPtrTy;
bool InputSigned = E->getType()->isSignedIntegerType();
llvm::Value* IntResult =
Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
-
+
return Builder.CreateIntToPtr(IntResult, ConvertType(DestTy));
}
- case CastExpr::CK_PointerToIntegral: {
+ case CK_PointerToIntegral: {
Value *Src = Visit(const_cast<Expr*>(E));
+
+ // Handle conversion to bool correctly.
+ if (DestTy->isBooleanType())
+ return EmitScalarConversion(Src, E->getType(), DestTy);
+
return Builder.CreatePtrToInt(Src, ConvertType(DestTy));
}
- case CastExpr::CK_ToVoid: {
- CGF.EmitAnyExpr(E, 0, false, true);
+ case CK_ToVoid: {
+ if (E->Classify(CGF.getContext()).isGLValue())
+ CGF.EmitLValue(E);
+ else
+ CGF.EmitAnyExpr(E, 0, false, true);
return 0;
}
- case CastExpr::CK_VectorSplat: {
+ case CK_VectorSplat: {
const llvm::Type *DstTy = ConvertType(DestTy);
Value *Elt = Visit(const_cast<Expr*>(E));
@@ -1064,16 +1063,19 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
return Yay;
}
- case CastExpr::CK_IntegralCast:
- case CastExpr::CK_IntegralToFloating:
- case CastExpr::CK_FloatingToIntegral:
- case CastExpr::CK_FloatingCast:
+ case CK_IntegralCast:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingCast:
return EmitScalarConversion(Visit(E), E->getType(), DestTy);
- case CastExpr::CK_MemberPointerToBoolean:
- return CGF.EvaluateExprAsBool(E);
+ case CK_MemberPointerToBoolean: {
+ llvm::Value *MemPtr = Visit(E);
+ const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
+ return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
}
-
+ }
+
// Handle cases where the source is a non-complex type.
if (!CGF.hasAggregateLLVMType(E->getType())) {
@@ -1116,7 +1118,7 @@ Value *ScalarExprEmitter::VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
llvm::Value *V = CGF.GetAddrOfBlockDecl(E);
if (E->getType().isObjCGCWeak())
return CGF.CGM.getObjCRuntime().EmitObjCWeakRead(CGF, V);
- return Builder.CreateLoad(V, "tmp");
+ return CGF.EmitLoadOfScalar(V, false, 0, E->getType());
}
//===----------------------------------------------------------------------===//
@@ -1156,7 +1158,7 @@ EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
NextVal = Builder.CreateGEP(InVal, Inc, "add.ptr");
llvm::Value *lhs = LV.getAddress();
lhs = Builder.CreateBitCast(lhs, llvm::PointerType::getUnqual(i8Ty));
- LV = LValue::MakeAddr(lhs, CGF.MakeQualifiers(ValTy));
+ LV = CGF.MakeAddrLValue(lhs, ValTy);
} else
NextVal = Builder.CreateInBoundsGEP(InVal, Inc, "ptrincdec");
} else {
@@ -1191,9 +1193,10 @@ EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
BinOp.LHS = InVal;
BinOp.RHS = NextVal;
BinOp.Ty = E->getType();
- BinOp.Opcode = BinaryOperator::Add;
+ BinOp.Opcode = BO_Add;
BinOp.E = E;
- return EmitOverflowCheckedBinOp(BinOp);
+ NextVal = EmitOverflowCheckedBinOp(BinOp);
+ break;
}
}
} else {
@@ -1240,7 +1243,7 @@ Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
else
BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
BinOp.Ty = E->getType();
- BinOp.Opcode = BinaryOperator::Sub;
+ BinOp.Opcode = BO_Sub;
BinOp.E = E;
return EmitSub(BinOp);
}
@@ -1264,19 +1267,94 @@ Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
}
-Value *ScalarExprEmitter::VisitOffsetOfExpr(const OffsetOfExpr *E) {
- Expr::EvalResult Result;
- if(E->Evaluate(Result, CGF.getContext()))
- return llvm::ConstantInt::get(VMContext, Result.Val.getInt());
-
- // FIXME: Cannot support code generation for non-constant offsetof.
- unsigned DiagID = CGF.CGM.getDiags().getCustomDiagID(Diagnostic::Error,
- "cannot compile non-constant __builtin_offsetof");
- CGF.CGM.getDiags().Report(CGF.getContext().getFullLoc(E->getLocStart()),
- DiagID)
- << E->getSourceRange();
-
- return llvm::Constant::getNullValue(ConvertType(E->getType()));
+Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
+ // Try folding the offsetof to a constant.
+ Expr::EvalResult EvalResult;
+ if (E->Evaluate(EvalResult, CGF.getContext()))
+ return llvm::ConstantInt::get(VMContext, EvalResult.Val.getInt());
+
+ // Loop over the components of the offsetof to compute the value.
+ unsigned n = E->getNumComponents();
+ const llvm::Type* ResultType = ConvertType(E->getType());
+ llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
+ QualType CurrentType = E->getTypeSourceInfo()->getType();
+ for (unsigned i = 0; i != n; ++i) {
+ OffsetOfExpr::OffsetOfNode ON = E->getComponent(i);
+ llvm::Value *Offset = 0;
+ switch (ON.getKind()) {
+ case OffsetOfExpr::OffsetOfNode::Array: {
+ // Compute the index
+ Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
+ llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
+ bool IdxSigned = IdxExpr->getType()->isSignedIntegerType();
+ Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");
+
+ // Save the element type
+ CurrentType =
+ CGF.getContext().getAsArrayType(CurrentType)->getElementType();
+
+ // Compute the element size
+ llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
+ CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());
+
+ // Multiply out to compute the result
+ Offset = Builder.CreateMul(Idx, ElemSize);
+ break;
+ }
+
+ case OffsetOfExpr::OffsetOfNode::Field: {
+ FieldDecl *MemberDecl = ON.getField();
+ RecordDecl *RD = CurrentType->getAs<RecordType>()->getDecl();
+ const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
+
+ // Compute the index of the field in its parent.
+ unsigned i = 0;
+ // FIXME: It would be nice if we didn't have to loop here!
+ for (RecordDecl::field_iterator Field = RD->field_begin(),
+ FieldEnd = RD->field_end();
+ Field != FieldEnd; (void)++Field, ++i) {
+ if (*Field == MemberDecl)
+ break;
+ }
+ assert(i < RL.getFieldCount() && "offsetof field in wrong type");
+
+ // Compute the offset to the field
+ int64_t OffsetInt = RL.getFieldOffset(i) /
+ CGF.getContext().getCharWidth();
+ Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
+
+ // Save the element type.
+ CurrentType = MemberDecl->getType();
+ break;
+ }
+
+ case OffsetOfExpr::OffsetOfNode::Identifier:
+ llvm_unreachable("dependent __builtin_offsetof");
+
+ case OffsetOfExpr::OffsetOfNode::Base: {
+ if (ON.getBase()->isVirtual()) {
+ CGF.ErrorUnsupported(E, "virtual base in offsetof");
+ continue;
+ }
+
+ RecordDecl *RD = CurrentType->getAs<RecordType>()->getDecl();
+ const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
+
+ // Save the element type.
+ CurrentType = ON.getBase()->getType();
+
+ // Compute the offset to the base.
+ const RecordType *BaseRT = CurrentType->getAs<RecordType>();
+ CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
+ int64_t OffsetInt = RL.getBaseClassOffset(BaseRD) /
+ CGF.getContext().getCharWidth();
+ Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
+ break;
+ }
+ }
+ Result = Builder.CreateAdd(Result, Offset);
+ }
+ return Result;
}
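A worked example of what the component loop above computes for a non-constant __builtin_offsetof; the struct names are illustrative.

#include <cstddef>

struct Inner { int x, y; };
struct S { char c; Inner inner[4]; };

// __builtin_offsetof(S, inner[i].y) with a runtime i decomposes into:
//   Field 'inner' -> offsetof(S, inner)
//   Array [i]     -> i * sizeof(Inner)
//   Field 'y'     -> offsetof(Inner, y)
std::size_t runtimeOffset(std::size_t i) {
  return offsetof(S, inner) + i * sizeof(Inner) + offsetof(Inner, y);
}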
/// VisitSizeOfAlignOfExpr - Return the size or alignment of the type of
@@ -1327,12 +1405,6 @@ Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
return llvm::Constant::getNullValue(ConvertType(E->getType()));
}
-Value *ScalarExprEmitter::VisitUnaryOffsetOf(const UnaryOperator *E) {
- Value* ResultAsPtr = EmitLValue(E->getSubExpr()).getAddress();
- const llvm::Type* ResultType = ConvertType(E->getType());
- return Builder.CreatePtrToInt(ResultAsPtr, ResultType, "offsetof");
-}
-
//===----------------------------------------------------------------------===//
// Binary Operators
//===----------------------------------------------------------------------===//
@@ -1422,7 +1494,7 @@ Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
if (Ops.LHS->getType()->isFPOrFPVectorTy())
return Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
- else if (Ops.Ty->isUnsignedIntegerType())
+ else if (Ops.Ty->hasUnsignedIntegerRepresentation())
return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
else
return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
@@ -1441,18 +1513,18 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
unsigned OpID = 0;
switch (Ops.Opcode) {
- case BinaryOperator::Add:
- case BinaryOperator::AddAssign:
+ case BO_Add:
+ case BO_AddAssign:
OpID = 1;
IID = llvm::Intrinsic::sadd_with_overflow;
break;
- case BinaryOperator::Sub:
- case BinaryOperator::SubAssign:
+ case BO_Sub:
+ case BO_SubAssign:
OpID = 2;
IID = llvm::Intrinsic::ssub_with_overflow;
break;
- case BinaryOperator::Mul:
- case BinaryOperator::MulAssign:
+ case BO_Mul:
+ case BO_MulAssign:
OpID = 3;
IID = llvm::Intrinsic::smul_with_overflow;
break;
@@ -1472,58 +1544,26 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
// Branch in case of overflow.
- llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
- llvm::BasicBlock *overflowBB =
- CGF.createBasicBlock("overflow", CGF.CurFn);
- llvm::BasicBlock *continueBB =
- CGF.createBasicBlock("overflow.continue", CGF.CurFn);
+ llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
+ llvm::BasicBlock *continueBB = CGF.createBasicBlock("nooverflow", CGF.CurFn);
Builder.CreateCondBr(overflow, overflowBB, continueBB);
- // Handle overflow
-
+ // Handle overflow with llvm.trap.
+ // TODO: it would be better to generate one of these blocks per function.
Builder.SetInsertPoint(overflowBB);
-
- // Handler is:
- // long long *__overflow_handler)(long long a, long long b, char op,
- // char width)
- std::vector<const llvm::Type*> handerArgTypes;
- handerArgTypes.push_back(CGF.Int64Ty);
- handerArgTypes.push_back(CGF.Int64Ty);
- handerArgTypes.push_back(llvm::Type::getInt8Ty(VMContext));
- handerArgTypes.push_back(llvm::Type::getInt8Ty(VMContext));
- llvm::FunctionType *handlerTy =
- llvm::FunctionType::get(CGF.Int64Ty, handerArgTypes, false);
- llvm::Value *handlerFunction =
- CGF.CGM.getModule().getOrInsertGlobal("__overflow_handler",
- llvm::PointerType::getUnqual(handlerTy));
- handlerFunction = Builder.CreateLoad(handlerFunction);
-
- llvm::Value *handlerResult = Builder.CreateCall4(handlerFunction,
- Builder.CreateSExt(Ops.LHS, CGF.Int64Ty),
- Builder.CreateSExt(Ops.RHS, CGF.Int64Ty),
- llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), OpID),
- llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext),
- cast<llvm::IntegerType>(opTy)->getBitWidth()));
-
- handlerResult = Builder.CreateTrunc(handlerResult, opTy);
-
- Builder.CreateBr(continueBB);
-
- // Set up the continuation
+ llvm::Function *Trap = CGF.CGM.getIntrinsic(llvm::Intrinsic::trap);
+ Builder.CreateCall(Trap);
+ Builder.CreateUnreachable();
+
+ // Continue on.
Builder.SetInsertPoint(continueBB);
- // Get the correct result
- llvm::PHINode *phi = Builder.CreatePHI(opTy);
- phi->reserveOperandSpace(2);
- phi->addIncoming(result, initialBB);
- phi->addIncoming(handlerResult, overflowBB);
-
- return phi;
+ return result;
}
Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
if (!Ops.Ty->isAnyPointerType()) {
- if (Ops.Ty->isSignedIntegerType()) {
+ if (Ops.Ty->hasSignedIntegerRepresentation()) {
switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
case LangOptions::SOB_Undefined:
return Builder.CreateNSWAdd(Ops.LHS, Ops.RHS, "add");
@@ -1606,7 +1646,7 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
if (!isa<llvm::PointerType>(Ops.LHS->getType())) {
- if (Ops.Ty->isSignedIntegerType()) {
+ if (Ops.Ty->hasSignedIntegerRepresentation()) {
switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) {
case LangOptions::SOB_Undefined:
return Builder.CreateNSWSub(Ops.LHS, Ops.RHS, "sub");
@@ -1747,7 +1787,7 @@ Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
CGF.EmitBlock(Cont);
}
- if (Ops.Ty->isUnsignedIntegerType())
+ if (Ops.Ty->hasUnsignedIntegerRepresentation())
return Builder.CreateLShr(Ops.LHS, RHS, "shr");
return Builder.CreateAShr(Ops.LHS, RHS, "shr");
}
@@ -1757,33 +1797,13 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
TestAndClearIgnoreResultAssign();
Value *Result;
QualType LHSTy = E->getLHS()->getType();
- if (LHSTy->isMemberFunctionPointerType()) {
- Value *LHSPtr = CGF.EmitAnyExprToTemp(E->getLHS()).getAggregateAddr();
- Value *RHSPtr = CGF.EmitAnyExprToTemp(E->getRHS()).getAggregateAddr();
- llvm::Value *LHSFunc = Builder.CreateStructGEP(LHSPtr, 0);
- LHSFunc = Builder.CreateLoad(LHSFunc);
- llvm::Value *RHSFunc = Builder.CreateStructGEP(RHSPtr, 0);
- RHSFunc = Builder.CreateLoad(RHSFunc);
- Value *ResultF = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
- LHSFunc, RHSFunc, "cmp.func");
- Value *NullPtr = llvm::Constant::getNullValue(LHSFunc->getType());
- Value *ResultNull = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
- LHSFunc, NullPtr, "cmp.null");
- llvm::Value *LHSAdj = Builder.CreateStructGEP(LHSPtr, 1);
- LHSAdj = Builder.CreateLoad(LHSAdj);
- llvm::Value *RHSAdj = Builder.CreateStructGEP(RHSPtr, 1);
- RHSAdj = Builder.CreateLoad(RHSAdj);
- Value *ResultA = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
- LHSAdj, RHSAdj, "cmp.adj");
- if (E->getOpcode() == BinaryOperator::EQ) {
- Result = Builder.CreateOr(ResultNull, ResultA, "or.na");
- Result = Builder.CreateAnd(Result, ResultF, "and.f");
- } else {
- assert(E->getOpcode() == BinaryOperator::NE &&
- "Member pointer comparison other than == or != ?");
- Result = Builder.CreateAnd(ResultNull, ResultA, "and.na");
- Result = Builder.CreateOr(Result, ResultF, "or.f");
- }
+ if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
+ assert(E->getOpcode() == BO_EQ ||
+ E->getOpcode() == BO_NE);
+ Value *LHS = CGF.EmitScalarExpr(E->getLHS());
+ Value *RHS = CGF.EmitScalarExpr(E->getRHS());
+ Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
+ CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
} else if (!LHSTy->isAnyComplexType()) {
Value *LHS = Visit(E->getLHS());
Value *RHS = Visit(E->getRHS());
@@ -1791,7 +1811,7 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
if (LHS->getType()->isFPOrFPVectorTy()) {
Result = Builder.CreateFCmp((llvm::CmpInst::Predicate)FCmpOpc,
LHS, RHS, "cmp");
- } else if (LHSTy->isSignedIntegerType()) {
+ } else if (LHSTy->hasSignedIntegerRepresentation()) {
Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)SICmpOpc,
LHS, RHS, "cmp");
} else {
@@ -1827,10 +1847,10 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
LHS.second, RHS.second, "cmp.i");
}
- if (E->getOpcode() == BinaryOperator::EQ) {
+ if (E->getOpcode() == BO_EQ) {
Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
} else {
- assert(E->getOpcode() == BinaryOperator::NE &&
+ assert(E->getOpcode() == BO_NE &&
"Complex comparison other than == or != ?");
Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
}
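The Itanium equality rule that the deleted block open-coded and that EmitMemberPointerComparison now supplies, sketched with an illustrative struct; the ARM variant encodes the adjustment field differently.

#include <cstdint>

// Itanium member function pointer representation: { ptr, adj }; null is ptr == 0.
struct MemberFunctionPointer { std::intptr_t ptr; std::intptr_t adj; };

bool equal(const MemberFunctionPointer &a, const MemberFunctionPointer &b) {
  // Equal when the ptr fields match and, unless both are null, the
  // adjustments match as well.
  return a.ptr == b.ptr && (a.ptr == 0 || a.adj == b.adj);
}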
@@ -2044,7 +2064,12 @@ VisitConditionalOperator(const ConditionalOperator *E) {
return Builder.CreateSelect(CondV, LHS, RHS, "cond");
}
-
+ if (!E->getLHS() && CGF.getContext().getLangOptions().CPlusPlus) {
+ // Does not support GNU missing condition extension in C++ yet (see #7726)
+ CGF.ErrorUnsupported(E, "conditional operator with missing LHS");
+ return llvm::UndefValue::get(ConvertType(E->getType()));
+ }
+
llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
@@ -2188,21 +2213,19 @@ LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
V = CreateTempAlloca(ClassPtrTy, "resval");
llvm::Value *Src = EmitScalarExpr(BaseExpr);
Builder.CreateStore(Src, V);
- LValue LV = LValue::MakeAddr(V, MakeQualifiers(E->getType()));
- V = ScalarExprEmitter(*this).EmitLoadOfLValue(LV, E->getType());
- }
- else {
- if (E->isArrow())
- V = ScalarExprEmitter(*this).EmitLoadOfLValue(BaseExpr);
- else
- V = EmitLValue(BaseExpr).getAddress();
+ V = ScalarExprEmitter(*this).EmitLoadOfLValue(
+ MakeAddrLValue(V, E->getType()), E->getType());
+ } else {
+ if (E->isArrow())
+ V = ScalarExprEmitter(*this).EmitLoadOfLValue(BaseExpr);
+ else
+ V = EmitLValue(BaseExpr).getAddress();
}
// build Class* type
ClassPtrTy = ClassPtrTy->getPointerTo();
V = Builder.CreateBitCast(V, ClassPtrTy);
- LValue LV = LValue::MakeAddr(V, MakeQualifiers(E->getType()));
- return LV;
+ return MakeAddrLValue(V, E->getType());
}
@@ -2212,7 +2235,7 @@ LValue CodeGenFunction::EmitCompoundAssignOperatorLValue(
Value *Result = 0;
switch (E->getOpcode()) {
#define COMPOUND_OP(Op) \
- case BinaryOperator::Op##Assign: \
+ case BO_##Op##Assign: \
return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
Result)
COMPOUND_OP(Mul);
@@ -2227,28 +2250,28 @@ LValue CodeGenFunction::EmitCompoundAssignOperatorLValue(
COMPOUND_OP(Or);
#undef COMPOUND_OP
- case BinaryOperator::PtrMemD:
- case BinaryOperator::PtrMemI:
- case BinaryOperator::Mul:
- case BinaryOperator::Div:
- case BinaryOperator::Rem:
- case BinaryOperator::Add:
- case BinaryOperator::Sub:
- case BinaryOperator::Shl:
- case BinaryOperator::Shr:
- case BinaryOperator::LT:
- case BinaryOperator::GT:
- case BinaryOperator::LE:
- case BinaryOperator::GE:
- case BinaryOperator::EQ:
- case BinaryOperator::NE:
- case BinaryOperator::And:
- case BinaryOperator::Xor:
- case BinaryOperator::Or:
- case BinaryOperator::LAnd:
- case BinaryOperator::LOr:
- case BinaryOperator::Assign:
- case BinaryOperator::Comma:
+ case BO_PtrMemD:
+ case BO_PtrMemI:
+ case BO_Mul:
+ case BO_Div:
+ case BO_Rem:
+ case BO_Add:
+ case BO_Sub:
+ case BO_Shl:
+ case BO_Shr:
+ case BO_LT:
+ case BO_GT:
+ case BO_LE:
+ case BO_GE:
+ case BO_EQ:
+ case BO_NE:
+ case BO_And:
+ case BO_Xor:
+ case BO_Or:
+ case BO_LAnd:
+ case BO_LOr:
+ case BO_Assign:
+ case BO_Comma:
assert(false && "Not valid compound assignment operators");
break;
}
diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp
index e735a617546c..6a6d63df8f52 100644
--- a/lib/CodeGen/CGObjC.cpp
+++ b/lib/CodeGen/CGObjC.cpp
@@ -403,13 +403,14 @@ void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
// Objective-C pointer types, we can always bit cast the RHS in these cases.
if (getContext().getCanonicalType(Ivar->getType()) !=
getContext().getCanonicalType(ArgDecl->getType())) {
- ImplicitCastExpr ArgCasted(Ivar->getType(), CastExpr::CK_BitCast, &Arg,
- CXXBaseSpecifierArray(), false);
- BinaryOperator Assign(&IvarRef, &ArgCasted, BinaryOperator::Assign,
+ ImplicitCastExpr ArgCasted(ImplicitCastExpr::OnStack,
+ Ivar->getType(), CK_BitCast, &Arg,
+ VK_RValue);
+ BinaryOperator Assign(&IvarRef, &ArgCasted, BO_Assign,
Ivar->getType(), Loc);
EmitStmt(&Assign);
} else {
- BinaryOperator Assign(&IvarRef, &Arg, BinaryOperator::Assign,
+ BinaryOperator Assign(&IvarRef, &Arg, BO_Assign,
Ivar->getType(), Loc);
EmitStmt(&Assign);
}
@@ -571,7 +572,7 @@ void CodeGenFunction::EmitObjCSuperPropertySet(const Expr *Exp,
Args.push_back(std::make_pair(Src, Exp->getType()));
CGM.getObjCRuntime().GenerateMessageSendSuper(*this,
ReturnValueSlot(),
- Exp->getType(),
+ getContext().VoidTy,
S,
OMD->getClassInterface(),
isCategoryImpl,
@@ -792,7 +793,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
BreakContinueStack.pop_back();
- EmitBlock(AfterBody.Block);
+ EmitBlock(AfterBody.getBlock());
llvm::BasicBlock *FetchMore = createBasicBlock("fetchmore");
@@ -828,7 +829,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
LV.getAddress());
}
- EmitBlock(LoopEnd.Block);
+ EmitBlock(LoopEnd.getBlock());
}
void CodeGenFunction::EmitObjCAtTryStmt(const ObjCAtTryStmt &S) {
diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp
index f3c80bcf08e3..d7960beb1d1d 100644
--- a/lib/CodeGen/CGObjCGNU.cpp
+++ b/lib/CodeGen/CGObjCGNU.cpp
@@ -167,6 +167,7 @@ public:
bool lval = false);
virtual llvm::Value *GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
*Method);
+ virtual llvm::Constant *GetEHType(QualType T);
virtual llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
const ObjCContainerDecl *CD);
@@ -192,7 +193,8 @@ public:
virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dst);
virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dest);
+ llvm::Value *src, llvm::Value *dest,
+ bool threadlocal=false);
virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dest,
llvm::Value *ivarOffset);
@@ -210,6 +212,10 @@ public:
virtual llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
const ObjCInterfaceDecl *Interface,
const ObjCIvarDecl *Ivar);
+ virtual llvm::Constant *GCBlockLayout(CodeGen::CodeGenFunction &CGF,
+ const llvm::SmallVectorImpl<const BlockDeclRefExpr *> &) {
+ return NULLPtr;
+ }
};
} // end anonymous namespace
@@ -402,6 +408,11 @@ llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
return Builder.CreateLoad(Sel);
}
+llvm::Constant *CGObjCGNU::GetEHType(QualType T) {
+ llvm_unreachable("asking for catch type for ObjC type in GNU runtime");
+ return 0;
+}
+
llvm::Constant *CGObjCGNU::MakeConstantString(const std::string &Str,
const std::string &Name) {
llvm::Constant *ConstStr = CGM.GetAddrOfConstantCString(Str, Name.c_str());
@@ -438,7 +449,7 @@ llvm::Constant *CGObjCGNU::MakeGlobal(const llvm::ArrayType *Ty,
/// Generate an NSConstantString object.
llvm::Constant *CGObjCGNU::GenerateConstantString(const StringLiteral *SL) {
- std::string Str(SL->getStrData(), SL->getByteLength());
+ std::string Str = SL->getString().str();
// Look for an existing one
llvm::StringMap<llvm::Constant*>::iterator old = ObjCStrings.find(Str);
@@ -691,7 +702,7 @@ CGObjCGNU::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
Params.push_back(SelectorTy);
llvm::Value *self;
- if (isa<ObjCMethodDecl>(CGF.CurFuncDecl)) {
+ if (isa<ObjCMethodDecl>(CGF.CurCodeDecl)) {
self = CGF.LoadObjCSelf();
} else {
self = llvm::ConstantPointerNull::get(IdTy);
@@ -721,8 +732,8 @@ CGObjCGNU::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
// The lookup function may have changed the receiver, so make sure we use
// the new one.
- ActualArgs[0] =
- std::make_pair(RValue::get(Builder.CreateLoad(ReceiverPtr)), ASTIdTy);
+ ActualArgs[0] = std::make_pair(RValue::get(
+ Builder.CreateLoad(ReceiverPtr, true)), ASTIdTy);
} else {
std::vector<const llvm::Type*> Params;
Params.push_back(Receiver->getType());
@@ -1854,6 +1865,19 @@ llvm::Constant *CGObjCGNU::EnumerationMutationFunction() {
return CGM.CreateRuntimeFunction(FTy, "objc_enumerationMutation");
}
+namespace {
+ struct CallSyncExit : EHScopeStack::Cleanup {
+ llvm::Value *SyncExitFn;
+ llvm::Value *SyncArg;
+ CallSyncExit(llvm::Value *SyncExitFn, llvm::Value *SyncArg)
+ : SyncExitFn(SyncExitFn), SyncArg(SyncArg) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
+ CGF.Builder.CreateCall(SyncExitFn, SyncArg)->setDoesNotThrow();
+ }
+ };
+}
+
void CGObjCGNU::EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
const ObjCAtSynchronizedStmt &S) {
std::vector<const llvm::Type*> Args(1, IdTy);
@@ -1870,13 +1894,8 @@ void CGObjCGNU::EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
CGF.Builder.CreateCall(SyncEnter, SyncArg);
// Register an all-paths cleanup to release the lock.
- {
- CodeGenFunction::CleanupBlock ReleaseScope(CGF, NormalAndEHCleanup);
-
- llvm::Value *SyncExit = CGM.CreateRuntimeFunction(FTy, "objc_sync_exit");
- SyncArg = CGF.Builder.CreateBitCast(SyncArg, IdTy);
- CGF.Builder.CreateCall(SyncExit, SyncArg);
- }
+ llvm::Value *SyncExit = CGM.CreateRuntimeFunction(FTy, "objc_sync_exit");
+ CGF.EHStack.pushCleanup<CallSyncExit>(NormalAndEHCleanup, SyncExit, SyncArg);
// Emit the body of the statement.
CGF.EmitStmt(S.getSynchBody());
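The cleanup pushed above means objc_sync_exit runs on every path out of the @synchronized body, exceptional or not. A rough source-level sketch of that contract, with the runtime entry points declared loosely as taking void* (an assumption for illustration; the real signatures take id):

extern "C" int objc_sync_enter(void *obj);   // real runtime entry points,
extern "C" int objc_sync_exit(void *obj);    // signatures simplified here

void synchronized_shape(void *obj, void (*body)()) {
  objc_sync_enter(obj);
  try {
    body();                 // the @synchronized body
  } catch (...) {
    objc_sync_exit(obj);    // exceptional path: what NormalAndEHCleanup provides
    throw;                  // rethrow after releasing the lock
  }
  objc_sync_exit(obj);      // normal fall-through path
}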
@@ -2007,13 +2026,11 @@ void CGObjCGNU::EmitTryStmt(CodeGen::CodeGenFunction &CGF,
if (S.getFinallyStmt())
CGF.ExitFinallyBlock(FinallyInfo);
- if (Cont.Block) {
- if (Cont.Block->use_empty())
- delete Cont.Block;
- else {
- CGF.EmitBranch(Cont.Block);
- CGF.EmitBlock(Cont.Block);
- }
+ if (Cont.isValid()) {
+ if (Cont.getBlock()->use_empty())
+ delete Cont.getBlock();
+ else
+ CGF.EmitBlock(Cont.getBlock());
}
}
@@ -2075,11 +2092,16 @@ void CGObjCGNU::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
}
void CGObjCGNU::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst) {
+ llvm::Value *src, llvm::Value *dst,
+ bool threadlocal) {
CGBuilderTy B = CGF.Builder;
src = EnforceType(B, src, IdTy);
dst = EnforceType(B, dst, PtrToIdTy);
- B.CreateCall2(GlobalAssignFn, src, dst);
+ if (!threadlocal)
+ B.CreateCall2(GlobalAssignFn, src, dst);
+ else
+ // FIXME. Add threadlocal assign API
+ assert(false && "EmitObjCGlobalAssign - Thread Local API NYI");
}
void CGObjCGNU::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp
index 01ead9efe876..54d0ff21ea44 100644
--- a/lib/CodeGen/CGObjCMac.cpp
+++ b/lib/CodeGen/CGObjCMac.cpp
@@ -25,7 +25,8 @@
#include "clang/Basic/LangOptions.h"
#include "clang/Frontend/CodeGenOptions.h"
-#include "llvm/Intrinsics.h"
+#include "llvm/InlineAsm.h"
+#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/ADT/DenseSet.h"
@@ -106,17 +107,33 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
V = CGF.Builder.CreateGEP(V, Offset, "add.ptr");
V = CGF.Builder.CreateBitCast(V, llvm::PointerType::getUnqual(LTy));
- Qualifiers Quals = CGF.MakeQualifiers(IvarTy);
- Quals.addCVRQualifiers(CVRQualifiers);
-
- if (!Ivar->isBitField())
- return LValue::MakeAddr(V, Quals);
+ if (!Ivar->isBitField()) {
+ LValue LV = CGF.MakeAddrLValue(V, IvarTy);
+ LV.getQuals().addCVRQualifiers(CVRQualifiers);
+ return LV;
+ }
- // We need to compute the bit offset for the bit-field, the offset is to the
- // byte. Note, there is a subtle invariant here: we can only call this routine
- // on non-synthesized ivars but we may be called for synthesized ivars.
- // However, a synthesized ivar can never be a bit-field, so this is safe.
- uint64_t BitOffset = LookupFieldBitOffset(CGF.CGM, OID, 0, Ivar) % 8;
+ // We need to compute an access strategy for this bit-field. We are given the
+ // offset to the first byte in the bit-field, the sub-byte offset is taken
+ // from the original layout. We reuse the normal bit-field access strategy by
+ // treating this as an access to a struct where the bit-field is in byte 0,
+ // and adjust the containing type size as appropriate.
+ //
+ // FIXME: Note that currently we make a very conservative estimate of the
+ // alignment of the bit-field, because (a) it is not clear what guarantees the
+ // runtime makes us, and (b) we don't have a way to specify that the struct is
+ // at an alignment plus offset.
+ //
+ // Note, there is a subtle invariant here: we can only call this routine on
+ // non-synthesized ivars but we may be called for synthesized ivars. However,
+ // a synthesized ivar can never be a bit-field, so this is safe.
+ const ASTRecordLayout &RL =
+ CGF.CGM.getContext().getASTObjCInterfaceLayout(OID);
+ uint64_t TypeSizeInBits = RL.getSize();
+ uint64_t FieldBitOffset = LookupFieldBitOffset(CGF.CGM, OID, 0, Ivar);
+ uint64_t BitOffset = FieldBitOffset % 8;
+ uint64_t ContainingTypeAlign = 8;
+ uint64_t ContainingTypeSize = TypeSizeInBits - (FieldBitOffset - BitOffset);
uint64_t BitFieldSize =
Ivar->getBitWidth()->EvaluateAsInt(CGF.getContext()).getZExtValue();
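To make the container adjustment above concrete, here is the same arithmetic on made-up numbers (a bit-field ivar whose first bit sits 35 bits into a 128-bit interface layout; both values are assumptions for illustration):

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t TypeSizeInBits  = 128; // hypothetical interface layout size
  uint64_t FieldBitOffset  = 35;  // hypothetical ivar bit offset
  uint64_t BitOffset = FieldBitOffset % 8;                           // 3
  uint64_t ContainingTypeSize =
      TypeSizeInBits - (FieldBitOffset - BitOffset);                 // 96
  // The bit-field is then modeled as starting at bit 3 of byte 0 of a
  // 96-bit container, with the conservative 8-bit ContainingTypeAlign.
  std::printf("start bit %llu, container %llu bits\n",
              (unsigned long long)BitOffset,
              (unsigned long long)ContainingTypeSize);
  return 0;
}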
@@ -126,24 +143,12 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
// layout object. However, this is blocked on other cleanups to the
// Objective-C code, so for now we just live with allocating a bunch of these
// objects.
+ CGBitFieldInfo *Info = new (CGF.CGM.getContext()) CGBitFieldInfo(
+ CGBitFieldInfo::MakeInfo(CGF.CGM.getTypes(), Ivar, BitOffset, BitFieldSize,
+ ContainingTypeSize, ContainingTypeAlign));
- // We always construct a single, possibly unaligned, access for this case.
- CGBitFieldInfo::AccessInfo AI;
- AI.FieldIndex = 0;
- AI.FieldByteOffset = 0;
- AI.FieldBitStart = BitOffset;
- AI.AccessWidth = CGF.CGM.getContext().getTypeSize(IvarTy);
- AI.AccessAlignment = 0;
- AI.TargetBitOffset = 0;
- AI.TargetBitWidth = BitFieldSize;
-
- CGBitFieldInfo *Info =
- new (CGF.CGM.getContext()) CGBitFieldInfo(BitFieldSize, 1, &AI,
- IvarTy->isSignedIntegerType());
-
- // FIXME: We need to set a very conservative alignment on this, or make sure
- // that the runtime is doing the right thing.
- return LValue::MakeBitfield(V, *Info, Quals.getCVRQualifiers());
+ return LValue::MakeBitfield(V, *Info,
+ IvarTy.getCVRQualifiers() | CVRQualifiers);
}
///
@@ -402,6 +407,16 @@ public:
return CGM.CreateRuntimeFunction(FTy, "objc_assign_global");
}
+ /// GcAssignThreadLocalFn -- LLVM objc_assign_threadlocal function.
+ llvm::Constant *getGcAssignThreadLocalFn() {
+ // id objc_assign_threadlocal(id src, id * dest)
+ std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
+ Args.push_back(ObjectPtrTy->getPointerTo());
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ObjectPtrTy, Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_assign_threadlocal");
+ }
+
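In GC mode the new 'threadlocal' flag simply selects between two write-barrier helpers for stores to global storage; a sketch of the call-level distinction, with id reduced to void* (prototypes follow the comments above):

extern "C" void *objc_assign_global(void *val, void **dest);       // plain global
extern "C" void *objc_assign_threadlocal(void *val, void **dest);  // __thread global

// Hypothetical helper showing which barrier the flag picks.
void store_objc_global(void *val, void **slot, bool threadlocal) {
  if (!threadlocal)
    objc_assign_global(val, slot);
  else
    objc_assign_threadlocal(val, slot);
}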
/// GcAssignIvarFn -- LLVM objc_assign_ivar function.
llvm::Constant *getGcAssignIvarFn() {
// id objc_assign_ivar(id, id *, ptrdiff_t)
@@ -425,7 +440,7 @@ public:
/// GcAssignStrongCastFn -- LLVM objc_assign_strongCast function.
llvm::Constant *getGcAssignStrongCastFn() {
- // id objc_assign_global(id, id *)
+ // id objc_assign_strongCast(id, id *)
std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
Args.push_back(ObjectPtrTy->getPointerTo());
llvm::FunctionType *FTy =
@@ -719,25 +734,6 @@ public:
"objc_msgSend_stret_fixup");
}
- llvm::Constant *getMessageSendIdFixupFn() {
- // id objc_msgSendId_fixup(id, struct message_ref_t*, ...)
- std::vector<const llvm::Type*> Params;
- Params.push_back(ObjectPtrTy);
- Params.push_back(MessageRefPtrTy);
- return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
- Params, true),
- "objc_msgSendId_fixup");
- }
-
- llvm::Constant *getMessageSendIdStretFixupFn() {
- // id objc_msgSendId_stret_fixup(id, struct message_ref_t*, ...)
- std::vector<const llvm::Type*> Params;
- Params.push_back(ObjectPtrTy);
- Params.push_back(MessageRefPtrTy);
- return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
- Params, true),
- "objc_msgSendId_stret_fixup");
- }
llvm::Constant *getMessageSendSuper2FixupFn() {
// id objc_msgSendSuper2_fixup (struct objc_super *,
// struct _super_message_ref_t*, ...)
@@ -760,28 +756,6 @@ public:
"objc_msgSendSuper2_stret_fixup");
}
-
-
- /// EHPersonalityPtr - LLVM value for an i8* to the Objective-C
- /// exception personality function.
- llvm::Value *getEHPersonalityPtr() {
- llvm::Constant *Personality =
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty(VMContext),
- true),
- "__objc_personality_v0");
- return llvm::ConstantExpr::getBitCast(Personality, Int8PtrTy);
- }
-
- llvm::Constant *getUnwindResumeOrRethrowFn() {
- std::vector<const llvm::Type*> Params;
- Params.push_back(Int8PtrTy);
- return CGM.CreateRuntimeFunction(
- llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
- Params, false),
- (CGM.getLangOptions().SjLjExceptions ? "_Unwind_SjLj_Resume" :
- "_Unwind_Resume_or_Rethrow"));
- }
-
llvm::Constant *getObjCEndCatchFn() {
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
false),
@@ -905,7 +879,6 @@ protected:
/// selector's name. The return value has type char *.
llvm::Constant *GetMethodVarName(Selector Sel);
llvm::Constant *GetMethodVarName(IdentifierInfo *Ident);
- llvm::Constant *GetMethodVarName(const std::string &Name);
/// GetMethodVarType - Return a unique constant for the given
/// selector's name. The return value has type char *.
@@ -926,11 +899,15 @@ protected:
/// name. The return value has type char *.
llvm::Constant *GetClassName(IdentifierInfo *Ident);
+ llvm::Function *GetMethodDefinition(const ObjCMethodDecl *MD);
+
/// BuildIvarLayout - Builds ivar layout bitmap for the class
/// implementation for the __strong or __weak case.
///
llvm::Constant *BuildIvarLayout(const ObjCImplementationDecl *OI,
bool ForStrongLayout);
+
+ llvm::Constant *BuildIvarLayoutBitmap(std::string &BitMap);
void BuildAggrIvarRecordLayout(const RecordType *RT,
unsigned int BytePos, bool ForStrongLayout,
@@ -1022,6 +999,9 @@ public:
/// forward references will be filled in with empty bodies if no
/// definition is seen. The return value has type ProtocolPtrTy.
virtual llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD)=0;
+ virtual llvm::Constant *GCBlockLayout(CodeGen::CodeGenFunction &CGF,
+ const llvm::SmallVectorImpl<const BlockDeclRefExpr *> &);
+
};
class CGObjCMac : public CGObjCCommonMac {
@@ -1053,15 +1033,6 @@ private:
/// EmitSuperClassRef - Emits reference to class's main metadata class.
llvm::Value *EmitSuperClassRef(const ObjCInterfaceDecl *ID);
- CodeGen::RValue EmitMessageSend(CodeGen::CodeGenFunction &CGF,
- ReturnValueSlot Return,
- QualType ResultType,
- Selector Sel,
- llvm::Value *Arg0,
- QualType Arg0Ty,
- bool IsSuper,
- const CallArgList &CallArgs);
-
/// EmitIvarList - Emit the ivar list for the given
/// implementation. If ForClass is true the list of class ivars
/// (i.e. metaclass ivars) is emitted, otherwise the list of
@@ -1174,6 +1145,8 @@ public:
virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
const ObjCMethodDecl *Method);
+ virtual llvm::Constant *GetEHType(QualType T);
+
virtual void GenerateCategory(const ObjCCategoryImplDecl *CMD);
virtual void GenerateClass(const ObjCImplementationDecl *ClassDecl);
@@ -1198,7 +1171,8 @@ public:
virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dst);
virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dest);
+ llvm::Value *src, llvm::Value *dest,
+ bool threadlocal = false);
virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dest,
llvm::Value *ivarOffset);
@@ -1343,7 +1317,7 @@ private:
/// GetInterfaceEHType - Get the cached ehtype for the given Objective-C
/// interface. The return value has type EHTypePtrTy.
- llvm::Value *GetInterfaceEHType(const ObjCInterfaceDecl *ID,
+ llvm::Constant *GetInterfaceEHType(const ObjCInterfaceDecl *ID,
bool ForDefinition);
const char *getMetaclassSymbolPrefix() const {
@@ -1418,6 +1392,8 @@ public:
virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
const ObjCProtocolDecl *PD);
+ virtual llvm::Constant *GetEHType(QualType T);
+
virtual llvm::Constant *GetPropertyGetFunction() {
return ObjCTypes.getGetPropertyFn();
}
@@ -1444,7 +1420,8 @@ public:
virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dst);
virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dest);
+ llvm::Value *src, llvm::Value *dest,
+ bool threadlocal = false);
virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dest,
llvm::Value *ivarOffset);
@@ -1515,6 +1492,11 @@ llvm::Value *CGObjCMac::GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
return EmitSelector(Builder, Method->getSelector());
}
+llvm::Constant *CGObjCMac::GetEHType(QualType T) {
+ llvm_unreachable("asking for catch type for ObjC type in fragile runtime");
+ return 0;
+}
+
/// Generate a constant CFString object.
/*
struct __builtin_CFString {
@@ -1664,6 +1646,90 @@ CGObjCCommonMac::EmitLegacyMessageSend(CodeGen::CodeGenFunction &CGF,
return CGF.EmitCall(FnInfo, Fn, Return, ActualArgs);
}
+static Qualifiers::GC GetGCAttrTypeForType(ASTContext &Ctx, QualType FQT) {
+ if (FQT.isObjCGCStrong())
+ return Qualifiers::Strong;
+
+ if (FQT.isObjCGCWeak())
+ return Qualifiers::Weak;
+
+ if (FQT->isObjCObjectPointerType() || FQT->isBlockPointerType())
+ return Qualifiers::Strong;
+
+ if (const PointerType *PT = FQT->getAs<PointerType>())
+ return GetGCAttrTypeForType(Ctx, PT->getPointeeType());
+
+ return Qualifiers::GCNone;
+}
+
+llvm::Constant *CGObjCCommonMac::GCBlockLayout(CodeGen::CodeGenFunction &CGF,
+ const llvm::SmallVectorImpl<const BlockDeclRefExpr *> &DeclRefs) {
+ llvm::Constant *NullPtr =
+ llvm::Constant::getNullValue(llvm::Type::getInt8PtrTy(VMContext));
+ if ((CGM.getLangOptions().getGCMode() == LangOptions::NonGC) ||
+ DeclRefs.empty())
+ return NullPtr;
+ bool hasUnion = false;
+ SkipIvars.clear();
+ IvarsInfo.clear();
+ unsigned WordSizeInBits = CGM.getContext().Target.getPointerWidth(0);
+ unsigned ByteSizeInBits = CGM.getContext().Target.getCharWidth();
+
+ for (size_t i = 0; i < DeclRefs.size(); ++i) {
+ const BlockDeclRefExpr *BDRE = DeclRefs[i];
+ const ValueDecl *VD = BDRE->getDecl();
+ CharUnits Offset = CGF.BlockDecls[VD];
+ uint64_t FieldOffset = Offset.getQuantity();
+ QualType Ty = VD->getType();
+ assert(!Ty->isArrayType() &&
+ "Array block variable should have been caught");
+ if ((Ty->isRecordType() || Ty->isUnionType()) && !BDRE->isByRef()) {
+ BuildAggrIvarRecordLayout(Ty->getAs<RecordType>(),
+ FieldOffset,
+ true,
+ hasUnion);
+ continue;
+ }
+
+ Qualifiers::GC GCAttr = GetGCAttrTypeForType(CGM.getContext(), Ty);
+ unsigned FieldSize = CGM.getContext().getTypeSize(Ty);
+ // __block variables are passed by their descriptor address. So, size
+ // must reflect this.
+ if (BDRE->isByRef())
+ FieldSize = WordSizeInBits;
+ if (GCAttr == Qualifiers::Strong || BDRE->isByRef())
+ IvarsInfo.push_back(GC_IVAR(FieldOffset,
+ FieldSize / WordSizeInBits));
+ else if (GCAttr == Qualifiers::GCNone || GCAttr == Qualifiers::Weak)
+ SkipIvars.push_back(GC_IVAR(FieldOffset,
+ FieldSize / ByteSizeInBits));
+ }
+
+ if (IvarsInfo.empty())
+ return NullPtr;
+ // Sort on byte position in case we encountered a union nested in
+ // block variable type's aggregate type.
+ if (hasUnion && !IvarsInfo.empty())
+ std::sort(IvarsInfo.begin(), IvarsInfo.end());
+ if (hasUnion && !SkipIvars.empty())
+ std::sort(SkipIvars.begin(), SkipIvars.end());
+
+ std::string BitMap;
+ llvm::Constant *C = BuildIvarLayoutBitmap(BitMap);
+ if (CGM.getLangOptions().ObjCGCBitmapPrint) {
+ printf("\n block variable layout for block: ");
+ const unsigned char *s = (unsigned char*)BitMap.c_str();
+ for (unsigned i = 0; i < BitMap.size(); i++)
+ if (!(s[i] & 0xf0))
+ printf("0x0%x%s", s[i], s[i] != 0 ? ", " : "");
+ else
+ printf("0x%x%s", s[i], s[i] != 0 ? ", " : "");
+ printf("\n");
+ }
+
+ return C;
+}
+
llvm::Value *CGObjCMac::GenerateProtocolRef(CGBuilderTy &Builder,
const ObjCProtocolDecl *PD) {
// FIXME: I don't understand why gcc generates this, or where it is
@@ -1927,8 +1993,9 @@ llvm::Constant *CGObjCCommonMac::EmitPropertyList(llvm::Twine Name,
Prop));
}
if (const ObjCInterfaceDecl *OID = dyn_cast<ObjCInterfaceDecl>(OCD)) {
- for (ObjCInterfaceDecl::protocol_iterator P = OID->protocol_begin(),
- E = OID->protocol_end(); P != E; ++P)
+ for (ObjCInterfaceDecl::all_protocol_iterator
+ P = OID->all_referenced_protocol_begin(),
+ E = OID->all_referenced_protocol_end(); P != E; ++P)
PushProtocolProperties(PropertySet, Properties, Container, (*P),
ObjCTypes);
}
@@ -2116,8 +2183,8 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
const_cast<ObjCInterfaceDecl*>(ID->getClassInterface());
llvm::Constant *Protocols =
EmitProtocolList("\01L_OBJC_CLASS_PROTOCOLS_" + ID->getName(),
- Interface->protocol_begin(),
- Interface->protocol_end());
+ Interface->all_referenced_protocol_begin(),
+ Interface->all_referenced_protocol_end());
unsigned Flags = eClassFlags_Factory;
if (ID->getNumIvarInitializers())
Flags |= eClassFlags_HasCXXStructors;
@@ -2427,8 +2494,7 @@ llvm::Constant *CGObjCMac::EmitIvarList(const ObjCImplementationDecl *ID,
/// given method if it has been defined. The result is null if the
/// method has not been defined. The return value has type MethodPtrTy.
llvm::Constant *CGObjCMac::GetMethodConstant(const ObjCMethodDecl *MD) {
- // FIXME: Use DenseMap::lookup
- llvm::Function *Fn = MethodDefinitions[MD];
+ llvm::Function *Fn = GetMethodDefinition(MD);
if (!Fn)
return 0;
@@ -2529,6 +2595,214 @@ void CGObjCMac::EmitSynchronizedStmt(CodeGenFunction &CGF,
return EmitTryOrSynchronizedStmt(CGF, S);
}
+namespace {
+ struct PerformFragileFinally : EHScopeStack::Cleanup {
+ const Stmt &S;
+ llvm::Value *SyncArgSlot;
+ llvm::Value *CallTryExitVar;
+ llvm::Value *ExceptionData;
+ ObjCTypesHelper &ObjCTypes;
+ PerformFragileFinally(const Stmt *S,
+ llvm::Value *SyncArgSlot,
+ llvm::Value *CallTryExitVar,
+ llvm::Value *ExceptionData,
+ ObjCTypesHelper *ObjCTypes)
+ : S(*S), SyncArgSlot(SyncArgSlot), CallTryExitVar(CallTryExitVar),
+ ExceptionData(ExceptionData), ObjCTypes(*ObjCTypes) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ // Check whether we need to call objc_exception_try_exit.
+ // In optimized code, this branch will always be folded.
+ llvm::BasicBlock *FinallyCallExit =
+ CGF.createBasicBlock("finally.call_exit");
+ llvm::BasicBlock *FinallyNoCallExit =
+ CGF.createBasicBlock("finally.no_call_exit");
+ CGF.Builder.CreateCondBr(CGF.Builder.CreateLoad(CallTryExitVar),
+ FinallyCallExit, FinallyNoCallExit);
+
+ CGF.EmitBlock(FinallyCallExit);
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionTryExitFn(), ExceptionData)
+ ->setDoesNotThrow();
+
+ CGF.EmitBlock(FinallyNoCallExit);
+
+ if (isa<ObjCAtTryStmt>(S)) {
+ if (const ObjCAtFinallyStmt* FinallyStmt =
+ cast<ObjCAtTryStmt>(S).getFinallyStmt()) {
+ // Save the current cleanup destination in case there's
+ // control flow inside the finally statement.
+ llvm::Value *CurCleanupDest =
+ CGF.Builder.CreateLoad(CGF.getNormalCleanupDestSlot());
+
+ CGF.EmitStmt(FinallyStmt->getFinallyBody());
+
+ if (CGF.HaveInsertPoint()) {
+ CGF.Builder.CreateStore(CurCleanupDest,
+ CGF.getNormalCleanupDestSlot());
+ } else {
+ // Currently, the end of the cleanup must always exist.
+ CGF.EnsureInsertPoint();
+ }
+ }
+ } else {
+ // Emit objc_sync_exit(expr); as finally's sole statement for
+ // @synchronized.
+ llvm::Value *SyncArg = CGF.Builder.CreateLoad(SyncArgSlot);
+ CGF.Builder.CreateCall(ObjCTypes.getSyncExitFn(), SyncArg)
+ ->setDoesNotThrow();
+ }
+ }
+ };
+
+ class FragileHazards {
+ CodeGenFunction &CGF;
+ llvm::SmallVector<llvm::Value*, 20> Locals;
+ llvm::DenseSet<llvm::BasicBlock*> BlocksBeforeTry;
+
+ llvm::InlineAsm *ReadHazard;
+ llvm::InlineAsm *WriteHazard;
+
+ llvm::FunctionType *GetAsmFnType();
+
+ void collectLocals();
+ void emitReadHazard(CGBuilderTy &Builder);
+
+ public:
+ FragileHazards(CodeGenFunction &CGF);
+
+ void emitWriteHazard();
+ void emitHazardsInNewBlocks();
+ };
+}
+
+/// Create the fragile-ABI read and write hazards based on the current
+/// state of the function, which is presumed to be immediately prior
+/// to a @try block. These hazards are used to maintain correct
+/// semantics in the face of optimization and the fragile ABI's
+/// cavalier use of setjmp/longjmp.
+FragileHazards::FragileHazards(CodeGenFunction &CGF) : CGF(CGF) {
+ collectLocals();
+
+ if (Locals.empty()) return;
+
+ // Collect all the blocks in the function.
+ for (llvm::Function::iterator
+ I = CGF.CurFn->begin(), E = CGF.CurFn->end(); I != E; ++I)
+ BlocksBeforeTry.insert(&*I);
+
+ llvm::FunctionType *AsmFnTy = GetAsmFnType();
+
+ // Create a read hazard for the allocas. This inhibits dead-store
+ // optimizations and forces the values to memory. This hazard is
+ // inserted before any 'throwing' calls in the protected scope to
+ // reflect the possibility that the variables might be read from the
+ // catch block if the call throws.
+ {
+ std::string Constraint;
+ for (unsigned I = 0, E = Locals.size(); I != E; ++I) {
+ if (I) Constraint += ',';
+ Constraint += "*m";
+ }
+
+ ReadHazard = llvm::InlineAsm::get(AsmFnTy, "", Constraint, true, false);
+ }
+
+ // Create a write hazard for the allocas. This inhibits folding
+ // loads across the hazard. This hazard is inserted at the
+ // beginning of the catch path to reflect the possibility that the
+ // variables might have been written within the protected scope.
+ {
+ std::string Constraint;
+ for (unsigned I = 0, E = Locals.size(); I != E; ++I) {
+ if (I) Constraint += ',';
+ Constraint += "=*m";
+ }
+
+ WriteHazard = llvm::InlineAsm::get(AsmFnTy, "", Constraint, true, false);
+ }
+}
+
+/// Emit a write hazard at the current location.
+void FragileHazards::emitWriteHazard() {
+ if (Locals.empty()) return;
+
+ CGF.Builder.CreateCall(WriteHazard, Locals.begin(), Locals.end())
+ ->setDoesNotThrow();
+}
+
+void FragileHazards::emitReadHazard(CGBuilderTy &Builder) {
+ assert(!Locals.empty());
+ Builder.CreateCall(ReadHazard, Locals.begin(), Locals.end())
+ ->setDoesNotThrow();
+}
+
+/// Emit read hazards in all the protected blocks, i.e. all the blocks
+/// which have been inserted since the beginning of the try.
+void FragileHazards::emitHazardsInNewBlocks() {
+ if (Locals.empty()) return;
+
+ CGBuilderTy Builder(CGF.getLLVMContext());
+
+ // Iterate through all blocks, skipping those prior to the try.
+ for (llvm::Function::iterator
+ FI = CGF.CurFn->begin(), FE = CGF.CurFn->end(); FI != FE; ++FI) {
+ llvm::BasicBlock &BB = *FI;
+ if (BlocksBeforeTry.count(&BB)) continue;
+
+ // Walk through all the calls in the block.
+ for (llvm::BasicBlock::iterator
+ BI = BB.begin(), BE = BB.end(); BI != BE; ++BI) {
+ llvm::Instruction &I = *BI;
+
+ // Ignore instructions that aren't non-intrinsic calls.
+ // These are the only calls that can possibly call longjmp.
+ if (!isa<llvm::CallInst>(I) && !isa<llvm::InvokeInst>(I)) continue;
+ if (isa<llvm::IntrinsicInst>(I))
+ continue;
+
+ // Ignore call sites marked nounwind. This may be questionable,
+ // since 'nounwind' doesn't necessarily mean 'does not call longjmp'.
+ llvm::CallSite CS(&I);
+ if (CS.doesNotThrow()) continue;
+
+ // Insert a read hazard before the call. This will ensure that
+ // any writes to the locals are performed before making the
+ // call. If the call throws, then this is sufficient to
+ // guarantee correctness as long as it doesn't also write to any
+ // locals.
+ Builder.SetInsertPoint(&BB, BI);
+ emitReadHazard(Builder);
+ }
+ }
+}
+
+static void addIfPresent(llvm::DenseSet<llvm::Value*> &S, llvm::Value *V) {
+ if (V) S.insert(V);
+}
+
+void FragileHazards::collectLocals() {
+ // Compute a set of allocas to ignore.
+ llvm::DenseSet<llvm::Value*> AllocasToIgnore;
+ addIfPresent(AllocasToIgnore, CGF.ReturnValue);
+ addIfPresent(AllocasToIgnore, CGF.NormalCleanupDest);
+ addIfPresent(AllocasToIgnore, CGF.EHCleanupDest);
+
+ // Collect all the allocas currently in the function. This is
+ // probably way too aggressive.
+ llvm::BasicBlock &Entry = CGF.CurFn->getEntryBlock();
+ for (llvm::BasicBlock::iterator
+ I = Entry.begin(), E = Entry.end(); I != E; ++I)
+ if (isa<llvm::AllocaInst>(*I) && !AllocasToIgnore.count(&*I))
+ Locals.push_back(&*I);
+}
+
+llvm::FunctionType *FragileHazards::GetAsmFnType() {
+ std::vector<const llvm::Type *> Tys(Locals.size());
+ for (unsigned I = 0, E = Locals.size(); I != E; ++I)
+ Tys[I] = Locals[I]->getType();
+ return llvm::FunctionType::get(CGF.Builder.getVoidTy(), Tys, false);
+}
+
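The hazards built above are empty inline-asm calls whose only purpose is their memory constraints. A standalone sketch of the same trick at the source level, assuming GCC/Clang extended asm (an illustration of the idea, not the IR-level llvm::InlineAsm the code constructs):

#include <csetjmp>

static std::jmp_buf Buf;

int hazard_demo() {
  int local = 1;
  if (setjmp(Buf) == 0) {
    local = 2;
    // Read hazard: the asm claims it may read 'local' from memory, so the
    // store above cannot be discarded as dead even though longjmp skips the
    // ordinary uses that follow.
    asm volatile("" : : "m"(local));
    std::longjmp(Buf, 1);
  }
  // Write hazard: the asm claims it may have rewritten 'local' in memory, so
  // a pre-setjmp value cannot be folded across this point.
  asm volatile("" : "=m"(local));
  return local; // reloaded from memory; with the hazards in place this is 2
}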
/*
Objective-C setjmp-longjmp (sjlj) Exception Handling
@@ -2657,66 +2931,49 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// For @synchronized, call objc_sync_enter(sync.expr). The
// evaluation of the expression must occur before we enter the
- // @synchronized. We can safely avoid a temp here because jumps into
- // @synchronized are illegal & this will dominate uses.
- llvm::Value *SyncArg = 0;
+ // @synchronized. We can't avoid a temp here because we need the
+ // value to be preserved. If the backend ever does liveness
+ // correctly after setjmp, this will be unnecessary.
+ llvm::Value *SyncArgSlot = 0;
if (!isTry) {
- SyncArg =
+ llvm::Value *SyncArg =
CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
SyncArg = CGF.Builder.CreateBitCast(SyncArg, ObjCTypes.ObjectPtrTy);
CGF.Builder.CreateCall(ObjCTypes.getSyncEnterFn(), SyncArg)
->setDoesNotThrow();
+
+ SyncArgSlot = CGF.CreateTempAlloca(SyncArg->getType(), "sync.arg");
+ CGF.Builder.CreateStore(SyncArg, SyncArgSlot);
}
- // Allocate memory for the exception data and rethrow pointer.
+ // Allocate memory for the setjmp buffer. This needs to be kept
+ // live throughout the try and catch blocks.
llvm::Value *ExceptionData = CGF.CreateTempAlloca(ObjCTypes.ExceptionDataTy,
"exceptiondata.ptr");
- llvm::Value *RethrowPtr = CGF.CreateTempAlloca(ObjCTypes.ObjectPtrTy,
- "_rethrow");
+
+ // Create the fragile hazards. Note that this will not capture any
+ // of the allocas required for exception processing, but will
+ // capture the current basic block (which extends all the way to the
+ // setjmp call) as "before the @try".
+ FragileHazards Hazards(CGF);
// Create a flag indicating whether the cleanup needs to call
// objc_exception_try_exit. This is true except when
// - no catches match and we're branching through the cleanup
// just to rethrow the exception, or
// - a catch matched and we're falling out of the catch handler.
+ // The setjmp-safety rule here is that we should always store to this
+ // variable in a place that dominates the branch through the cleanup
+ // without passing through any setjmps.
llvm::Value *CallTryExitVar = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(),
"_call_try_exit");
- CGF.Builder.CreateStore(llvm::ConstantInt::getTrue(VMContext),
- CallTryExitVar);
// Push a normal cleanup to leave the try scope.
- {
- CodeGenFunction::CleanupBlock FinallyScope(CGF, NormalCleanup);
-
- // Check whether we need to call objc_exception_try_exit.
- // In optimized code, this branch will always be folded.
- llvm::BasicBlock *FinallyCallExit =
- CGF.createBasicBlock("finally.call_exit");
- llvm::BasicBlock *FinallyNoCallExit =
- CGF.createBasicBlock("finally.no_call_exit");
- CGF.Builder.CreateCondBr(CGF.Builder.CreateLoad(CallTryExitVar),
- FinallyCallExit, FinallyNoCallExit);
-
- CGF.EmitBlock(FinallyCallExit);
- CGF.Builder.CreateCall(ObjCTypes.getExceptionTryExitFn(), ExceptionData)
- ->setDoesNotThrow();
-
- CGF.EmitBlock(FinallyNoCallExit);
-
- if (isTry) {
- if (const ObjCAtFinallyStmt* FinallyStmt =
- cast<ObjCAtTryStmt>(S).getFinallyStmt())
- CGF.EmitStmt(FinallyStmt->getFinallyBody());
-
- // ~CleanupBlock requires there to be an exit block.
- CGF.EnsureInsertPoint();
- } else {
- // Emit objc_sync_exit(expr); as finally's sole statement for
- // @synchronized.
- CGF.Builder.CreateCall(ObjCTypes.getSyncExitFn(), SyncArg)
- ->setDoesNotThrow();
- }
- }
+ CGF.EHStack.pushCleanup<PerformFragileFinally>(NormalCleanup, &S,
+ SyncArgSlot,
+ CallTryExitVar,
+ ExceptionData,
+ &ObjCTypes);
// Enter a try block:
// - Call objc_exception_try_enter to push ExceptionData on top of
@@ -2738,68 +2995,71 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
llvm::BasicBlock *TryBlock = CGF.createBasicBlock("try");
llvm::BasicBlock *TryHandler = CGF.createBasicBlock("try.handler");
llvm::Value *DidCatch =
- CGF.Builder.CreateIsNull(SetJmpResult, "did_catch_exception");
- CGF.Builder.CreateCondBr(DidCatch, TryBlock, TryHandler);
+ CGF.Builder.CreateIsNotNull(SetJmpResult, "did_catch_exception");
+ CGF.Builder.CreateCondBr(DidCatch, TryHandler, TryBlock);
// Emit the protected block.
CGF.EmitBlock(TryBlock);
+ CGF.Builder.CreateStore(CGF.Builder.getTrue(), CallTryExitVar);
CGF.EmitStmt(isTry ? cast<ObjCAtTryStmt>(S).getTryBody()
: cast<ObjCAtSynchronizedStmt>(S).getSynchBody());
- CGF.EmitBranchThroughCleanup(FinallyEnd);
+
+ CGBuilderTy::InsertPoint TryFallthroughIP = CGF.Builder.saveAndClearIP();
// Emit the exception handler block.
CGF.EmitBlock(TryHandler);
- // Retrieve the exception object. We may emit multiple blocks but
- // nothing can cross this so the value is already in SSA form.
- llvm::CallInst *Caught =
- CGF.Builder.CreateCall(ObjCTypes.getExceptionExtractFn(),
- ExceptionData, "caught");
- Caught->setDoesNotThrow();
-
- // Remember the exception to rethrow.
- CGF.Builder.CreateStore(Caught, RethrowPtr);
-
- // Note: at this point, objc_exception_throw already popped the
- // catch handler, so anything that branches to the cleanup needs
- // to set CallTryExitVar to false.
+ // Don't optimize loads of the in-scope locals across this point.
+ Hazards.emitWriteHazard();
// For a @synchronized (or a @try with no catches), just branch
// through the cleanup to the rethrow block.
if (!isTry || !cast<ObjCAtTryStmt>(S).getNumCatchStmts()) {
// Tell the cleanup not to re-pop the exit.
- CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(VMContext),
- CallTryExitVar);
-
+ CGF.Builder.CreateStore(CGF.Builder.getFalse(), CallTryExitVar);
CGF.EmitBranchThroughCleanup(FinallyRethrow);
// Otherwise, we have to match against the caught exceptions.
} else {
+ // Retrieve the exception object. We may emit multiple blocks but
+ // nothing can cross this so the value is already in SSA form.
+ llvm::CallInst *Caught =
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionExtractFn(),
+ ExceptionData, "caught");
+ Caught->setDoesNotThrow();
+
// Push the exception to rethrow onto the EH value stack for the
// benefit of any @throws in the handlers.
CGF.ObjCEHValueStack.push_back(Caught);
const ObjCAtTryStmt* AtTryStmt = cast<ObjCAtTryStmt>(&S);
-
- // Enter a new exception try block (in case a @catch block throws
- // an exception). Now CallTryExitVar (currently true) is back in
- // synch with reality.
- CGF.Builder.CreateCall(ObjCTypes.getExceptionTryEnterFn(), ExceptionData)
- ->setDoesNotThrow();
- llvm::CallInst *SetJmpResult =
- CGF.Builder.CreateCall(ObjCTypes.getSetJmpFn(), SetJmpBuffer,
- "setjmp.result");
- SetJmpResult->setDoesNotThrow();
+ bool HasFinally = (AtTryStmt->getFinallyStmt() != 0);
- llvm::Value *Threw =
- CGF.Builder.CreateIsNotNull(SetJmpResult, "did_catch_exception");
+ llvm::BasicBlock *CatchBlock = 0;
+ llvm::BasicBlock *CatchHandler = 0;
+ if (HasFinally) {
+ // Enter a new exception try block (in case a @catch block
+ // throws an exception).
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionTryEnterFn(), ExceptionData)
+ ->setDoesNotThrow();
+
+ llvm::CallInst *SetJmpResult =
+ CGF.Builder.CreateCall(ObjCTypes.getSetJmpFn(), SetJmpBuffer,
+ "setjmp.result");
+ SetJmpResult->setDoesNotThrow();
+
+ llvm::Value *Threw =
+ CGF.Builder.CreateIsNotNull(SetJmpResult, "did_catch_exception");
- llvm::BasicBlock *CatchBlock = CGF.createBasicBlock("catch");
- llvm::BasicBlock *CatchHandler = CGF.createBasicBlock("catch_for_catch");
- CGF.Builder.CreateCondBr(Threw, CatchHandler, CatchBlock);
+ CatchBlock = CGF.createBasicBlock("catch");
+ CatchHandler = CGF.createBasicBlock("catch_for_catch");
+ CGF.Builder.CreateCondBr(Threw, CatchHandler, CatchBlock);
+
+ CGF.EmitBlock(CatchBlock);
+ }
- CGF.EmitBlock(CatchBlock);
+ CGF.Builder.CreateStore(CGF.Builder.getInt1(HasFinally), CallTryExitVar);
// Handle catch list. As a special case we check if everything is
// matched and avoid generating code for falling off the end if
@@ -2872,7 +3132,7 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// Collect any cleanups for the catch variable. The scope lasts until
// the end of the catch body.
- CodeGenFunction::RunCleanupsScope CatchVarCleanups(CGF);
+ CodeGenFunction::RunCleanupsScope CatchVarCleanups(CGF);
CGF.EmitLocalBlockVarDecl(*CatchParam);
assert(CGF.HaveInsertPoint() && "DeclStmt destroyed insert point?");
@@ -2896,43 +3156,55 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
CGF.ObjCEHValueStack.pop_back();
- if (!AllMatched) {
- // None of the handlers caught the exception, so store it to be
- // rethrown at the end of the @finally block.
- CGF.Builder.CreateStore(Caught, RethrowPtr);
+ // If nothing wanted anything to do with the caught exception,
+ // kill the extract call.
+ if (Caught->use_empty())
+ Caught->eraseFromParent();
+
+ if (!AllMatched)
CGF.EmitBranchThroughCleanup(FinallyRethrow);
- }
- // Emit the exception handler for the @catch blocks.
- CGF.EmitBlock(CatchHandler);
+ if (HasFinally) {
+ // Emit the exception handler for the @catch blocks.
+ CGF.EmitBlock(CatchHandler);
- // Rethrow the new exception, not the old one.
- Caught = CGF.Builder.CreateCall(ObjCTypes.getExceptionExtractFn(),
- ExceptionData);
- Caught->setDoesNotThrow();
- CGF.Builder.CreateStore(Caught, RethrowPtr);
+ // In theory we might now need a write hazard, but actually it's
+ // unnecessary because there's no local-accessing code between
+ // the try's write hazard and here.
+ //Hazards.emitWriteHazard();
- // Don't pop the catch handler; the throw already did.
- CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(VMContext),
- CallTryExitVar);
- CGF.EmitBranchThroughCleanup(FinallyRethrow);
+ // Don't pop the catch handler; the throw already did.
+ CGF.Builder.CreateStore(CGF.Builder.getFalse(), CallTryExitVar);
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+ }
}
+ // Insert read hazards as required in the new blocks.
+ Hazards.emitHazardsInNewBlocks();
+
// Pop the cleanup.
+ CGF.Builder.restoreIP(TryFallthroughIP);
+ if (CGF.HaveInsertPoint())
+ CGF.Builder.CreateStore(CGF.Builder.getTrue(), CallTryExitVar);
CGF.PopCleanupBlock();
- CGF.EmitBlock(FinallyEnd.Block);
+ CGF.EmitBlock(FinallyEnd.getBlock(), true);
// Emit the rethrow block.
- CGF.Builder.ClearInsertionPoint();
- CGF.EmitBlock(FinallyRethrow.Block, true);
+ CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();
+ CGF.EmitBlock(FinallyRethrow.getBlock(), true);
if (CGF.HaveInsertPoint()) {
- CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(),
- CGF.Builder.CreateLoad(RethrowPtr))
+ // Just look in the buffer for the exception to throw.
+ llvm::CallInst *Caught =
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionExtractFn(),
+ ExceptionData);
+ Caught->setDoesNotThrow();
+
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(), Caught)
->setDoesNotThrow();
CGF.Builder.CreateUnreachable();
}
- CGF.Builder.SetInsertPoint(FinallyEnd.Block);
+ CGF.Builder.restoreIP(SavedIP);
}
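Taken together, the hunks above lower the fragile-ABI @try to a setjmp-based state machine. A heavily simplified C++ sketch of that shape; the runtime prototypes are inferred from the calls above and reduced to void*, so treat them as assumptions rather than the documented API:

#include <csetjmp>

extern "C" void  objc_exception_try_enter(void *exc_data);
extern "C" void  objc_exception_try_exit(void *exc_data);
extern "C" void *objc_exception_extract(void *exc_data);

static std::jmp_buf Buf; // in the real lowering the buffer lives inside exc_data

// Conceptual shape only; the real code threads this through CallTryExitVar,
// the PerformFragileFinally cleanup, and the FragileHazards fences.
void *try_shape(void *exc_data, void (*body)()) {
  objc_exception_try_enter(exc_data);     // push the handler
  if (setjmp(Buf) == 0) {                 // "did_catch_exception" is false
    body();                               // protected @try body
    objc_exception_try_exit(exc_data);    // normal exit: pop the handler
    return nullptr;
  }
  // A throw longjmp'ed back here; fetch the in-flight exception.
  void *caught = objc_exception_extract(exc_data);
  // ... match @catch clauses, run @finally, rethrow if nothing matched ...
  return caught;
}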
void CGObjCMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
@@ -2996,7 +3268,8 @@ void CGObjCMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
/// objc_assign_global (id src, id *dst)
///
void CGObjCMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst) {
+ llvm::Value *src, llvm::Value *dst,
+ bool threadlocal) {
const llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
@@ -3007,8 +3280,12 @@ void CGObjCMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
- CGF.Builder.CreateCall2(ObjCTypes.getGcAssignGlobalFn(),
- src, dst, "globalassign");
+ if (!threadlocal)
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignGlobalFn(),
+ src, dst, "globalassign");
+ else
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignThreadLocalFn(),
+ src, dst, "threadlocalassign");
return;
}
@@ -3260,6 +3537,22 @@ llvm::Constant *CGObjCCommonMac::GetClassName(IdentifierInfo *Ident) {
return getConstantGEP(VMContext, Entry, 0, 0);
}
+llvm::Function *CGObjCCommonMac::GetMethodDefinition(const ObjCMethodDecl *MD) {
+ llvm::DenseMap<const ObjCMethodDecl*, llvm::Function*>::iterator
+ I = MethodDefinitions.find(MD);
+ if (I != MethodDefinitions.end())
+ return I->second;
+
+ if (MD->hasBody() && MD->getPCHLevel() > 0) {
+ // MD isn't emitted yet because it comes from PCH.
+ CGM.EmitTopLevelDecl(const_cast<ObjCMethodDecl*>(MD));
+ assert(MethodDefinitions[MD] && "EmitTopLevelDecl didn't emit the method!");
+ return MethodDefinitions[MD];
+ }
+
+ return NULL;
+}
+
/// GetIvarLayoutName - Returns a unique constant for the given
/// ivar layout bitmap.
llvm::Constant *CGObjCCommonMac::GetIvarLayoutName(IdentifierInfo *Ident,
@@ -3267,22 +3560,6 @@ llvm::Constant *CGObjCCommonMac::GetIvarLayoutName(IdentifierInfo *Ident,
return llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
}
-static Qualifiers::GC GetGCAttrTypeForType(ASTContext &Ctx, QualType FQT) {
- if (FQT.isObjCGCStrong())
- return Qualifiers::Strong;
-
- if (FQT.isObjCGCWeak())
- return Qualifiers::Weak;
-
- if (FQT->isObjCObjectPointerType() || FQT->isBlockPointerType())
- return Qualifiers::Strong;
-
- if (const PointerType *PT = FQT->getAs<PointerType>())
- return GetGCAttrTypeForType(Ctx, PT->getPointeeType());
-
- return Qualifiers::GCNone;
-}
-
void CGObjCCommonMac::BuildAggrIvarRecordLayout(const RecordType *RT,
unsigned int BytePos,
bool ForStrongLayout,
@@ -3446,63 +3723,19 @@ void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
MaxSkippedUnionIvarSize));
}
-/// BuildIvarLayout - Builds ivar layout bitmap for the class
-/// implementation for the __strong or __weak case.
-/// The layout map displays which words in ivar list must be skipped
-/// and which must be scanned by GC (see below). String is built of bytes.
-/// Each byte is divided up in two nibbles (4-bit each). Left nibble is count
-/// of words to skip and right nibble is count of words to scan. So, each
-/// nibble represents up to 15 workds to skip or scan. Skipping the rest is
-/// represented by a 0x00 byte which also ends the string.
-/// 1. when ForStrongLayout is true, following ivars are scanned:
-/// - id, Class
-/// - object *
-/// - __strong anything
-///
-/// 2. When ForStrongLayout is false, following ivars are scanned:
-/// - __weak anything
-///
-llvm::Constant *CGObjCCommonMac::BuildIvarLayout(
- const ObjCImplementationDecl *OMD,
- bool ForStrongLayout) {
- bool hasUnion = false;
-
+/// BuildIvarLayoutBitmap - This routine is the horsework for doing all
+/// the computations and returning the layout bitmap (for ivar or blocks) in
+/// the given argument BitMap string container. Routine reads
+/// two containers, IvarsInfo and SkipIvars which are assumed to be
+/// filled already by the caller.
+llvm::Constant *CGObjCCommonMac::BuildIvarLayoutBitmap(std::string& BitMap) {
unsigned int WordsToScan, WordsToSkip;
const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
- if (CGM.getLangOptions().getGCMode() == LangOptions::NonGC)
- return llvm::Constant::getNullValue(PtrTy);
-
- llvm::SmallVector<FieldDecl*, 32> RecFields;
- const ObjCInterfaceDecl *OI = OMD->getClassInterface();
- CGM.getContext().CollectObjCIvars(OI, RecFields);
-
- // Add this implementations synthesized ivars.
- llvm::SmallVector<ObjCIvarDecl*, 16> Ivars;
- CGM.getContext().CollectNonClassIvars(OI, Ivars);
- for (unsigned k = 0, e = Ivars.size(); k != e; ++k)
- RecFields.push_back(cast<FieldDecl>(Ivars[k]));
-
- if (RecFields.empty())
- return llvm::Constant::getNullValue(PtrTy);
-
- SkipIvars.clear();
- IvarsInfo.clear();
-
- BuildAggrIvarLayout(OMD, 0, 0, RecFields, 0, ForStrongLayout, hasUnion);
- if (IvarsInfo.empty())
- return llvm::Constant::getNullValue(PtrTy);
-
- // Sort on byte position in case we encounterred a union nested in
- // the ivar list.
- if (hasUnion && !IvarsInfo.empty())
- std::sort(IvarsInfo.begin(), IvarsInfo.end());
- if (hasUnion && !SkipIvars.empty())
- std::sort(SkipIvars.begin(), SkipIvars.end());
-
+
// Build the string of skip/scan nibbles
llvm::SmallVector<SKIP_SCAN, 32> SkipScanIvars;
unsigned int WordSize =
- CGM.getTypes().getTargetData().getTypeAllocSize(PtrTy);
+ CGM.getTypes().getTargetData().getTypeAllocSize(PtrTy);
if (IvarsInfo[0].ivar_bytepos == 0) {
WordsToSkip = 0;
WordsToScan = IvarsInfo[0].ivar_size;
@@ -3512,7 +3745,7 @@ llvm::Constant *CGObjCCommonMac::BuildIvarLayout(
}
for (unsigned int i=1, Last=IvarsInfo.size(); i != Last; i++) {
unsigned int TailPrevGCObjC =
- IvarsInfo[i-1].ivar_bytepos + IvarsInfo[i-1].ivar_size * WordSize;
+ IvarsInfo[i-1].ivar_bytepos + IvarsInfo[i-1].ivar_size * WordSize;
if (IvarsInfo[i].ivar_bytepos == TailPrevGCObjC) {
// consecutive 'scanned' object pointers.
WordsToScan += IvarsInfo[i].ivar_size;
@@ -3526,7 +3759,7 @@ llvm::Constant *CGObjCCommonMac::BuildIvarLayout(
SkScan.skip = WordsToSkip;
SkScan.scan = WordsToScan;
SkipScanIvars.push_back(SkScan);
-
+
// Skip the hole.
SkScan.skip = (IvarsInfo[i].ivar_bytepos - TailPrevGCObjC) / WordSize;
SkScan.scan = 0;
@@ -3541,15 +3774,15 @@ llvm::Constant *CGObjCCommonMac::BuildIvarLayout(
SkScan.scan = WordsToScan;
SkipScanIvars.push_back(SkScan);
}
-
+
if (!SkipIvars.empty()) {
unsigned int LastIndex = SkipIvars.size()-1;
int LastByteSkipped =
- SkipIvars[LastIndex].ivar_bytepos + SkipIvars[LastIndex].ivar_size;
+ SkipIvars[LastIndex].ivar_bytepos + SkipIvars[LastIndex].ivar_size;
LastIndex = IvarsInfo.size()-1;
int LastByteScanned =
- IvarsInfo[LastIndex].ivar_bytepos +
- IvarsInfo[LastIndex].ivar_size * WordSize;
+ IvarsInfo[LastIndex].ivar_bytepos +
+ IvarsInfo[LastIndex].ivar_size * WordSize;
// Compute number of bytes to skip at the tail end of the last ivar scanned.
if (LastByteSkipped > LastByteScanned) {
unsigned int TotalWords = (LastByteSkipped + (WordSize -1)) / WordSize;
@@ -3572,20 +3805,19 @@ llvm::Constant *CGObjCCommonMac::BuildIvarLayout(
--SkipScan;
}
}
-
+
// Generate the string.
- std::string BitMap;
for (int i = 0; i <= SkipScan; i++) {
unsigned char byte;
unsigned int skip_small = SkipScanIvars[i].skip % 0xf;
unsigned int scan_small = SkipScanIvars[i].scan % 0xf;
unsigned int skip_big = SkipScanIvars[i].skip / 0xf;
unsigned int scan_big = SkipScanIvars[i].scan / 0xf;
-
+
// first skip big.
for (unsigned int ix = 0; ix < skip_big; ix++)
BitMap += (unsigned char)(0xf0);
-
+
// next (skip small, scan)
if (skip_small) {
byte = skip_small << 4;
@@ -3610,11 +3842,71 @@ llvm::Constant *CGObjCCommonMac::BuildIvarLayout(
// null terminate string.
unsigned char zero = 0;
BitMap += zero;
+
+ llvm::GlobalVariable * Entry =
+ CreateMetadataVar("\01L_OBJC_CLASS_NAME_",
+ llvm::ConstantArray::get(VMContext, BitMap.c_str()),
+ "__TEXT,__cstring,cstring_literals",
+ 1, true);
+ return getConstantGEP(VMContext, Entry, 0, 0);
+}
- if (CGM.getLangOptions().ObjCGCBitmapPrint) {
+/// BuildIvarLayout - Builds ivar layout bitmap for the class
+/// implementation for the __strong or __weak case.
+/// The layout map displays which words in ivar list must be skipped
+/// and which must be scanned by GC (see below). String is built of bytes.
+/// Each byte is divided up in two nibbles (4-bit each). Left nibble is count
+/// of words to skip and right nibble is count of words to scan. So, each
+/// nibble represents up to 15 words to skip or scan. Skipping the rest is
+/// represented by a 0x00 byte which also ends the string.
+/// 1. when ForStrongLayout is true, following ivars are scanned:
+/// - id, Class
+/// - object *
+/// - __strong anything
+///
+/// 2. When ForStrongLayout is false, following ivars are scanned:
+/// - __weak anything
+///
+llvm::Constant *CGObjCCommonMac::BuildIvarLayout(
+ const ObjCImplementationDecl *OMD,
+ bool ForStrongLayout) {
+ bool hasUnion = false;
+
+ const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+ if (CGM.getLangOptions().getGCMode() == LangOptions::NonGC)
+ return llvm::Constant::getNullValue(PtrTy);
+
+ llvm::SmallVector<ObjCIvarDecl*, 32> Ivars;
+ const ObjCInterfaceDecl *OI = OMD->getClassInterface();
+ CGM.getContext().DeepCollectObjCIvars(OI, true, Ivars);
+
+ llvm::SmallVector<FieldDecl*, 32> RecFields;
+ for (unsigned k = 0, e = Ivars.size(); k != e; ++k)
+ RecFields.push_back(cast<FieldDecl>(Ivars[k]));
+
+ if (RecFields.empty())
+ return llvm::Constant::getNullValue(PtrTy);
+
+ SkipIvars.clear();
+ IvarsInfo.clear();
+
+ BuildAggrIvarLayout(OMD, 0, 0, RecFields, 0, ForStrongLayout, hasUnion);
+ if (IvarsInfo.empty())
+ return llvm::Constant::getNullValue(PtrTy);
+ // Sort on byte position in case we encountered a union nested in
+ // the ivar list.
+ if (hasUnion && !IvarsInfo.empty())
+ std::sort(IvarsInfo.begin(), IvarsInfo.end());
+ if (hasUnion && !SkipIvars.empty())
+ std::sort(SkipIvars.begin(), SkipIvars.end());
+
+ std::string BitMap;
+ llvm::Constant *C = BuildIvarLayoutBitmap(BitMap);
+
+ if (CGM.getLangOptions().ObjCGCBitmapPrint) {
printf("\n%s ivar layout for class '%s': ",
ForStrongLayout ? "strong" : "weak",
- OMD->getClassInterface()->getNameAsCString());
+ OMD->getClassInterface()->getName().data());
const unsigned char *s = (unsigned char*)BitMap.c_str();
for (unsigned i = 0; i < BitMap.size(); i++)
if (!(s[i] & 0xf0))
@@ -3623,12 +3915,7 @@ llvm::Constant *CGObjCCommonMac::BuildIvarLayout(
printf("0x%x%s", s[i], s[i] != 0 ? ", " : "");
printf("\n");
}
- llvm::GlobalVariable * Entry =
- CreateMetadataVar("\01L_OBJC_CLASS_NAME_",
- llvm::ConstantArray::get(VMContext, BitMap.c_str()),
- "__TEXT,__cstring,cstring_literals",
- 1, true);
- return getConstantGEP(VMContext, Entry, 0, 0);
+ return C;
}
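A small worked instance of the nibble encoding documented above, with values chosen purely for illustration: a layout of "skip 2 words, scan 3; then skip 1 word, scan 15" packs each run into one byte (high nibble = skip, low nibble = scan) and terminates with 0x00:

// high nibble = words to skip, low nibble = words to scan, 0x00 terminates
const unsigned char ExampleLayout[] = {
  0x23, // skip 2 words, scan 3 words
  0x1f, // skip 1 word,  scan 15 words
  0x00  // skip the rest / end of string
};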
llvm::Constant *CGObjCCommonMac::GetMethodVarName(Selector Sel) {
@@ -3649,11 +3936,6 @@ llvm::Constant *CGObjCCommonMac::GetMethodVarName(IdentifierInfo *ID) {
return GetMethodVarName(CGM.getContext().Selectors.getNullarySelector(ID));
}
-// FIXME: Merge into a single cstring creation function.
-llvm::Constant *CGObjCCommonMac::GetMethodVarName(const std::string &Name) {
- return GetMethodVarName(&CGM.getContext().Idents.get(Name));
-}
-
llvm::Constant *CGObjCCommonMac::GetMethodVarType(const FieldDecl *Field) {
std::string TypeStr;
CGM.getContext().getObjCEncodingForType(Field->getType(), TypeStr, Field);
@@ -4526,8 +4808,8 @@ llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassRoTInitializer(
assert(OID && "CGObjCNonFragileABIMac::BuildClassRoTInitializer");
Values[ 6] = EmitProtocolList("\01l_OBJC_CLASS_PROTOCOLS_$_"
+ OID->getName(),
- OID->protocol_begin(),
- OID->protocol_end());
+ OID->all_referenced_protocol_begin(),
+ OID->all_referenced_protocol_end());
if (flags & CLS_META)
Values[ 7] = llvm::Constant::getNullValue(ObjCTypes.IvarListnfABIPtrTy);
@@ -4741,7 +5023,7 @@ llvm::Value *CGObjCNonFragileABIMac::GenerateProtocolRef(CGBuilderTy &Builder,
ObjCTypes.ExternalProtocolPtrTy);
std::string ProtocolName("\01l_OBJC_PROTOCOL_REFERENCE_$_");
- ProtocolName += PD->getNameAsCString();
+ ProtocolName += PD->getName();
llvm::GlobalVariable *PTGV = CGM.getModule().getGlobalVariable(ProtocolName);
if (PTGV)
@@ -4855,8 +5137,7 @@ void CGObjCNonFragileABIMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
/// method has not been defined. The return value has type MethodPtrTy.
llvm::Constant *CGObjCNonFragileABIMac::GetMethodConstant(
const ObjCMethodDecl *MD) {
- // FIXME: Use DenseMap::lookup
- llvm::Function *Fn = MethodDefinitions[MD];
+ llvm::Function *Fn = GetMethodDefinition(MD);
if (!Fn)
return 0;
@@ -5284,40 +5565,24 @@ CodeGen::RValue CGObjCNonFragileABIMac::EmitMessageSend(
llvm::Constant *Fn = 0;
std::string Name("\01l_");
if (CGM.ReturnTypeUsesSRet(FnInfo)) {
-#if 0
- // unlike what is documented. gcc never generates this API!!
- if (Receiver->getType() == ObjCTypes.ObjectPtrTy) {
- Fn = ObjCTypes.getMessageSendIdStretFixupFn();
- // FIXME. Is there a better way of getting these names.
- // They are available in RuntimeFunctions vector pair.
- Name += "objc_msgSendId_stret_fixup";
- } else
-#endif
- if (IsSuper) {
- Fn = ObjCTypes.getMessageSendSuper2StretFixupFn();
- Name += "objc_msgSendSuper2_stret_fixup";
- } else {
- Fn = ObjCTypes.getMessageSendStretFixupFn();
- Name += "objc_msgSend_stret_fixup";
- }
+ if (IsSuper) {
+ Fn = ObjCTypes.getMessageSendSuper2StretFixupFn();
+ Name += "objc_msgSendSuper2_stret_fixup";
+ } else {
+ Fn = ObjCTypes.getMessageSendStretFixupFn();
+ Name += "objc_msgSend_stret_fixup";
+ }
} else if (!IsSuper && CGM.ReturnTypeUsesFPRet(ResultType)) {
Fn = ObjCTypes.getMessageSendFpretFixupFn();
Name += "objc_msgSend_fpret_fixup";
} else {
-#if 0
-// unlike what is documented. gcc never generates this API!!
- if (Receiver->getType() == ObjCTypes.ObjectPtrTy) {
- Fn = ObjCTypes.getMessageSendIdFixupFn();
- Name += "objc_msgSendId_fixup";
- } else
-#endif
- if (IsSuper) {
- Fn = ObjCTypes.getMessageSendSuper2FixupFn();
- Name += "objc_msgSendSuper2_fixup";
- } else {
- Fn = ObjCTypes.getMessageSendFixupFn();
- Name += "objc_msgSend_fixup";
- }
+ if (IsSuper) {
+ Fn = ObjCTypes.getMessageSendSuper2FixupFn();
+ Name += "objc_msgSendSuper2_fixup";
+ } else {
+ Fn = ObjCTypes.getMessageSendFixupFn();
+ Name += "objc_msgSend_fixup";
+ }
}
assert(Fn && "CGObjCNonFragileABIMac::EmitMessageSend");
Name += '_';
@@ -5647,7 +5912,8 @@ void CGObjCNonFragileABIMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
/// objc_assign_global (id src, id *dst)
///
void CGObjCNonFragileABIMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst) {
+ llvm::Value *src, llvm::Value *dst,
+ bool threadlocal) {
const llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
@@ -5658,11 +5924,28 @@ void CGObjCNonFragileABIMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
- CGF.Builder.CreateCall2(ObjCTypes.getGcAssignGlobalFn(),
- src, dst, "globalassign");
+ if (!threadlocal)
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignGlobalFn(),
+ src, dst, "globalassign");
+ else
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignThreadLocalFn(),
+ src, dst, "threadlocalassign");
return;
}
+namespace {
+ struct CallSyncExit : EHScopeStack::Cleanup {
+ llvm::Value *SyncExitFn;
+ llvm::Value *SyncArg;
+ CallSyncExit(llvm::Value *SyncExitFn, llvm::Value *SyncArg)
+ : SyncExitFn(SyncExitFn), SyncArg(SyncArg) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
+ CGF.Builder.CreateCall(SyncExitFn, SyncArg)->setDoesNotThrow();
+ }
+ };
+}
+
void
CGObjCNonFragileABIMac::EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
const ObjCAtSynchronizedStmt &S) {
@@ -5675,12 +5958,9 @@ CGObjCNonFragileABIMac::EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
->setDoesNotThrow();
// Register an all-paths cleanup to release the lock.
- {
- CodeGenFunction::CleanupBlock ReleaseScope(CGF, NormalAndEHCleanup);
-
- CGF.Builder.CreateCall(ObjCTypes.getSyncExitFn(), SyncArg)
- ->setDoesNotThrow();
- }
+ CGF.EHStack.pushCleanup<CallSyncExit>(NormalAndEHCleanup,
+ ObjCTypes.getSyncExitFn(),
+ SyncArg);
// Emit the body of the statement.
CGF.EmitStmt(S.getSynchBody());
@@ -5697,7 +5977,7 @@ namespace {
llvm::Value *TypeInfo;
};
- struct CallObjCEndCatch : EHScopeStack::LazyCleanup {
+ struct CallObjCEndCatch : EHScopeStack::Cleanup {
CallObjCEndCatch(bool MightThrow, llvm::Value *Fn) :
MightThrow(MightThrow), Fn(Fn) {}
bool MightThrow;
@@ -5714,6 +5994,31 @@ namespace {
};
}
+llvm::Constant *
+CGObjCNonFragileABIMac::GetEHType(QualType T) {
+ // There's a particular fixed type info for 'id'.
+ if (T->isObjCIdType() ||
+ T->isObjCQualifiedIdType()) {
+ llvm::Constant *IDEHType =
+ CGM.getModule().getGlobalVariable("OBJC_EHTYPE_id");
+ if (!IDEHType)
+ IDEHType =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.EHTypeTy,
+ false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0, "OBJC_EHTYPE_id");
+ return IDEHType;
+ }
+
+ // All other types should be Objective-C interface pointer types.
+ const ObjCObjectPointerType *PT =
+ T->getAs<ObjCObjectPointerType>();
+ assert(PT && "Invalid @catch type.");
+ const ObjCInterfaceType *IT = PT->getInterfaceType();
+ assert(IT && "Invalid @catch type.");
+ return GetInterfaceEHType(IT->getDecl(), false);
+}
+
void CGObjCNonFragileABIMac::EmitTryStmt(CodeGen::CodeGenFunction &CGF,
const ObjCAtTryStmt &S) {
// Jump destination for falling out of catch bodies.
@@ -5749,27 +6054,7 @@ void CGObjCNonFragileABIMac::EmitTryStmt(CodeGen::CodeGenFunction &CGF,
break;
}
- // There's a particular fixed type info for 'id'.
- if (CatchDecl->getType()->isObjCIdType() ||
- CatchDecl->getType()->isObjCQualifiedIdType()) {
- llvm::Value *IDEHType =
- CGM.getModule().getGlobalVariable("OBJC_EHTYPE_id");
- if (!IDEHType)
- IDEHType =
- new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.EHTypeTy,
- false,
- llvm::GlobalValue::ExternalLinkage,
- 0, "OBJC_EHTYPE_id");
- Handler.TypeInfo = IDEHType;
- } else {
- // All other types should be Objective-C interface pointer types.
- const ObjCObjectPointerType *PT =
- CatchDecl->getType()->getAs<ObjCObjectPointerType>();
- assert(PT && "Invalid @catch type.");
- const ObjCInterfaceType *IT = PT->getInterfaceType();
- assert(IT && "Invalid @catch type.");
- Handler.TypeInfo = GetInterfaceEHType(IT->getDecl(), false);
- }
+ Handler.TypeInfo = GetEHType(CatchDecl->getType());
}
EHCatchScope *Catch = CGF.EHStack.pushCatch(Handlers.size());
@@ -5802,9 +6087,9 @@ void CGObjCNonFragileABIMac::EmitTryStmt(CodeGen::CodeGenFunction &CGF,
// Add a cleanup to leave the catch.
bool EndCatchMightThrow = (Handler.Variable == 0);
- CGF.EHStack.pushLazyCleanup<CallObjCEndCatch>(NormalAndEHCleanup,
- EndCatchMightThrow,
- ObjCTypes.getObjCEndCatchFn());
+ CGF.EHStack.pushCleanup<CallObjCEndCatch>(NormalAndEHCleanup,
+ EndCatchMightThrow,
+ ObjCTypes.getObjCEndCatchFn());
// Bind the catch parameter if it exists.
if (const VarDecl *CatchParam = Handler.Variable) {
@@ -5832,8 +6117,8 @@ void CGObjCNonFragileABIMac::EmitTryStmt(CodeGen::CodeGenFunction &CGF,
if (S.getFinallyStmt())
CGF.ExitFinallyBlock(FinallyInfo);
- if (Cont.Block)
- CGF.EmitBlock(Cont.Block);
+ if (Cont.isValid())
+ CGF.EmitBlock(Cont.getBlock());
}
/// EmitThrowStmt - Generate code for a throw statement.
@@ -5868,7 +6153,7 @@ void CGObjCNonFragileABIMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
CGF.Builder.ClearInsertionPoint();
}
-llvm::Value *
+llvm::Constant *
CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID,
bool ForDefinition) {
llvm::GlobalVariable * &Entry = EHTypeReferences[ID->getIdentifier()];
diff --git a/lib/CodeGen/CGObjCRuntime.h b/lib/CodeGen/CGObjCRuntime.h
index eb79f09de13f..584760f6f343 100644
--- a/lib/CodeGen/CGObjCRuntime.h
+++ b/lib/CodeGen/CGObjCRuntime.h
@@ -52,6 +52,7 @@ namespace CodeGen {
class Selector;
class ObjCIvarDecl;
class ObjCStringLiteral;
+ class BlockDeclRefExpr;
namespace CodeGen {
class CodeGenModule;
@@ -103,6 +104,12 @@ public:
virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
const ObjCMethodDecl *Method) = 0;
+ /// Get the type constant to catch for the given ObjC pointer type.
+ /// This is used externally to implement catching ObjC types in C++.
+ /// Runtimes which don't support this should add the appropriate
+ /// error to Sema.
+ virtual llvm::Constant *GetEHType(QualType T) = 0;
+
/// Generate a constant string object.
virtual llvm::Constant *GenerateConstantString(const StringLiteral *) = 0;
@@ -192,7 +199,8 @@ public:
virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dest) = 0;
virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dest) = 0;
+ llvm::Value *src, llvm::Value *dest,
+ bool threadlocal=false) = 0;
virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dest,
llvm::Value *ivarOffset) = 0;
@@ -211,6 +219,9 @@ public:
llvm::Value *DestPtr,
llvm::Value *SrcPtr,
llvm::Value *Size) = 0;
+ virtual llvm::Constant *GCBlockLayout(CodeGen::CodeGenFunction &CGF,
+ const llvm::SmallVectorImpl<const BlockDeclRefExpr *> &) = 0;
+
};
/// Creates an instance of an Objective-C runtime class.
diff --git a/lib/CodeGen/CGRTTI.cpp b/lib/CodeGen/CGRTTI.cpp
index 1cca97702ddd..60df613f65e8 100644
--- a/lib/CodeGen/CGRTTI.cpp
+++ b/lib/CodeGen/CGRTTI.cpp
@@ -11,9 +11,12 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/AST/Type.h"
-#include "clang/AST/RecordLayout.h"
#include "CodeGenModule.h"
+#include "CGCXXABI.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/Type.h"
+#include "clang/Frontend/CodeGenOptions.h"
+
using namespace clang;
using namespace CodeGen;
@@ -45,7 +48,11 @@ class RTTIBuilder {
/// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
/// for pointer types.
- void BuildPointerTypeInfo(const PointerType *Ty);
+ void BuildPointerTypeInfo(QualType PointeeTy);
+
+ /// BuildObjCObjectTypeInfo - Build the appropriate kind of
+ /// type_info for an object type.
+ void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);
/// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
/// struct, used for member pointer types.
@@ -59,7 +66,7 @@ public:
llvm::Constant *BuildName(QualType Ty, bool Hidden,
llvm::GlobalVariable::LinkageTypes Linkage) {
llvm::SmallString<256> OutName;
- CGM.getMangleContext().mangleCXXRTTIName(Ty, OutName);
+ CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, OutName);
llvm::StringRef Name = OutName.str();
llvm::GlobalVariable *OGV = CGM.getModule().getNamedGlobal(Name);
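
The _ZTS (name) and _ZTI (type_info object) globals that mangleCXXRTTIName and mangleCXXRTTI produce use the same type encoding that typeid().name() returns. The small program below only illustrates that naming scheme; it is not code from the patch and assumes an Itanium-ABI toolchain with <cxxabi.h>:

#include <typeinfo>
#include <cxxabi.h>
#include <cstdio>
#include <cstdlib>

struct S {};

int main() {
  // typeid(S).name() is typically "1S" here; the corresponding globals the
  // RTTI builder looks up are _ZTS1S (name string) and _ZTI1S (type_info).
  int status = 0;
  char *readable =
      abi::__cxa_demangle(typeid(S).name(), nullptr, nullptr, &status);
  std::printf("%s -> %s\n", typeid(S).name(),
              status == 0 ? readable : "?");
  std::free(readable);
}
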
@@ -158,7 +165,7 @@ public:
llvm::Constant *RTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
// Mangle the RTTI name.
llvm::SmallString<256> OutName;
- CGM.getMangleContext().mangleCXXRTTI(Ty, OutName);
+ CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, OutName);
llvm::StringRef Name = OutName.str();
// Look for an existing global.
@@ -244,11 +251,9 @@ static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
return TypeInfoIsInStandardLibrary(BuiltinTy);
}
-/// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
-/// the given type exists somewhere else, and that we should not emit the type
-/// information in this translation unit.
-static bool ShouldUseExternalRTTIDescriptor(ASTContext &Context,
- QualType Ty) {
+/// IsStandardLibraryRTTIDescriptor - Returns whether the type
+/// information for the given type exists in the standard library.
+static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
// Type info for builtin types is defined in the standard library.
if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
return TypeInfoIsInStandardLibrary(BuiltinTy);
@@ -258,6 +263,15 @@ static bool ShouldUseExternalRTTIDescriptor(ASTContext &Context,
if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
return TypeInfoIsInStandardLibrary(PointerTy);
+ return false;
+}
+
+/// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
+/// the given type exists somewhere else, in which case we should not emit
+/// the type information in this translation unit. Assumes that it is not a
+/// standard-library type.
+static bool ShouldUseExternalRTTIDescriptor(ASTContext &Context,
+ QualType Ty) {
// If RTTI is disabled, don't consider key functions.
if (!Context.getLangOptions().RTTI) return false;
@@ -270,7 +284,7 @@ static bool ShouldUseExternalRTTIDescriptor(ASTContext &Context,
return false;
// Get the key function.
- const CXXMethodDecl *KeyFunction = RD->getASTContext().getKeyFunction(RD);
+ const CXXMethodDecl *KeyFunction = Context.getKeyFunction(RD);
if (KeyFunction && !KeyFunction->hasBody()) {
// The class has a key function, but it is not defined in this translation
// unit, so we should use the external descriptor for it.
@@ -383,21 +397,45 @@ static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
}
void RTTIBuilder::BuildVTablePointer(const Type *Ty) {
- const char *VTableName;
+ // abi::__class_type_info.
+ static const char * const ClassTypeInfo =
+ "_ZTVN10__cxxabiv117__class_type_infoE";
+ // abi::__si_class_type_info.
+ static const char * const SIClassTypeInfo =
+ "_ZTVN10__cxxabiv120__si_class_type_infoE";
+ // abi::__vmi_class_type_info.
+ static const char * const VMIClassTypeInfo =
+ "_ZTVN10__cxxabiv121__vmi_class_type_infoE";
+
+ const char *VTableName = 0;
switch (Ty->getTypeClass()) {
- default: assert(0 && "Unhandled type!");
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ assert(false && "Non-canonical and dependent types shouldn't get here");
+
+ case Type::LValueReference:
+ case Type::RValueReference:
+ assert(false && "References shouldn't get here");
case Type::Builtin:
- // GCC treats vector types as fundamental types.
+ // GCC treats vector and complex types as fundamental types.
case Type::Vector:
case Type::ExtVector:
+ case Type::Complex:
+ // FIXME: GCC treats block pointers as fundamental types?!
+ case Type::BlockPointer:
// abi::__fundamental_type_info.
VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
break;
case Type::ConstantArray:
case Type::IncompleteArray:
+ case Type::VariableArray:
// abi::__array_type_info.
VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
break;
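
As a rough cross-check of the classification in this switch, the plain C++ program below exercises the same Itanium RTTI classes; the per-line comments are illustrative and assume an Itanium-ABI target:

#include <typeinfo>
#include <cstdio>

struct Base { virtual ~Base() {} };
struct Derived : Base {};            // single, public, non-virtual base

int main() {
  // The dynamic classes of these type_info objects match the vtables chosen
  // in the switch above:
  std::printf("%s\n", typeid(int).name());      // abi::__fundamental_type_info
  std::printf("%s\n", typeid(int[4]).name());   // abi::__array_type_info
  std::printf("%s\n", typeid(int*).name());     // abi::__pointer_type_info
  std::printf("%s\n", typeid(Derived).name());  // abi::__si_class_type_info
}
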
@@ -412,25 +450,44 @@ void RTTIBuilder::BuildVTablePointer(const Type *Ty) {
// abi::__enum_type_info.
VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
break;
-
+
case Type::Record: {
const CXXRecordDecl *RD =
cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
if (!RD->hasDefinition() || !RD->getNumBases()) {
- // abi::__class_type_info.
- VTableName = "_ZTVN10__cxxabiv117__class_type_infoE";
+ VTableName = ClassTypeInfo;
} else if (CanUseSingleInheritance(RD)) {
- // abi::__si_class_type_info.
- VTableName = "_ZTVN10__cxxabiv120__si_class_type_infoE";
+ VTableName = SIClassTypeInfo;
} else {
- // abi::__vmi_class_type_info.
- VTableName = "_ZTVN10__cxxabiv121__vmi_class_type_infoE";
+ VTableName = VMIClassTypeInfo;
}
break;
}
+ case Type::ObjCObject:
+ // Ignore protocol qualifiers.
+ Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();
+
+ // Handle id and Class.
+ if (isa<BuiltinType>(Ty)) {
+ VTableName = ClassTypeInfo;
+ break;
+ }
+
+ assert(isa<ObjCInterfaceType>(Ty));
+ // Fall through.
+
+ case Type::ObjCInterface:
+ if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
+ VTableName = SIClassTypeInfo;
+ } else {
+ VTableName = ClassTypeInfo;
+ }
+ break;
+
+ case Type::ObjCObjectPointer:
case Type::Pointer:
// abi::__pointer_type_info.
VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
@@ -456,45 +513,64 @@ void RTTIBuilder::BuildVTablePointer(const Type *Ty) {
Fields.push_back(VTable);
}
-llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty,
- bool Force) {
+llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
// We want to operate on the canonical type.
Ty = CGM.getContext().getCanonicalType(Ty);
// Check if we've already emitted an RTTI descriptor for this type.
llvm::SmallString<256> OutName;
- CGM.getMangleContext().mangleCXXRTTI(Ty, OutName);
+ CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, OutName);
llvm::StringRef Name = OutName.str();
llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
if (OldGV && !OldGV->isDeclaration())
return llvm::ConstantExpr::getBitCast(OldGV, Int8PtrTy);
-
+
// Check if there is already an external RTTI descriptor for this type.
- if (!Force && ShouldUseExternalRTTIDescriptor(CGM.getContext(), Ty))
+ bool IsStdLib = IsStandardLibraryRTTIDescriptor(Ty);
+ if (!Force &&
+ (IsStdLib || ShouldUseExternalRTTIDescriptor(CGM.getContext(), Ty)))
return GetAddrOfExternalRTTIDescriptor(Ty);
- llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(Ty);
+ // Emit the standard library with external linkage.
+ llvm::GlobalVariable::LinkageTypes Linkage;
+ if (IsStdLib)
+ Linkage = llvm::GlobalValue::ExternalLinkage;
+ else
+ Linkage = getTypeInfoLinkage(Ty);
// Add the vtable pointer.
BuildVTablePointer(cast<Type>(Ty));
// And the name.
Fields.push_back(BuildName(Ty, DecideHidden(Ty), Linkage));
-
+
switch (Ty->getTypeClass()) {
- default: assert(false && "Unhandled type class!");
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ assert(false && "Non-canonical and dependent types shouldn't get here");
// GCC treats vector types as fundamental types.
case Type::Builtin:
case Type::Vector:
case Type::ExtVector:
+ case Type::Complex:
+ case Type::BlockPointer:
// Itanium C++ ABI 2.9.5p4:
// abi::__fundamental_type_info adds no data members to std::type_info.
break;
-
+
+ case Type::LValueReference:
+ case Type::RValueReference:
+ assert(false && "References shouldn't get here");
+
case Type::ConstantArray:
case Type::IncompleteArray:
+ case Type::VariableArray:
// Itanium C++ ABI 2.9.5p5:
// abi::__array_type_info adds no data members to std::type_info.
break;
@@ -525,11 +601,20 @@ llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty,
break;
}
+
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
+ break;
+
+ case Type::ObjCObjectPointer:
+ BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
+ break;
case Type::Pointer:
- BuildPointerTypeInfo(cast<PointerType>(Ty));
+ BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
break;
-
+
case Type::MemberPointer:
BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
break;
@@ -551,7 +636,18 @@ llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty,
OldGV->replaceAllUsesWith(NewPtr);
OldGV->eraseFromParent();
}
-
+
+ // GCC only relies on the uniqueness of the type names, not the
+ // type_infos themselves, so we can emit these as hidden symbols.
+ // But don't do this if we're worried about strict visibility
+ // compatibility.
+ if (const RecordType *RT = dyn_cast<RecordType>(Ty))
+ CGM.setTypeVisibility(GV, cast<CXXRecordDecl>(RT->getDecl()),
+ /*ForRTTI*/ true);
+ else if (CGM.getCodeGenOpts().HiddenWeakVTables &&
+ Linkage == llvm::GlobalValue::WeakODRLinkage)
+ GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+
return llvm::ConstantExpr::getBitCast(GV, Int8PtrTy);
}
@@ -570,6 +666,30 @@ static unsigned ComputeQualifierFlags(Qualifiers Quals) {
return Flags;
}
+/// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
+/// for the given Objective-C object type.
+void RTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
+ // Drop qualifiers.
+ const Type *T = OT->getBaseType().getTypePtr();
+ assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
+
+ // The builtin types are abi::__class_type_infos and don't require
+ // extra fields.
+ if (isa<BuiltinType>(T)) return;
+
+ ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
+ ObjCInterfaceDecl *Super = Class->getSuperClass();
+
+ // Root classes are also __class_type_info.
+ if (!Super) return;
+
+ QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);
+
+ // Everything else is single inheritance.
+ llvm::Constant *BaseTypeInfo = RTTIBuilder(CGM).BuildTypeInfo(SuperTy);
+ Fields.push_back(BaseTypeInfo);
+}
+
/// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
/// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
void RTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
@@ -677,7 +797,7 @@ void RTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
// direct proper base. Each description is of the type:
//
// struct abi::__base_class_type_info {
- // public:
+ // public:
// const __class_type_info *__base_type;
// long __offset_flags;
//
@@ -725,9 +845,7 @@ void RTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
/// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
/// used for pointer types.
-void RTTIBuilder::BuildPointerTypeInfo(const PointerType *Ty) {
- QualType PointeeTy = Ty->getPointeeType();
-
+void RTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
Qualifiers Quals;
QualType UnqualifiedPointeeTy =
CGM.getContext().getUnqualifiedArrayType(PointeeTy, Quals);
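
BuildPointerTypeInfo now receives only the pointee type because abi::__pointer_type_info stores the unqualified pointee plus qualifier flags. A small illustration of why qualified and unqualified pointee types yield distinct type_infos (assuming an Itanium-ABI target; not part of the patch):

#include <typeinfo>
#include <cstdio>

int main() {
  // Same unqualified pointee, different qualifier flags:
  std::printf("%d\n", typeid(int*) == typeid(const int*));  // 0
  std::printf("%s\n", typeid(const int*).name());           // typically "PKi"
}
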
diff --git a/lib/CodeGen/CGRecordLayout.h b/lib/CodeGen/CGRecordLayout.h
index e95591e5cc01..9b4e9f86c6da 100644
--- a/lib/CodeGen/CGRecordLayout.h
+++ b/lib/CodeGen/CGRecordLayout.h
@@ -144,6 +144,21 @@ public:
void print(llvm::raw_ostream &OS) const;
void dump() const;
+
+ /// \brief Given a bit-field decl, build an appropriate helper object for
+ /// accessing that field (which is expected to have the given offset and
+ /// size).
+ static CGBitFieldInfo MakeInfo(class CodeGenTypes &Types, const FieldDecl *FD,
+ uint64_t FieldOffset, uint64_t FieldSize);
+
+ /// \brief Given a bit-field decl, build an appropriate helper object for
+ /// accessing that field (which is expected to have the given offset and
+ /// size). The field decl should be known to be contained within a type of at
+ /// least the given size and with the given alignment.
+ static CGBitFieldInfo MakeInfo(CodeGenTypes &Types, const FieldDecl *FD,
+ uint64_t FieldOffset, uint64_t FieldSize,
+ uint64_t ContainingTypeSizeInBits,
+ unsigned ContainingTypeAlign);
};
/// CGRecordLayout - This class handles struct and union layout info while
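
For a concrete picture of what a CGBitFieldInfo has to describe — which byte-aligned storage unit holds a field and at what bit offset and width inside it — here is a minimal sketch, assuming a typical Itanium-ABI layout (illustrative only):

#include <cstdio>

struct S {
  unsigned a : 3;
  unsigned b : 13;   // a and b share the first 32-bit storage unit
  unsigned c : 20;   // does not fit in the remaining 16 bits, so it starts
                     // in the next 32-bit unit
};

int main() {
  std::printf("sizeof(S) = %zu\n", sizeof(S));  // typically 8
}
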
@@ -174,20 +189,21 @@ private:
/// Whether one of the fields in this record layout is a pointer to data
/// member, or a struct that contains pointer to data member.
- bool ContainsPointerToDataMember : 1;
+ bool IsZeroInitializable : 1;
public:
- CGRecordLayout(const llvm::Type *T, bool ContainsPointerToDataMember)
- : LLVMType(T), ContainsPointerToDataMember(ContainsPointerToDataMember) {}
+ CGRecordLayout(const llvm::Type *T, bool IsZeroInitializable)
+ : LLVMType(T), IsZeroInitializable(IsZeroInitializable) {}
/// \brief Return the LLVM type associated with this record.
const llvm::Type *getLLVMType() const {
return LLVMType;
}
- /// \brief Check whether this struct contains pointers to data members.
- bool containsPointerToDataMember() const {
- return ContainsPointerToDataMember;
+ /// \brief Check whether this struct can be C++ zero-initialized
+ /// with a zeroinitializer.
+ bool isZeroInitializable() const {
+ return IsZeroInitializable;
}
/// \brief Return llvm::StructType element number that corresponds to the
diff --git a/lib/CodeGen/CGRecordLayoutBuilder.cpp b/lib/CodeGen/CGRecordLayoutBuilder.cpp
index 9f1687577c3e..77a319fa3ab1 100644
--- a/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -18,6 +18,7 @@
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "CodeGenTypes.h"
+#include "CGCXXABI.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Type.h"
#include "llvm/Support/Debug.h"
@@ -45,10 +46,9 @@ public:
typedef std::pair<const CXXRecordDecl *, unsigned> LLVMBaseInfo;
llvm::SmallVector<LLVMBaseInfo, 16> LLVMNonVirtualBases;
- /// ContainsPointerToDataMember - Whether one of the fields in this record
- /// layout is a pointer to data member, or a struct that contains pointer to
- /// data member.
- bool ContainsPointerToDataMember;
+ /// IsZeroInitializable - Whether this struct can be C++
+ /// zero-initialized with an LLVM zeroinitializer.
+ bool IsZeroInitializable;
/// Packed - Whether the resulting LLVM struct will be packed or not.
bool Packed;
@@ -115,14 +115,14 @@ private:
unsigned getTypeAlignment(const llvm::Type *Ty) const;
- /// CheckForPointerToDataMember - Check if the given type contains a pointer
+ /// CheckZeroInitializable - Check if the given type contains a pointer
/// to data member.
- void CheckForPointerToDataMember(QualType T);
- void CheckForPointerToDataMember(const CXXRecordDecl *RD);
+ void CheckZeroInitializable(QualType T);
+ void CheckZeroInitializable(const CXXRecordDecl *RD);
public:
CGRecordLayoutBuilder(CodeGenTypes &Types)
- : ContainsPointerToDataMember(false), Packed(false), Types(Types),
+ : IsZeroInitializable(true), Packed(false), Types(Types),
Alignment(0), AlignmentAsLLVMStruct(1),
BitsAvailableInLastField(0), NextFieldOffsetInBytes(0) { }
@@ -157,15 +157,12 @@ void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
LayoutFields(D);
}
-static CGBitFieldInfo ComputeBitFieldInfo(CodeGenTypes &Types,
- const FieldDecl *FD,
- uint64_t FieldOffset,
- uint64_t FieldSize) {
- const RecordDecl *RD = FD->getParent();
- const ASTRecordLayout &RL = Types.getContext().getASTRecordLayout(RD);
- uint64_t ContainingTypeSizeInBits = RL.getSize();
- unsigned ContainingTypeAlign = RL.getAlignment();
-
+CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
+ const FieldDecl *FD,
+ uint64_t FieldOffset,
+ uint64_t FieldSize,
+ uint64_t ContainingTypeSizeInBits,
+ unsigned ContainingTypeAlign) {
const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(FD->getType());
uint64_t TypeSizeInBytes = Types.getTargetData().getTypeAllocSize(Ty);
uint64_t TypeSizeInBits = TypeSizeInBytes * 8;
@@ -255,6 +252,19 @@ static CGBitFieldInfo ComputeBitFieldInfo(CodeGenTypes &Types,
return CGBitFieldInfo(FieldSize, NumComponents, Components, IsSigned);
}
+CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
+ const FieldDecl *FD,
+ uint64_t FieldOffset,
+ uint64_t FieldSize) {
+ const RecordDecl *RD = FD->getParent();
+ const ASTRecordLayout &RL = Types.getContext().getASTRecordLayout(RD);
+ uint64_t ContainingTypeSizeInBits = RL.getSize();
+ unsigned ContainingTypeAlign = RL.getAlignment();
+
+ return MakeInfo(Types, FD, FieldOffset, FieldSize, ContainingTypeSizeInBits,
+ ContainingTypeAlign);
+}
+
void CGRecordLayoutBuilder::LayoutBitField(const FieldDecl *D,
uint64_t FieldOffset) {
uint64_t FieldSize =
@@ -287,7 +297,8 @@ void CGRecordLayoutBuilder::LayoutBitField(const FieldDecl *D,
// Add the bit field info.
LLVMBitFields.push_back(
- LLVMBitFieldInfo(D, ComputeBitFieldInfo(Types, D, FieldOffset, FieldSize)));
+ LLVMBitFieldInfo(D, CGBitFieldInfo::MakeInfo(Types, D, FieldOffset,
+ FieldSize)));
AppendBytes(NumBytesToAppend);
@@ -311,8 +322,7 @@ bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
return true;
}
- // Check if we have a pointer to data member in this field.
- CheckForPointerToDataMember(D->getType());
+ CheckZeroInitializable(D->getType());
assert(FieldOffset % 8 == 0 && "FieldOffset is not on a byte boundary!");
uint64_t FieldOffsetInBytes = FieldOffset / 8;
@@ -380,7 +390,8 @@ CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
// Add the bit field info.
LLVMBitFields.push_back(
- LLVMBitFieldInfo(Field, ComputeBitFieldInfo(Types, Field, 0, FieldSize)));
+ LLVMBitFieldInfo(Field, CGBitFieldInfo::MakeInfo(Types, Field,
+ 0, FieldSize)));
return FieldTy;
}
@@ -458,7 +469,7 @@ void CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *BaseDecl,
return;
}
- CheckForPointerToDataMember(BaseDecl);
+ CheckZeroInitializable(BaseDecl);
// FIXME: Actually use a better type than [sizeof(BaseDecl) x i8] when we can.
AppendPadding(BaseOffset / 8, 1);
@@ -603,9 +614,9 @@ unsigned CGRecordLayoutBuilder::getTypeAlignment(const llvm::Type *Ty) const {
return Types.getTargetData().getABITypeAlignment(Ty);
}
-void CGRecordLayoutBuilder::CheckForPointerToDataMember(QualType T) {
+void CGRecordLayoutBuilder::CheckZeroInitializable(QualType T) {
// This record already contains a member pointer.
- if (ContainsPointerToDataMember)
+ if (!IsZeroInitializable)
return;
// Can only have member pointers if we're compiling C++.
@@ -615,21 +626,17 @@ void CGRecordLayoutBuilder::CheckForPointerToDataMember(QualType T) {
T = Types.getContext().getBaseElementType(T);
if (const MemberPointerType *MPT = T->getAs<MemberPointerType>()) {
- if (!MPT->getPointeeType()->isFunctionType()) {
- // We have a pointer to data member.
- ContainsPointerToDataMember = true;
- }
+ if (!Types.getCXXABI().isZeroInitializable(MPT))
+ IsZeroInitializable = false;
} else if (const RecordType *RT = T->getAs<RecordType>()) {
const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
-
- return CheckForPointerToDataMember(RD);
+ CheckZeroInitializable(RD);
}
}
-void
-CGRecordLayoutBuilder::CheckForPointerToDataMember(const CXXRecordDecl *RD) {
+void CGRecordLayoutBuilder::CheckZeroInitializable(const CXXRecordDecl *RD) {
// This record already contains a member pointer.
- if (ContainsPointerToDataMember)
+ if (!IsZeroInitializable)
return;
// FIXME: It would be better if there was a way to explicitly compute the
@@ -638,8 +645,8 @@ CGRecordLayoutBuilder::CheckForPointerToDataMember(const CXXRecordDecl *RD) {
const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
- if (Layout.containsPointerToDataMember())
- ContainsPointerToDataMember = true;
+ if (!Layout.isZeroInitializable())
+ IsZeroInitializable = false;
}
CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D) {
@@ -652,7 +659,7 @@ CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D) {
Builder.Packed);
CGRecordLayout *RL =
- new CGRecordLayout(Ty, Builder.ContainsPointerToDataMember);
+ new CGRecordLayout(Ty, Builder.IsZeroInitializable);
// Add all the non-virtual base field numbers.
RL->NonVirtualBaseFields.insert(Builder.LLVMNonVirtualBases.begin(),
@@ -723,7 +730,7 @@ CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D) {
void CGRecordLayout::print(llvm::raw_ostream &OS) const {
OS << "<CGRecordLayout\n";
OS << " LLVMType:" << *LLVMType << "\n";
- OS << " ContainsPointerToDataMember:" << ContainsPointerToDataMember << "\n";
+ OS << " IsZeroInitializable:" << IsZeroInitializable << "\n";
OS << " BitFields:[\n";
// Print bit-field infos in declaration order.
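
The rename from ContainsPointerToDataMember to IsZeroInitializable reflects why the check exists in the first place. A short illustration of the underlying ABI fact, assuming the Itanium member-pointer representation (not code from the patch):

#include <cstdio>
#include <cstring>

struct S { int x; };

int main() {
  // In the Itanium C++ ABI a null pointer-to-data-member is the offset -1,
  // not an all-zero bit pattern, so a record containing one cannot be
  // zero-initialized with an LLVM zeroinitializer.
  int S::*pm = nullptr;
  long raw = 0;
  std::memcpy(&raw, &pm, sizeof(pm) < sizeof(raw) ? sizeof(pm) : sizeof(raw));
  std::printf("%ld\n", raw);   // typically -1
}
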
diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp
index b72725edca7e..16145f766af2 100644
--- a/lib/CodeGen/CGStmt.cpp
+++ b/lib/CodeGen/CGStmt.cpp
@@ -34,7 +34,8 @@ void CodeGenFunction::EmitStopPoint(const Stmt *S) {
DI->setLocation(S->getLocEnd());
else
DI->setLocation(S->getLocStart());
- DI->EmitStopPoint(CurFn, Builder);
+ DI->UpdateLineDirectiveRegion(Builder);
+ DI->EmitStopPoint(Builder);
}
}
@@ -152,7 +153,7 @@ RValue CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
CGDebugInfo *DI = getDebugInfo();
if (DI) {
DI->setLocation(S.getLBracLoc());
- DI->EmitRegionStart(CurFn, Builder);
+ DI->EmitRegionStart(Builder);
}
// Keep track of the current cleanup stack depth.
@@ -164,7 +165,7 @@ RValue CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
if (DI) {
DI->setLocation(S.getRBracLoc());
- DI->EmitRegionEnd(CurFn, Builder);
+ DI->EmitRegionEnd(Builder);
}
RValue RV;
@@ -247,32 +248,35 @@ void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelStmt *S) {
JumpDest &Dest = LabelMap[S];
- if (Dest.Block) return Dest;
+ if (Dest.isValid()) return Dest;
// Create, but don't insert, the new block.
- Dest.Block = createBasicBlock(S->getName());
- Dest.ScopeDepth = EHScopeStack::stable_iterator::invalid();
+ Dest = JumpDest(createBasicBlock(S->getName()),
+ EHScopeStack::stable_iterator::invalid(),
+ NextCleanupDestIndex++);
return Dest;
}
void CodeGenFunction::EmitLabel(const LabelStmt &S) {
JumpDest &Dest = LabelMap[&S];
- // If we didn't needed a forward reference to this label, just go
+ // If we didn't need a forward reference to this label, just go
// ahead and create a destination at the current scope.
- if (!Dest.Block) {
+ if (!Dest.isValid()) {
Dest = getJumpDestInCurrentScope(S.getName());
// Otherwise, we need to give this label a target depth and remove
// it from the branch-fixups list.
} else {
- assert(!Dest.ScopeDepth.isValid() && "already emitted label!");
- Dest.ScopeDepth = EHStack.stable_begin();
+ assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
+ Dest = JumpDest(Dest.getBlock(),
+ EHStack.stable_begin(),
+ Dest.getDestIndex());
- EHStack.resolveBranchFixups(Dest.Block);
+ ResolveBranchFixups(Dest.getBlock());
}
- EmitBlock(Dest.Block);
+ EmitBlock(Dest.getBlock());
}
@@ -372,7 +376,7 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S) {
// Emit the header for the loop, which will also become
// the continue target.
JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
- EmitBlock(LoopHeader.Block);
+ EmitBlock(LoopHeader.getBlock());
// Create an exit block for when the condition fails, which will
// also become the break target.
@@ -408,13 +412,13 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S) {
// As long as the condition is true, go to the loop body.
llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
if (EmitBoolCondBranch) {
- llvm::BasicBlock *ExitBlock = LoopExit.Block;
+ llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
if (ConditionScope.requiresCleanups())
ExitBlock = createBasicBlock("while.exit");
Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
- if (ExitBlock != LoopExit.Block) {
+ if (ExitBlock != LoopExit.getBlock()) {
EmitBlock(ExitBlock);
EmitBranchThroughCleanup(LoopExit);
}
@@ -434,15 +438,15 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S) {
ConditionScope.ForceCleanup();
// Branch to the loop header again.
- EmitBranch(LoopHeader.Block);
+ EmitBranch(LoopHeader.getBlock());
// Emit the exit block.
- EmitBlock(LoopExit.Block, true);
+ EmitBlock(LoopExit.getBlock(), true);
// The LoopHeader typically is just a branch if we skipped emitting
// a branch, try to erase it.
if (!EmitBoolCondBranch)
- SimplifyForwardingBlocks(LoopHeader.Block);
+ SimplifyForwardingBlocks(LoopHeader.getBlock());
}
void CodeGenFunction::EmitDoStmt(const DoStmt &S) {
@@ -462,7 +466,7 @@ void CodeGenFunction::EmitDoStmt(const DoStmt &S) {
BreakContinueStack.pop_back();
- EmitBlock(LoopCond.Block);
+ EmitBlock(LoopCond.getBlock());
// C99 6.8.5.2: "The evaluation of the controlling expression takes place
// after each execution of the loop body."
@@ -481,15 +485,15 @@ void CodeGenFunction::EmitDoStmt(const DoStmt &S) {
// As long as the condition is true, iterate the loop.
if (EmitBoolCondBranch)
- Builder.CreateCondBr(BoolCondVal, LoopBody, LoopExit.Block);
+ Builder.CreateCondBr(BoolCondVal, LoopBody, LoopExit.getBlock());
// Emit the exit block.
- EmitBlock(LoopExit.Block);
+ EmitBlock(LoopExit.getBlock());
// The DoCond block typically is just a branch if we skipped
// emitting a branch, try to erase it.
if (!EmitBoolCondBranch)
- SimplifyForwardingBlocks(LoopCond.Block);
+ SimplifyForwardingBlocks(LoopCond.getBlock());
}
void CodeGenFunction::EmitForStmt(const ForStmt &S) {
@@ -497,6 +501,12 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S) {
RunCleanupsScope ForScope(*this);
+ CGDebugInfo *DI = getDebugInfo();
+ if (DI) {
+ DI->setLocation(S.getSourceRange().getBegin());
+ DI->EmitRegionStart(Builder);
+ }
+
// Evaluate the first part before the loop.
if (S.getInit())
EmitStmt(S.getInit());
@@ -505,7 +515,7 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S) {
// If there's an increment, the continue scope will be overwritten
// later.
JumpDest Continue = getJumpDestInCurrentScope("for.cond");
- llvm::BasicBlock *CondBlock = Continue.Block;
+ llvm::BasicBlock *CondBlock = Continue.getBlock();
EmitBlock(CondBlock);
// Create a cleanup scope for the condition variable cleanups.
@@ -515,7 +525,7 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S) {
if (S.getCond()) {
// If the for statement has a condition scope, emit the local variable
// declaration.
- llvm::BasicBlock *ExitBlock = LoopExit.Block;
+ llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
if (S.getConditionVariable()) {
EmitLocalBlockVarDecl(*S.getConditionVariable());
}
@@ -533,7 +543,7 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S) {
BoolCondVal = EvaluateExprAsBool(S.getCond());
Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock);
- if (ExitBlock != LoopExit.Block) {
+ if (ExitBlock != LoopExit.getBlock()) {
EmitBlock(ExitBlock);
EmitBranchThroughCleanup(LoopExit);
}
@@ -554,12 +564,6 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S) {
// Store the blocks to use for break and continue.
BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
- CGDebugInfo *DI = getDebugInfo();
- if (DI) {
- DI->setLocation(S.getSourceRange().getBegin());
- DI->EmitRegionStart(CurFn, Builder);
- }
-
{
// Create a separate cleanup scope for the body, in case it is not
// a compound statement.
@@ -569,7 +573,7 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S) {
// If there is an increment, emit it next.
if (S.getInc()) {
- EmitBlock(Continue.Block);
+ EmitBlock(Continue.getBlock());
EmitStmt(S.getInc());
}
@@ -582,11 +586,11 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S) {
if (DI) {
DI->setLocation(S.getSourceRange().getEnd());
- DI->EmitRegionEnd(CurFn, Builder);
+ DI->EmitRegionEnd(Builder);
}
// Emit the fall-through block.
- EmitBlock(LoopExit.Block, true);
+ EmitBlock(LoopExit.getBlock(), true);
}
void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
@@ -839,13 +843,15 @@ void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
// Otherwise, just forward the default block to the switch end.
} else {
- DefaultBlock->replaceAllUsesWith(SwitchExit.Block);
+ DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
delete DefaultBlock;
}
}
+ ConditionScope.ForceCleanup();
+
// Emit continuation.
- EmitBlock(SwitchExit.Block, true);
+ EmitBlock(SwitchExit.getBlock(), true);
SwitchInsn = SavedSwitchInsn;
CaseRangeBlock = SavedCRBlock;
@@ -855,16 +861,24 @@ static std::string
SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
llvm::SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=0) {
std::string Result;
+ std::string tmp;
while (*Constraint) {
switch (*Constraint) {
default:
- Result += Target.convertConstraint(*Constraint);
+ tmp = Target.convertConstraint(*Constraint);
+ if (Result.find(tmp) == std::string::npos) // Combine unique constraints
+ Result += tmp;
break;
// Ignore these
case '*':
case '?':
case '!':
+ case '=': // Will see this and the following in mult-alt constraints.
+ case '+':
+ break;
+ case ',': // FIXME - Until the back-end properly supports
+ return Result; // multiple alternative constraints, we stop here.
break;
case 'g':
Result += "imr";
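
A rough standalone restatement of the simplification above, for illustration only: it ignores TargetInfo::convertConstraint and just shows the duplicate-dropping, the ignored '=' and '+' modifiers, and the cut at the first ',' until multiple alternatives are supported:

#include <string>
#include <iostream>

std::string simplify(const std::string &c) {
  std::string out;
  for (char ch : c) {
    switch (ch) {
    case '*': case '?': case '!': case '=': case '+':
      break;                       // modifiers are ignored
    case ',':
      return out;                  // stop at the first alternative
    case 'g':
      out += "imr";
      break;
    default:
      if (out.find(ch) == std::string::npos)  // combine unique constraints
        out += ch;
    }
  }
  return out;
}

int main() { std::cout << simplify("=r,m") << "\n"; }  // prints "r"
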
@@ -888,40 +902,50 @@ SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
return Result;
}
-llvm::Value* CodeGenFunction::EmitAsmInput(const AsmStmt &S,
- const TargetInfo::ConstraintInfo &Info,
- const Expr *InputExpr,
- std::string &ConstraintStr) {
+llvm::Value*
+CodeGenFunction::EmitAsmInputLValue(const AsmStmt &S,
+ const TargetInfo::ConstraintInfo &Info,
+ LValue InputValue, QualType InputType,
+ std::string &ConstraintStr) {
llvm::Value *Arg;
if (Info.allowsRegister() || !Info.allowsMemory()) {
- if (!CodeGenFunction::hasAggregateLLVMType(InputExpr->getType())) {
- Arg = EmitScalarExpr(InputExpr);
+ if (!CodeGenFunction::hasAggregateLLVMType(InputType)) {
+ Arg = EmitLoadOfLValue(InputValue, InputType).getScalarVal();
} else {
- InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
- LValue Dest = EmitLValue(InputExpr);
-
- const llvm::Type *Ty = ConvertType(InputExpr->getType());
+ const llvm::Type *Ty = ConvertType(InputType);
uint64_t Size = CGM.getTargetData().getTypeSizeInBits(Ty);
if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
Ty = llvm::IntegerType::get(VMContext, Size);
Ty = llvm::PointerType::getUnqual(Ty);
- Arg = Builder.CreateLoad(Builder.CreateBitCast(Dest.getAddress(), Ty));
+ Arg = Builder.CreateLoad(Builder.CreateBitCast(InputValue.getAddress(),
+ Ty));
} else {
- Arg = Dest.getAddress();
+ Arg = InputValue.getAddress();
ConstraintStr += '*';
}
}
} else {
- InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
- LValue Dest = EmitLValue(InputExpr);
- Arg = Dest.getAddress();
+ Arg = InputValue.getAddress();
ConstraintStr += '*';
}
return Arg;
}
+llvm::Value* CodeGenFunction::EmitAsmInput(const AsmStmt &S,
+ const TargetInfo::ConstraintInfo &Info,
+ const Expr *InputExpr,
+ std::string &ConstraintStr) {
+ if (Info.allowsRegister() || !Info.allowsMemory())
+ if (!CodeGenFunction::hasAggregateLLVMType(InputExpr->getType()))
+ return EmitScalarExpr(InputExpr);
+
+ InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
+ LValue Dest = EmitLValue(InputExpr);
+ return EmitAsmInputLValue(S, Info, Dest, InputExpr->getType(), ConstraintStr);
+}
+
void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
// Analyze the asm string to decompose it into its pieces. We know that Sema
// has already done this, so it is guaranteed to be successful.
@@ -1031,7 +1055,8 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
InOutConstraints += ',';
const Expr *InputExpr = S.getOutputExpr(i);
- llvm::Value *Arg = EmitAsmInput(S, Info, InputExpr, InOutConstraints);
+ llvm::Value *Arg = EmitAsmInputLValue(S, Info, Dest, InputExpr->getType(),
+ InOutConstraints);
if (Info.allowsRegister())
InOutConstraints += llvm::utostr(i);
diff --git a/lib/CodeGen/CGTemporaries.cpp b/lib/CodeGen/CGTemporaries.cpp
index fd7c616b1162..dfb8dc63c5c5 100644
--- a/lib/CodeGen/CGTemporaries.cpp
+++ b/lib/CodeGen/CGTemporaries.cpp
@@ -15,34 +15,43 @@
using namespace clang;
using namespace CodeGen;
-static void EmitTemporaryCleanup(CodeGenFunction &CGF,
- const CXXTemporary *Temporary,
- llvm::Value *Addr,
- llvm::Value *CondPtr) {
- llvm::BasicBlock *CondEnd = 0;
+namespace {
+ struct DestroyTemporary : EHScopeStack::Cleanup {
+ const CXXTemporary *Temporary;
+ llvm::Value *Addr;
+ llvm::Value *CondPtr;
+
+ DestroyTemporary(const CXXTemporary *Temporary, llvm::Value *Addr,
+ llvm::Value *CondPtr)
+ : Temporary(Temporary), Addr(Addr), CondPtr(CondPtr) {}
+
+ void Emit(CodeGenFunction &CGF, bool IsForEH) {
+ llvm::BasicBlock *CondEnd = 0;
- // If this is a conditional temporary, we need to check the condition
- // boolean and only call the destructor if it's true.
- if (CondPtr) {
- llvm::BasicBlock *CondBlock = CGF.createBasicBlock("temp.cond-dtor.call");
- CondEnd = CGF.createBasicBlock("temp.cond-dtor.cont");
+ // If this is a conditional temporary, we need to check the condition
+ // boolean and only call the destructor if it's true.
+ if (CondPtr) {
+ llvm::BasicBlock *CondBlock =
+ CGF.createBasicBlock("temp.cond-dtor.call");
+ CondEnd = CGF.createBasicBlock("temp.cond-dtor.cont");
- llvm::Value *Cond = CGF.Builder.CreateLoad(CondPtr);
- CGF.Builder.CreateCondBr(Cond, CondBlock, CondEnd);
- CGF.EmitBlock(CondBlock);
- }
+ llvm::Value *Cond = CGF.Builder.CreateLoad(CondPtr);
+ CGF.Builder.CreateCondBr(Cond, CondBlock, CondEnd);
+ CGF.EmitBlock(CondBlock);
+ }
- CGF.EmitCXXDestructorCall(Temporary->getDestructor(),
- Dtor_Complete, /*ForVirtualBase=*/false,
- Addr);
+ CGF.EmitCXXDestructorCall(Temporary->getDestructor(),
+ Dtor_Complete, /*ForVirtualBase=*/false,
+ Addr);
- if (CondPtr) {
- // Reset the condition to false.
- CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(CGF.getLLVMContext()),
- CondPtr);
- CGF.EmitBlock(CondEnd);
- }
-}
+ if (CondPtr) {
+ // Reset the condition to false.
+ CGF.Builder.CreateStore(CGF.Builder.getFalse(), CondPtr);
+ CGF.EmitBlock(CondEnd);
+ }
+ }
+ };
+}
/// Emits all the code to cause the given temporary to be cleaned up.
void CodeGenFunction::EmitCXXTemporary(const CXXTemporary *Temporary,
@@ -59,16 +68,11 @@ void CodeGenFunction::EmitCXXTemporary(const CXXTemporary *Temporary,
InitTempAlloca(CondPtr, llvm::ConstantInt::getFalse(VMContext));
// Now set it to true.
- Builder.CreateStore(llvm::ConstantInt::getTrue(VMContext), CondPtr);
+ Builder.CreateStore(Builder.getTrue(), CondPtr);
}
- CleanupBlock Cleanup(*this, NormalCleanup);
- EmitTemporaryCleanup(*this, Temporary, Ptr, CondPtr);
-
- if (Exceptions) {
- Cleanup.beginEHCleanup();
- EmitTemporaryCleanup(*this, Temporary, Ptr, CondPtr);
- }
+ EHStack.pushCleanup<DestroyTemporary>(NormalAndEHCleanup,
+ Temporary, Ptr, CondPtr);
}
RValue
@@ -76,23 +80,13 @@ CodeGenFunction::EmitCXXExprWithTemporaries(const CXXExprWithTemporaries *E,
llvm::Value *AggLoc,
bool IsAggLocVolatile,
bool IsInitializer) {
- RValue RV;
- {
- RunCleanupsScope Scope(*this);
-
- RV = EmitAnyExpr(E->getSubExpr(), AggLoc, IsAggLocVolatile,
+ RunCleanupsScope Scope(*this);
+ return EmitAnyExpr(E->getSubExpr(), AggLoc, IsAggLocVolatile,
/*IgnoreResult=*/false, IsInitializer);
- }
- return RV;
}
LValue CodeGenFunction::EmitCXXExprWithTemporariesLValue(
const CXXExprWithTemporaries *E) {
- LValue LV;
- {
- RunCleanupsScope Scope(*this);
-
- LV = EmitLValue(E->getSubExpr());
- }
- return LV;
+ RunCleanupsScope Scope(*this);
+ return EmitLValue(E->getSubExpr());
}
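
The CondPtr flag carried by the DestroyTemporary cleanup exists for patterns like the following, where the temporary is only constructed on one branch and its destructor must therefore run conditionally (illustrative source, not from the patch):

#include <cstdio>

struct T {
  T()  { std::puts("ctor"); }
  ~T() { std::puts("dtor"); }
};

// T() is constructed only when b is true, so the pushed cleanup has to test
// a runtime flag before calling the destructor.
void f(bool b) { b ? (void)T() : (void)0; }

int main() {
  f(true);    // prints ctor then dtor
  f(false);   // prints nothing
}
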
diff --git a/lib/CodeGen/CGVTT.cpp b/lib/CodeGen/CGVTT.cpp
index 61c74230e118..56acfc84802e 100644
--- a/lib/CodeGen/CGVTT.cpp
+++ b/lib/CodeGen/CGVTT.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "CodeGenModule.h"
+#include "CGCXXABI.h"
#include "clang/AST/RecordLayout.h"
using namespace clang;
using namespace CodeGen;
@@ -373,7 +374,7 @@ CodeGenVTables::GenerateVTT(llvm::GlobalVariable::LinkageTypes Linkage,
return 0;
llvm::SmallString<256> OutName;
- CGM.getMangleContext().mangleCXXVTT(RD, OutName);
+ CGM.getCXXABI().getMangleContext().mangleCXXVTT(RD, OutName);
llvm::StringRef Name = OutName.str();
D1(printf("vtt %s\n", RD->getNameAsCString()));
diff --git a/lib/CodeGen/CGVTables.cpp b/lib/CodeGen/CGVTables.cpp
index 6abac2609f5f..bed4670f7f95 100644
--- a/lib/CodeGen/CGVTables.cpp
+++ b/lib/CodeGen/CGVTables.cpp
@@ -13,8 +13,10 @@
#include "CodeGenModule.h"
#include "CodeGenFunction.h"
+#include "CGCXXABI.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/RecordLayout.h"
+#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Support/Compiler.h"
@@ -2408,12 +2410,12 @@ llvm::Constant *CodeGenModule::GetAddrOfThunk(GlobalDecl GD,
// Compute the mangled name.
llvm::SmallString<256> Name;
if (const CXXDestructorDecl* DD = dyn_cast<CXXDestructorDecl>(MD))
- getMangleContext().mangleCXXDtorThunk(DD, GD.getDtorType(), Thunk.This,
- Name);
+ getCXXABI().getMangleContext().mangleCXXDtorThunk(DD, GD.getDtorType(),
+ Thunk.This, Name);
else
- getMangleContext().mangleThunk(MD, Thunk, Name);
+ getCXXABI().getMangleContext().mangleThunk(MD, Thunk, Name);
- const llvm::Type *Ty = getTypes().GetFunctionTypeForVTable(MD);
+ const llvm::Type *Ty = getTypes().GetFunctionTypeForVTable(GD);
return GetOrCreateLLVMFunction(Name, Ty, GD);
}
@@ -2460,6 +2462,54 @@ static llvm::Value *PerformTypeAdjustment(CodeGenFunction &CGF,
return CGF.Builder.CreateBitCast(V, Ptr->getType());
}
+static void setThunkVisibility(CodeGenModule &CGM, const CXXMethodDecl *MD,
+ const ThunkInfo &Thunk, llvm::Function *Fn) {
+ CGM.setGlobalVisibility(Fn, MD);
+
+ if (!CGM.getCodeGenOpts().HiddenWeakVTables)
+ return;
+
+ // If the thunk has weak/linkonce linkage, but the function must be
+ // emitted in every translation unit that references it, then we can
+ // emit its thunks with hidden visibility, since its thunks must be
+ // emitted when the function is.
+
+ // This follows CodeGenModule::setTypeVisibility; see the comments
+ // there for explanation.
+
+ if ((Fn->getLinkage() != llvm::GlobalVariable::LinkOnceODRLinkage &&
+ Fn->getLinkage() != llvm::GlobalVariable::WeakODRLinkage) ||
+ Fn->getVisibility() != llvm::GlobalVariable::DefaultVisibility)
+ return;
+
+ if (MD->hasAttr<VisibilityAttr>())
+ return;
+
+ switch (MD->getTemplateSpecializationKind()) {
+ case TSK_ExplicitInstantiationDefinition:
+ case TSK_ExplicitInstantiationDeclaration:
+ return;
+
+ case TSK_Undeclared:
+ break;
+
+ case TSK_ExplicitSpecialization:
+ case TSK_ImplicitInstantiation:
+ if (!CGM.getCodeGenOpts().HiddenWeakTemplateVTables)
+ return;
+ break;
+ }
+
+ // If there's an explicit definition, and that definition is
+ // out-of-line, then we can't assume that all users will have a
+ // definition to emit.
+ const FunctionDecl *Def = 0;
+ if (MD->hasBody(Def) && Def->isOutOfLine())
+ return;
+
+ Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
+}
+
void CodeGenFunction::GenerateThunk(llvm::Function *Fn, GlobalDecl GD,
const ThunkInfo &Thunk) {
const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
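
For context, the kind of source that produces the thunks GenerateThunk emits and setThunkVisibility adjusts is ordinary non-virtual multiple inheritance; a minimal sketch:

struct B1 { virtual void f() {} long x; };
struct B2 { virtual void f() {} long y; };

// D::f overrides both B1::f and B2::f; the vtable for the B2-in-D subobject
// needs a thunk that adjusts 'this' by the subobject offset before entering
// the real D::f.
struct D : B1, B2 { void f() override {} };

int main() {
  D d;
  B2 *p = &d;   // points into the middle of d
  p->f();       // dispatches through the this-adjusting thunk
}
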
@@ -2473,13 +2523,8 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn, GlobalDecl GD,
// CodeGenFunction::GenerateCode.
// Create the implicit 'this' parameter declaration.
- CXXThisDecl = ImplicitParamDecl::Create(getContext(), 0,
- MD->getLocation(),
- &getContext().Idents.get("this"),
- ThisType);
-
- // Add the 'this' parameter.
- FunctionArgs.push_back(std::make_pair(CXXThisDecl, CXXThisDecl->getType()));
+ CurGD = GD;
+ CGM.getCXXABI().BuildInstanceFunctionParams(*this, ResultType, FunctionArgs);
// Add the rest of the parameters.
for (FunctionDecl::param_const_iterator I = MD->param_begin(),
@@ -2491,6 +2536,8 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn, GlobalDecl GD,
StartFunction(GlobalDecl(), ResultType, Fn, FunctionArgs, SourceLocation());
+ CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
+
// Adjust the 'this' pointer if necessary.
llvm::Value *AdjustedThisPtr =
PerformTypeAdjustment(*this, LoadCXXThis(),
@@ -2514,7 +2561,7 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn, GlobalDecl GD,
// Get our callee.
const llvm::Type *Ty =
- CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
+ CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(GD),
FPT->isVariadic());
llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty);
@@ -2574,24 +2621,20 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn, GlobalDecl GD,
}
if (!ResultType->isVoidType() && Slot.isNull())
- EmitReturnOfRValue(RV, ResultType);
+    CGM.getCXXABI().EmitReturnFromThunk(*this, RV, ResultType);
FinishFunction();
- // Destroy the 'this' declaration.
- CXXThisDecl->Destroy(getContext());
-
// Set the right linkage.
CGM.setFunctionLinkage(MD, Fn);
// Set the right visibility.
- CGM.setGlobalVisibility(Fn, MD);
+ setThunkVisibility(CGM, MD, Thunk, Fn);
}
void CodeGenVTables::EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk)
{
llvm::Constant *Entry = CGM.GetAddrOfThunk(GD, Thunk);
- const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
// Strip off a bitcast if we got one back.
if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Entry)) {
@@ -2602,7 +2645,7 @@ void CodeGenVTables::EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk)
// There's already a declaration with the same name, check if it has the same
// type or if we need to replace it.
if (cast<llvm::GlobalValue>(Entry)->getType()->getElementType() !=
- CGM.getTypes().GetFunctionTypeForVTable(MD)) {
+ CGM.getTypes().GetFunctionTypeForVTable(GD)) {
llvm::GlobalValue *OldThunkFn = cast<llvm::GlobalValue>(Entry);
// If the types mismatch then we have to rewrite the definition.
@@ -2821,8 +2864,7 @@ CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
NextVTableThunkIndex++;
} else {
- const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
- const llvm::Type *Ty = CGM.getTypes().GetFunctionTypeForVTable(MD);
+ const llvm::Type *Ty = CGM.getTypes().GetFunctionTypeForVTable(GD);
Init = CGM.GetAddrOfFunction(GD, Ty);
}
@@ -2889,7 +2931,7 @@ GetGlobalVariable(llvm::Module &Module, llvm::StringRef Name,
llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTable(const CXXRecordDecl *RD) {
llvm::SmallString<256> OutName;
- CGM.getMangleContext().mangleCXXVTable(RD, OutName);
+ CGM.getCXXABI().getMangleContext().mangleCXXVTable(RD, OutName);
llvm::StringRef Name = OutName.str();
ComputeVTableRelatedInformation(RD, true);
@@ -2928,7 +2970,7 @@ CodeGenVTables::EmitVTableDefinition(llvm::GlobalVariable *VTable,
VTable->setLinkage(Linkage);
// Set the right visibility.
- CGM.setGlobalVisibility(VTable, RD);
+ CGM.setTypeVisibility(VTable, RD, /*ForRTTI*/ false);
}
llvm::GlobalVariable *
@@ -2949,8 +2991,8 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
// Get the mangled construction vtable name.
llvm::SmallString<256> OutName;
- CGM.getMangleContext().mangleCXXCtorVTable(RD, Base.getBaseOffset() / 8,
- Base.getBase(), OutName);
+ CGM.getCXXABI().getMangleContext().
+ mangleCXXCtorVTable(RD, Base.getBaseOffset() / 8, Base.getBase(), OutName);
llvm::StringRef Name = OutName.str();
const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
diff --git a/lib/CodeGen/CGValue.h b/lib/CodeGen/CGValue.h
index 92ef9dc4a11f..f57ecd245f09 100644
--- a/lib/CodeGen/CGValue.h
+++ b/lib/CodeGen/CGValue.h
@@ -15,6 +15,7 @@
#ifndef CLANG_CODEGEN_CGVALUE_H
#define CLANG_CODEGEN_CGVALUE_H
+#include "clang/AST/ASTContext.h"
#include "clang/AST/Type.h"
namespace llvm {
@@ -136,6 +137,9 @@ class LValue {
// 'const' is unused here
Qualifiers Quals;
+ /// The alignment to use when accessing this lvalue.
+ unsigned short Alignment;
+
// objective-c's ivar
bool Ivar:1;
@@ -148,15 +152,20 @@ class LValue {
// Lvalue is a global reference of an objective-c object
bool GlobalObjCRef : 1;
+
+ // Lvalue is a thread local reference
+ bool ThreadLocalRef : 1;
Expr *BaseIvarExp;
private:
- void SetQualifiers(Qualifiers Quals) {
+ void Initialize(Qualifiers Quals, unsigned Alignment = 0) {
this->Quals = Quals;
-
- // FIXME: Convenient place to set objc flags to 0. This should really be
- // done in a user-defined constructor instead.
+ this->Alignment = Alignment;
+ assert(this->Alignment == Alignment && "Alignment exceeds allowed max!");
+
+ // Initialize Objective-C flags.
this->Ivar = this->ObjIsArray = this->NonGC = this->GlobalObjCRef = false;
+ this->ThreadLocalRef = false;
this->BaseIvarExp = 0;
}
@@ -175,30 +184,36 @@ public:
}
bool isObjCIvar() const { return Ivar; }
+ void setObjCIvar(bool Value) { Ivar = Value; }
+
bool isObjCArray() const { return ObjIsArray; }
+ void setObjCArray(bool Value) { ObjIsArray = Value; }
+
bool isNonGC () const { return NonGC; }
+ void setNonGC(bool Value) { NonGC = Value; }
+
bool isGlobalObjCRef() const { return GlobalObjCRef; }
- bool isObjCWeak() const { return Quals.getObjCGCAttr() == Qualifiers::Weak; }
- bool isObjCStrong() const { return Quals.getObjCGCAttr() == Qualifiers::Strong; }
+ void setGlobalObjCRef(bool Value) { GlobalObjCRef = Value; }
+
+ bool isThreadLocalRef() const { return ThreadLocalRef; }
+ void setThreadLocalRef(bool Value) { ThreadLocalRef = Value;}
+
+ bool isObjCWeak() const {
+ return Quals.getObjCGCAttr() == Qualifiers::Weak;
+ }
+ bool isObjCStrong() const {
+ return Quals.getObjCGCAttr() == Qualifiers::Strong;
+ }
Expr *getBaseIvarExp() const { return BaseIvarExp; }
void setBaseIvarExp(Expr *V) { BaseIvarExp = V; }
- unsigned getAddressSpace() const { return Quals.getAddressSpace(); }
+ const Qualifiers &getQuals() const { return Quals; }
+ Qualifiers &getQuals() { return Quals; }
- static void SetObjCIvar(LValue& R, bool iValue) {
- R.Ivar = iValue;
- }
- static void SetObjCArray(LValue& R, bool iValue) {
- R.ObjIsArray = iValue;
- }
- static void SetGlobalObjCRef(LValue& R, bool iValue) {
- R.GlobalObjCRef = iValue;
- }
+ unsigned getAddressSpace() const { return Quals.getAddressSpace(); }
- static void SetObjCNonGC(LValue& R, bool iValue) {
- R.NonGC = iValue;
- }
+ unsigned getAlignment() const { return Alignment; }
// simple lvalue
llvm::Value *getAddress() const { assert(isSimple()); return V; }
@@ -236,11 +251,15 @@ public:
return KVCRefExpr;
}
- static LValue MakeAddr(llvm::Value *V, Qualifiers Quals) {
+ static LValue MakeAddr(llvm::Value *V, QualType T, unsigned Alignment,
+ ASTContext &Context) {
+ Qualifiers Quals = Context.getCanonicalType(T).getQualifiers();
+ Quals.setObjCGCAttr(Context.getObjCGCAttrKind(T));
+
LValue R;
R.LVType = Simple;
R.V = V;
- R.SetQualifiers(Quals);
+ R.Initialize(Quals, Alignment);
return R;
}
@@ -250,7 +269,7 @@ public:
R.LVType = VectorElt;
R.V = Vec;
R.VectorIdx = Idx;
- R.SetQualifiers(Qualifiers::fromCVRMask(CVR));
+ R.Initialize(Qualifiers::fromCVRMask(CVR));
return R;
}
@@ -260,7 +279,7 @@ public:
R.LVType = ExtVectorElt;
R.V = Vec;
R.VectorElts = Elts;
- R.SetQualifiers(Qualifiers::fromCVRMask(CVR));
+ R.Initialize(Qualifiers::fromCVRMask(CVR));
return R;
}
@@ -276,7 +295,7 @@ public:
R.LVType = BitField;
R.V = BaseValue;
R.BitFieldInfo = &Info;
- R.SetQualifiers(Qualifiers::fromCVRMask(CVR));
+ R.Initialize(Qualifiers::fromCVRMask(CVR));
return R;
}
@@ -288,7 +307,7 @@ public:
LValue R;
R.LVType = PropertyRef;
R.PropertyRefExpr = E;
- R.SetQualifiers(Qualifiers::fromCVRMask(CVR));
+ R.Initialize(Qualifiers::fromCVRMask(CVR));
return R;
}
@@ -297,7 +316,7 @@ public:
LValue R;
R.LVType = KVCRef;
R.KVCRefExpr = E;
- R.SetQualifiers(Qualifiers::fromCVRMask(CVR));
+ R.Initialize(Qualifiers::fromCVRMask(CVR));
return R;
}
};
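
One motivation for carrying an alignment in LValue is members of packed records, whose natural type alignment overstates what the address actually guarantees. A small illustration using the GCC/Clang packed attribute (not part of the patch):

#include <cstdio>

struct __attribute__((packed)) P {
  char c;
  int  i;     // at offset 1, so only 1-byte aligned
};

int main() {
  P p = { 'x', 42 };
  std::printf("%d\n", p.i);   // the load here must be emitted with alignment 1
}
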
diff --git a/lib/CodeGen/CodeGenFunction.cpp b/lib/CodeGen/CodeGenFunction.cpp
index eb6c4361be89..51d084e1d301 100644
--- a/lib/CodeGen/CodeGenFunction.cpp
+++ b/lib/CodeGen/CodeGenFunction.cpp
@@ -13,6 +13,7 @@
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
+#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGException.h"
#include "clang/Basic/TargetInfo.h"
@@ -31,8 +32,9 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
: BlockFunction(cgm, *this, Builder), CGM(cgm),
Target(CGM.getContext().Target),
Builder(cgm.getModule().getContext()),
+ NormalCleanupDest(0), EHCleanupDest(0), NextCleanupDestIndex(1),
ExceptionSlot(0), DebugInfo(0), IndirectBranch(0),
- SwitchInsn(0), CaseRangeBlock(0), InvokeDest(0),
+ SwitchInsn(0), CaseRangeBlock(0),
DidCallStackSave(false), UnreachableBlock(0),
CXXThisDecl(0), CXXThisValue(0), CXXVTTDecl(0), CXXVTTValue(0),
ConditionalBranchLevel(0), TerminateLandingPad(0), TerminateHandler(0),
@@ -47,7 +49,7 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
Exceptions = getContext().getLangOptions().Exceptions;
CatchUndefined = getContext().getLangOptions().CatchUndefined;
- CGM.getMangleContext().startNewFunction();
+ CGM.getCXXABI().getMangleContext().startNewFunction();
}
ASTContext &CodeGenFunction::getContext() const {
@@ -55,17 +57,6 @@ ASTContext &CodeGenFunction::getContext() const {
}
-llvm::Value *CodeGenFunction::GetAddrOfLocalVar(const VarDecl *VD) {
- llvm::Value *Res = LocalDeclMap[VD];
- assert(Res && "Invalid argument to GetAddrOfLocalVar(), no decl!");
- return Res;
-}
-
-llvm::Constant *
-CodeGenFunction::GetAddrOfStaticLocalVar(const VarDecl *BVD) {
- return cast<llvm::Constant>(GetAddrOfLocalVar(BVD));
-}
-
const llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
return CGM.getTypes().ConvertTypeForMem(T);
}
@@ -76,7 +67,7 @@ const llvm::Type *CodeGenFunction::ConvertType(QualType T) {
bool CodeGenFunction::hasAggregateLLVMType(QualType T) {
return T->isRecordType() || T->isArrayType() || T->isAnyComplexType() ||
- T->isMemberFunctionPointerType();
+ T->isObjCObjectType();
}
void CodeGenFunction::EmitReturnBlock() {
@@ -89,26 +80,26 @@ void CodeGenFunction::EmitReturnBlock() {
// We have a valid insert point, reuse it if it is empty or there are no
// explicit jumps to the return block.
- if (CurBB->empty() || ReturnBlock.Block->use_empty()) {
- ReturnBlock.Block->replaceAllUsesWith(CurBB);
- delete ReturnBlock.Block;
+ if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
+ ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
+ delete ReturnBlock.getBlock();
} else
- EmitBlock(ReturnBlock.Block);
+ EmitBlock(ReturnBlock.getBlock());
return;
}
// Otherwise, if the return block is the target of a single direct
// branch then we can just put the code in that block instead. This
// cleans up functions which started with a unified return block.
- if (ReturnBlock.Block->hasOneUse()) {
+ if (ReturnBlock.getBlock()->hasOneUse()) {
llvm::BranchInst *BI =
- dyn_cast<llvm::BranchInst>(*ReturnBlock.Block->use_begin());
+ dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->use_begin());
if (BI && BI->isUnconditional() &&
- BI->getSuccessor(0) == ReturnBlock.Block) {
+ BI->getSuccessor(0) == ReturnBlock.getBlock()) {
// Reset insertion point and delete the branch.
Builder.SetInsertPoint(BI->getParent());
BI->eraseFromParent();
- delete ReturnBlock.Block;
+ delete ReturnBlock.getBlock();
return;
}
}
@@ -117,7 +108,7 @@ void CodeGenFunction::EmitReturnBlock() {
// unless it has uses. However, we still need a place to put the debug
// region.end for now.
- EmitBlock(ReturnBlock.Block);
+ EmitBlock(ReturnBlock.getBlock());
}
static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
@@ -139,7 +130,7 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
// Emit debug descriptor for function end.
if (CGDebugInfo *DI = getDebugInfo()) {
DI->setLocation(EndLoc);
- DI->EmitRegionEnd(CurFn, Builder);
+ DI->EmitFunctionEnd(Builder);
}
EmitFunctionEpilog(*CurFnInfo);
@@ -170,6 +161,7 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
}
}
+ EmitIfUsed(*this, RethrowBlock.getBlock());
EmitIfUsed(*this, TerminateLandingPad);
EmitIfUsed(*this, TerminateHandler);
EmitIfUsed(*this, UnreachableBlock);
@@ -287,10 +279,8 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
EmitStartEHSpec(CurCodeDecl);
EmitFunctionProlog(*CurFnInfo, CurFn, Args);
- if (CXXThisDecl)
- CXXThisValue = Builder.CreateLoad(LocalDeclMap[CXXThisDecl], "this");
- if (CXXVTTDecl)
- CXXVTTValue = Builder.CreateLoad(LocalDeclMap[CXXVTTDecl], "vtt");
+ if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance())
+ CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
// If any of the arguments have a variably modified type, make sure to
// emit the type size.
@@ -309,6 +299,23 @@ void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args) {
EmitStmt(FD->getBody());
}
+/// Tries to mark the given function nounwind based on the
+/// non-existence of any throwing calls within it. We believe this is
+/// lightweight enough to do at -O0.
+static void TryMarkNoThrow(llvm::Function *F) {
+ // LLVM treats 'nounwind' on a function as part of the type, so we
+ // can't do this on functions that can be overwritten.
+ if (F->mayBeOverridden()) return;
+
+ for (llvm::Function::iterator FI = F->begin(), FE = F->end(); FI != FE; ++FI)
+ for (llvm::BasicBlock::iterator
+ BI = FI->begin(), BE = FI->end(); BI != BE; ++BI)
+ if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(&*BI))
+ if (!Call->doesNotThrow())
+ return;
+ F->setDoesNotThrow(true);
+}
+
void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn) {
const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
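
A sketch of the two cases TryMarkNoThrow distinguishes, using hypothetical helper functions rather than anything from the patch:

#include <stdexcept>

int leaf(int a, int b) { return a + b; }      // body contains no calls at all,
                                              // so the pass can mark the
                                              // emitted function nounwind

void thrower(bool b) {                        // contains a call that may
  if (b) throw std::runtime_error("boom");    // unwind, so it is left unmarked
}

int main() {
  try { thrower(false); } catch (...) {}
  return leaf(21, 21) - 42;
}
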
@@ -317,30 +324,11 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn) {
DebugInfo = CGM.getDebugInfo();
FunctionArgList Args;
+ QualType ResTy = FD->getResultType();
CurGD = GD;
- if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
- if (MD->isInstance()) {
- // Create the implicit 'this' decl.
- // FIXME: I'm not entirely sure I like using a fake decl just for code
- // generation. Maybe we can come up with a better way?
- CXXThisDecl = ImplicitParamDecl::Create(getContext(), 0,
- FD->getLocation(),
- &getContext().Idents.get("this"),
- MD->getThisType(getContext()));
- Args.push_back(std::make_pair(CXXThisDecl, CXXThisDecl->getType()));
-
- // Check if we need a VTT parameter as well.
- if (CodeGenVTables::needsVTTParameter(GD)) {
- // FIXME: The comment about using a fake decl above applies here too.
- QualType T = getContext().getPointerType(getContext().VoidPtrTy);
- CXXVTTDecl =
- ImplicitParamDecl::Create(getContext(), 0, FD->getLocation(),
- &getContext().Idents.get("vtt"), T);
- Args.push_back(std::make_pair(CXXVTTDecl, CXXVTTDecl->getType()));
- }
- }
- }
+ if (isa<CXXMethodDecl>(FD) && cast<CXXMethodDecl>(FD)->isInstance())
+ CGM.getCXXABI().BuildInstanceFunctionParams(*this, ResTy, Args);
if (FD->getNumParams()) {
const FunctionProtoType* FProto = FD->getType()->getAs<FunctionProtoType>();
@@ -355,7 +343,7 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn) {
if (Stmt *Body = FD->getBody()) BodyRange = Body->getSourceRange();
// Emit the standard function prologue.
- StartFunction(GD, FD->getResultType(), Fn, Args, BodyRange.getBegin());
+ StartFunction(GD, ResTy, Fn, Args, BodyRange.getBegin());
// Generate the body of the function.
if (isa<CXXDestructorDecl>(FD))
@@ -368,13 +356,10 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn) {
// Emit the standard function epilogue.
FinishFunction(BodyRange.getEnd());
- // Destroy the 'this' declaration.
- if (CXXThisDecl)
- CXXThisDecl->Destroy(getContext());
-
- // Destroy the VTT declaration.
- if (CXXVTTDecl)
- CXXVTTDecl->Destroy(getContext());
+ // If we haven't marked the function nothrow through other means, do
+ // a quick pass now to see if we can.
+ if (!CurFn->doesNotThrow())
+ TryMarkNoThrow(CurFn);
}
/// ContainsLabel - Return true if the statement contains a label in it. If
@@ -439,7 +424,7 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
// Handle X && Y in a condition.
- if (CondBOp->getOpcode() == BinaryOperator::LAnd) {
+ if (CondBOp->getOpcode() == BO_LAnd) {
// If we have "1 && X", simplify the code. "0 && X" would have constant
// folded if the case was simple enough.
if (ConstantFoldsToSimpleInteger(CondBOp->getLHS()) == 1) {
@@ -466,7 +451,7 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
EndConditionalBranch();
return;
- } else if (CondBOp->getOpcode() == BinaryOperator::LOr) {
+ } else if (CondBOp->getOpcode() == BO_LOr) {
// If we have "0 || X", simplify the code. "1 || X" would have constant
// folded if the case was simple enough.
if (ConstantFoldsToSimpleInteger(CondBOp->getLHS()) == -1) {
@@ -498,7 +483,7 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
// br(!x, t, f) -> br(x, f, t)
- if (CondUOp->getOpcode() == UnaryOperator::LNot)
+ if (CondUOp->getOpcode() == UO_LNot)
return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock);
}
@@ -533,21 +518,6 @@ void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type,
void
CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
- // If the type contains a pointer to data member we can't memset it to zero.
- // Instead, create a null constant and copy it to the destination.
- if (CGM.getTypes().ContainsPointerToDataMember(Ty)) {
- llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);
-
- llvm::GlobalVariable *NullVariable =
- new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
- /*isConstant=*/true,
- llvm::GlobalVariable::PrivateLinkage,
- NullConstant, llvm::Twine());
- EmitAggregateCopy(DestPtr, NullVariable, Ty, /*isVolatile=*/false);
- return;
- }
-
-
// Ignore empty classes in C++.
if (getContext().getLangOptions().CPlusPlus) {
if (const RecordType *RT = Ty->getAs<RecordType>()) {
@@ -555,29 +525,58 @@ CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
return;
}
}
-
- // Otherwise, just memset the whole thing to zero. This is legal
- // because in LLVM, all default initializers (other than the ones we just
- // handled above) are guaranteed to have a bit pattern of all zeros.
- const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
+
+ // Cast the dest ptr to the appropriate i8 pointer type.
+ unsigned DestAS =
+ cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
+ const llvm::Type *BP =
+ llvm::Type::getInt8PtrTy(VMContext, DestAS);
if (DestPtr->getType() != BP)
DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");
// Get size and alignment info for this aggregate.
std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);
+ uint64_t Size = TypeInfo.first;
+ unsigned Align = TypeInfo.second;
// Don't bother emitting a zero-byte memset.
- if (TypeInfo.first == 0)
+ if (Size == 0)
return;
+ llvm::ConstantInt *SizeVal = llvm::ConstantInt::get(IntPtrTy, Size / 8);
+ llvm::ConstantInt *AlignVal = Builder.getInt32(Align / 8);
+
+ // If the type contains a pointer to data member we can't memset it to zero.
+ // Instead, create a null constant and copy it to the destination.
+ if (!CGM.getTypes().isZeroInitializable(Ty)) {
+ llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);
+
+ llvm::GlobalVariable *NullVariable =
+ new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
+ /*isConstant=*/true,
+ llvm::GlobalVariable::PrivateLinkage,
+ NullConstant, llvm::Twine());
+ llvm::Value *SrcPtr =
+ Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy());
+
+ // FIXME: variable-size types?
+
+ // Get and call the appropriate llvm.memcpy overload.
+ llvm::Constant *Memcpy =
+ CGM.getMemCpyFn(DestPtr->getType(), SrcPtr->getType(), IntPtrTy);
+ Builder.CreateCall5(Memcpy, DestPtr, SrcPtr, SizeVal, AlignVal,
+ /*volatile*/ Builder.getFalse());
+ return;
+ }
+
+ // Otherwise, just memset the whole thing to zero. This is legal
+ // because in LLVM, all default initializers (other than the ones we just
+ // handled above) are guaranteed to have a bit pattern of all zeros.
+
// FIXME: Handle variable sized types.
Builder.CreateCall5(CGM.getMemSetFn(BP, IntPtrTy), DestPtr,
- llvm::Constant::getNullValue(llvm::Type::getInt8Ty(VMContext)),
- // TypeInfo.first describes size in bits.
- llvm::ConstantInt::get(IntPtrTy, TypeInfo.first/8),
- llvm::ConstantInt::get(Int32Ty, TypeInfo.second/8),
- llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext),
- 0));
+ Builder.getInt8(0),
+ SizeVal, AlignVal, /*volatile*/ Builder.getFalse());
}
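
The isZeroInitializable() check above is what forces the memcpy-from-a-null-constant path: some C++ types have a null value whose bit pattern is not all zeros. A minimal sketch, assuming the Itanium C++ ABI (where a null pointer to data member is the all-ones pattern rather than zero):

// Illustration only: under the Itanium C++ ABI the null pointer-to-data-member
// is represented as -1, so memset'ing an S to zero would leave s.p pointing at
// the member at offset 0 instead of being null.
struct S {
  int S::*p;
};

S s = S();   // s.p is the null member pointer; its bits are all ones
// memset(&s, 0, sizeof(s)) would therefore *not* be a correct
// zero-initialization of S, which is why EmitNullInitialization copies a
// null constant for such types instead of emitting a memset.
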
llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelStmt *L) {
@@ -585,7 +584,7 @@ llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelStmt *L) {
if (IndirectBranch == 0)
GetIndirectGotoBlock();
- llvm::BasicBlock *BB = getJumpDestForLabel(L).Block;
+ llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();
// Make sure the indirect branch includes all of the address-taken blocks.
IndirectBranch->addDestination(BB);
@@ -666,70 +665,75 @@ llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) {
void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old) {
assert(Old.isValid());
- EHScopeStack::iterator E = EHStack.find(Old);
- while (EHStack.begin() != E)
- PopCleanupBlock();
-}
+ while (EHStack.stable_begin() != Old) {
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
+
+ // As long as Old strictly encloses the scope's enclosing normal
+ // cleanup, we're going to emit another normal cleanup which
+ // fallthrough can propagate through.
+ bool FallThroughIsBranchThrough =
+ Old.strictlyEncloses(Scope.getEnclosingNormalCleanup());
-/// Destroys a cleanup if it was unused.
-static void DestroyCleanup(CodeGenFunction &CGF,
- llvm::BasicBlock *Entry,
- llvm::BasicBlock *Exit) {
- assert(Entry->use_empty() && "destroying cleanup with uses!");
- assert(Exit->getTerminator() == 0 &&
- "exit has terminator but entry has no predecessors!");
-
- // This doesn't always remove the entire cleanup, but it's much
- // safer as long as we don't know what blocks belong to the cleanup.
- // A *much* better approach if we care about this inefficiency would
- // be to lazily emit the cleanup.
-
- // If the exit block is distinct from the entry, give it a branch to
- // an unreachable destination. This preserves the well-formedness
- // of the IR.
- if (Entry != Exit)
- llvm::BranchInst::Create(CGF.getUnreachableBlock(), Exit);
-
- assert(!Entry->getParent() && "cleanup entry already positioned?");
- // We can't just delete the entry; we have to kill any references to
- // its instructions in other blocks.
- for (llvm::BasicBlock::iterator I = Entry->begin(), E = Entry->end();
- I != E; ++I)
- if (!I->use_empty())
- I->replaceAllUsesWith(llvm::UndefValue::get(I->getType()));
- delete Entry;
+ PopCleanupBlock(FallThroughIsBranchThrough);
+ }
}
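
The branch-after / branch-through distinction that FallThroughIsBranchThrough encodes can be pictured with a small, purely hypothetical source example (Obj, more, cond and fail are made up):

// Hypothetical source, for orientation only.
void f() {
  Obj a;                  // outer cleanup
  while (more()) {
    Obj b;                // inner cleanup
    if (cond())
      break;              // destination is just past b's scope but still
                          // inside a's: a branch-after of b's cleanup
    if (fail())
      return;             // must leave both scopes: a branch-through of
                          // b's cleanup, then a branch-after of a's
  }
}
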
-/// Creates a switch instruction to thread branches out of the given
-/// block (which is the exit block of a cleanup).
-static void CreateCleanupSwitch(CodeGenFunction &CGF,
- llvm::BasicBlock *Block) {
- if (Block->getTerminator()) {
- assert(isa<llvm::SwitchInst>(Block->getTerminator()) &&
- "cleanup block already has a terminator, but it isn't a switch");
- return;
+static llvm::BasicBlock *CreateNormalEntry(CodeGenFunction &CGF,
+ EHCleanupScope &Scope) {
+ assert(Scope.isNormalCleanup());
+ llvm::BasicBlock *Entry = Scope.getNormalBlock();
+ if (!Entry) {
+ Entry = CGF.createBasicBlock("cleanup");
+ Scope.setNormalBlock(Entry);
}
+ return Entry;
+}
- llvm::Value *DestCodePtr
- = CGF.CreateTempAlloca(CGF.Builder.getInt32Ty(), "cleanup.dst");
- CGBuilderTy Builder(Block);
- llvm::Value *DestCode = Builder.CreateLoad(DestCodePtr, "tmp");
+static llvm::BasicBlock *CreateEHEntry(CodeGenFunction &CGF,
+ EHCleanupScope &Scope) {
+ assert(Scope.isEHCleanup());
+ llvm::BasicBlock *Entry = Scope.getEHBlock();
+ if (!Entry) {
+ Entry = CGF.createBasicBlock("eh.cleanup");
+ Scope.setEHBlock(Entry);
+ }
+ return Entry;
+}
- // Create a switch instruction to determine where to jump next.
- Builder.CreateSwitch(DestCode, CGF.getUnreachableBlock());
+/// Transitions the terminator of the given exit-block of a cleanup to
+/// be a cleanup switch.
+static llvm::SwitchInst *TransitionToCleanupSwitch(CodeGenFunction &CGF,
+ llvm::BasicBlock *Block) {
+ // If it's a branch, turn it into a switch whose default
+ // destination is its original target.
+ llvm::TerminatorInst *Term = Block->getTerminator();
+ assert(Term && "can't transition block without terminator");
+
+ if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
+ assert(Br->isUnconditional());
+ llvm::LoadInst *Load =
+ new llvm::LoadInst(CGF.getNormalCleanupDestSlot(), "cleanup.dest", Term);
+ llvm::SwitchInst *Switch =
+ llvm::SwitchInst::Create(Load, Br->getSuccessor(0), 4, Block);
+ Br->eraseFromParent();
+ return Switch;
+ } else {
+ return cast<llvm::SwitchInst>(Term);
+ }
}
/// Attempts to reduce a cleanup's entry block to a fallthrough. This
/// is basically llvm::MergeBlockIntoPredecessor, except
-/// simplified/optimized for the tighter constraints on cleanup
-/// blocks.
-static void SimplifyCleanupEntry(CodeGenFunction &CGF,
- llvm::BasicBlock *Entry) {
+/// simplified/optimized for the tighter constraints on cleanup blocks.
+///
+/// Returns the new block, whatever it is.
+static llvm::BasicBlock *SimplifyCleanupEntry(CodeGenFunction &CGF,
+ llvm::BasicBlock *Entry) {
llvm::BasicBlock *Pred = Entry->getSinglePredecessor();
- if (!Pred) return;
+ if (!Pred) return Entry;
llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Pred->getTerminator());
- if (!Br || Br->isConditional()) return;
+ if (!Br || Br->isConditional()) return Entry;
assert(Br->getSuccessor(0) == Entry);
// If we were previously inserting at the end of the cleanup entry
@@ -749,145 +753,44 @@ static void SimplifyCleanupEntry(CodeGenFunction &CGF,
if (WasInsertBlock)
CGF.Builder.SetInsertPoint(Pred);
-}
-
-/// Attempts to reduce an cleanup's exit switch to an unconditional
-/// branch.
-static void SimplifyCleanupExit(llvm::BasicBlock *Exit) {
- llvm::TerminatorInst *Terminator = Exit->getTerminator();
- assert(Terminator && "completed cleanup exit has no terminator");
-
- llvm::SwitchInst *Switch = dyn_cast<llvm::SwitchInst>(Terminator);
- if (!Switch) return;
- if (Switch->getNumCases() != 2) return; // default + 1
- llvm::LoadInst *Cond = cast<llvm::LoadInst>(Switch->getCondition());
- llvm::AllocaInst *CondVar = cast<llvm::AllocaInst>(Cond->getPointerOperand());
-
- // Replace the switch instruction with an unconditional branch.
- llvm::BasicBlock *Dest = Switch->getSuccessor(1); // default is 0
- Switch->eraseFromParent();
- llvm::BranchInst::Create(Dest, Exit);
-
- // Delete all uses of the condition variable.
- Cond->eraseFromParent();
- while (!CondVar->use_empty())
- cast<llvm::StoreInst>(*CondVar->use_begin())->eraseFromParent();
-
- // Delete the condition variable itself.
- CondVar->eraseFromParent();
+ return Pred;
}
-/// Threads a branch fixup through a cleanup block.
-static void ThreadFixupThroughCleanup(CodeGenFunction &CGF,
- BranchFixup &Fixup,
- llvm::BasicBlock *Entry,
- llvm::BasicBlock *Exit) {
- if (!Exit->getTerminator())
- CreateCleanupSwitch(CGF, Exit);
-
- // Find the switch and its destination index alloca.
- llvm::SwitchInst *Switch = cast<llvm::SwitchInst>(Exit->getTerminator());
- llvm::Value *DestCodePtr =
- cast<llvm::LoadInst>(Switch->getCondition())->getPointerOperand();
-
- // Compute the index of the new case we're adding to the switch.
- unsigned Index = Switch->getNumCases();
-
- const llvm::IntegerType *i32 = llvm::Type::getInt32Ty(CGF.getLLVMContext());
- llvm::ConstantInt *IndexV = llvm::ConstantInt::get(i32, Index);
-
- // Set the index in the origin block.
- new llvm::StoreInst(IndexV, DestCodePtr, Fixup.Origin);
-
- // Add a case to the switch.
- Switch->addCase(IndexV, Fixup.Destination);
-
- // Change the last branch to point to the cleanup entry block.
- Fixup.LatestBranch->setSuccessor(Fixup.LatestBranchIndex, Entry);
-
- // And finally, update the fixup.
- Fixup.LatestBranch = Switch;
- Fixup.LatestBranchIndex = Index;
-}
-
-/// Try to simplify both the entry and exit edges of a cleanup.
-static void SimplifyCleanupEdges(CodeGenFunction &CGF,
- llvm::BasicBlock *Entry,
- llvm::BasicBlock *Exit) {
-
- // Given their current implementations, it's important to run these
- // in this order: SimplifyCleanupEntry will delete Entry if it can
- // be merged into its predecessor, which will then break
- // SimplifyCleanupExit if (as is common) Entry == Exit.
-
- SimplifyCleanupExit(Exit);
- SimplifyCleanupEntry(CGF, Entry);
-}
-
-static void EmitLazyCleanup(CodeGenFunction &CGF,
- EHScopeStack::LazyCleanup *Fn,
- bool ForEH) {
+static void EmitCleanup(CodeGenFunction &CGF,
+ EHScopeStack::Cleanup *Fn,
+ bool ForEH) {
if (ForEH) CGF.EHStack.pushTerminate();
Fn->Emit(CGF, ForEH);
if (ForEH) CGF.EHStack.popTerminate();
assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?");
}
-static void SplitAndEmitLazyCleanup(CodeGenFunction &CGF,
- EHScopeStack::LazyCleanup *Fn,
- bool ForEH,
- llvm::BasicBlock *Entry) {
- assert(Entry && "no entry block for cleanup");
-
- // Remove the switch and load from the end of the entry block.
- llvm::Instruction *Switch = &Entry->getInstList().back();
- Entry->getInstList().remove(Switch);
- assert(isa<llvm::SwitchInst>(Switch));
- llvm::Instruction *Load = &Entry->getInstList().back();
- Entry->getInstList().remove(Load);
- assert(isa<llvm::LoadInst>(Load));
-
- assert(Entry->getInstList().empty() &&
- "lazy cleanup block not empty after removing load/switch pair?");
-
- // Emit the actual cleanup at the end of the entry block.
- CGF.Builder.SetInsertPoint(Entry);
- EmitLazyCleanup(CGF, Fn, ForEH);
-
- // Put the load and switch at the end of the exit block.
- llvm::BasicBlock *Exit = CGF.Builder.GetInsertBlock();
- Exit->getInstList().push_back(Load);
- Exit->getInstList().push_back(Switch);
-
- // Clean up the edges if possible.
- SimplifyCleanupEdges(CGF, Entry, Exit);
-
- CGF.Builder.ClearInsertionPoint();
-}
-
-static void PopLazyCleanupBlock(CodeGenFunction &CGF) {
- assert(isa<EHLazyCleanupScope>(*CGF.EHStack.begin()) && "top not a cleanup!");
- EHLazyCleanupScope &Scope = cast<EHLazyCleanupScope>(*CGF.EHStack.begin());
- assert(Scope.getFixupDepth() <= CGF.EHStack.getNumBranchFixups());
+/// Pops a cleanup block. If the block includes a normal cleanup, the
+/// current insertion point is threaded through the cleanup, as are
+/// any branch fixups on the cleanup.
+void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
+ assert(!EHStack.empty() && "cleanup stack is empty!");
+ assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!");
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
+ assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups());
+ assert(Scope.isActive() && "cleanup was still inactive when popped!");
// Check whether we need an EH cleanup. This is only true if we've
// generated a lazy EH cleanup block.
- llvm::BasicBlock *EHEntry = Scope.getEHBlock();
- bool RequiresEHCleanup = (EHEntry != 0);
+ bool RequiresEHCleanup = Scope.hasEHBranches();
// Check the three conditions which might require a normal cleanup:
// - whether there are branch fix-ups through this cleanup
unsigned FixupDepth = Scope.getFixupDepth();
- bool HasFixups = CGF.EHStack.getNumBranchFixups() != FixupDepth;
+ bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth;
- // - whether control has already been threaded through this cleanup
- llvm::BasicBlock *NormalEntry = Scope.getNormalBlock();
- bool HasExistingBranches = (NormalEntry != 0);
+ // - whether there are branch-throughs or branch-afters
+ bool HasExistingBranches = Scope.hasBranches();
// - whether there's a fallthrough
- llvm::BasicBlock *FallthroughSource = CGF.Builder.GetInsertBlock();
+ llvm::BasicBlock *FallthroughSource = Builder.GetInsertBlock();
bool HasFallthrough = (FallthroughSource != 0);
bool RequiresNormalCleanup = false;
@@ -898,9 +801,9 @@ static void PopLazyCleanupBlock(CodeGenFunction &CGF) {
// If we don't need the cleanup at all, we're done.
if (!RequiresNormalCleanup && !RequiresEHCleanup) {
- CGF.EHStack.popCleanup();
- assert(CGF.EHStack.getNumBranchFixups() == 0 ||
- CGF.EHStack.hasNormalCleanups());
+ EHStack.popCleanup(); // safe because there are no fixups
+ assert(EHStack.getNumBranchFixups() == 0 ||
+ EHStack.hasNormalCleanups());
return;
}
@@ -912,319 +815,527 @@ static void PopLazyCleanupBlock(CodeGenFunction &CGF) {
memcpy(CleanupBuffer.data(),
Scope.getCleanupBuffer(), Scope.getCleanupSize());
CleanupBuffer.set_size(Scope.getCleanupSize());
- EHScopeStack::LazyCleanup *Fn =
- reinterpret_cast<EHScopeStack::LazyCleanup*>(CleanupBuffer.data());
+ EHScopeStack::Cleanup *Fn =
+ reinterpret_cast<EHScopeStack::Cleanup*>(CleanupBuffer.data());
+
+ // We want to emit the EH cleanup after the normal cleanup, but go
+ // ahead and do the setup for the EH cleanup while the scope is still
+ // alive.
+ llvm::BasicBlock *EHEntry = 0;
+ llvm::SmallVector<llvm::Instruction*, 2> EHInstsToAppend;
+ if (RequiresEHCleanup) {
+ EHEntry = CreateEHEntry(*this, Scope);
+
+ // Figure out the branch-through dest if necessary.
+ llvm::BasicBlock *EHBranchThroughDest = 0;
+ if (Scope.hasEHBranchThroughs()) {
+ assert(Scope.getEnclosingEHCleanup() != EHStack.stable_end());
+ EHScope &S = *EHStack.find(Scope.getEnclosingEHCleanup());
+ EHBranchThroughDest = CreateEHEntry(*this, cast<EHCleanupScope>(S));
+ }
- // We're done with the scope; pop it off so we can emit the cleanups.
- CGF.EHStack.popCleanup();
+ // If we have exactly one branch-after and no branch-throughs, we
+ // can dispatch it without a switch.
+ if (!Scope.hasEHBranchThroughs() &&
+ Scope.getNumEHBranchAfters() == 1) {
+ assert(!EHBranchThroughDest);
+
+ // TODO: remove the spurious eh.cleanup.dest stores if this edge
+ // never went through any switches.
+ llvm::BasicBlock *BranchAfterDest = Scope.getEHBranchAfterBlock(0);
+ EHInstsToAppend.push_back(llvm::BranchInst::Create(BranchAfterDest));
+
+ // Otherwise, if we have any branch-afters, we need a switch.
+ } else if (Scope.getNumEHBranchAfters()) {
+ // The default of the switch belongs to the branch-throughs if
+ // they exist.
+ llvm::BasicBlock *Default =
+ (EHBranchThroughDest ? EHBranchThroughDest : getUnreachableBlock());
+
+ const unsigned SwitchCapacity = Scope.getNumEHBranchAfters();
+
+ llvm::LoadInst *Load =
+ new llvm::LoadInst(getEHCleanupDestSlot(), "cleanup.dest");
+ llvm::SwitchInst *Switch =
+ llvm::SwitchInst::Create(Load, Default, SwitchCapacity);
+
+ EHInstsToAppend.push_back(Load);
+ EHInstsToAppend.push_back(Switch);
+
+ for (unsigned I = 0, E = Scope.getNumEHBranchAfters(); I != E; ++I)
+ Switch->addCase(Scope.getEHBranchAfterIndex(I),
+ Scope.getEHBranchAfterBlock(I));
+
+ // Otherwise, we have only branch-throughs; jump to the next EH
+ // cleanup.
+ } else {
+ assert(EHBranchThroughDest);
+ EHInstsToAppend.push_back(llvm::BranchInst::Create(EHBranchThroughDest));
+ }
+ }
+
+ if (!RequiresNormalCleanup) {
+ EHStack.popCleanup();
+ } else {
+    // As a kind of crazy internal case, branch-through fall-throughs
+ // leave the insertion point set to the end of the last cleanup.
+ bool HasPrebranchedFallthrough =
+ (HasFallthrough && FallthroughSource->getTerminator());
+ assert(!HasPrebranchedFallthrough ||
+ FallthroughSource->getTerminator()->getSuccessor(0)
+ == Scope.getNormalBlock());
- if (RequiresNormalCleanup) {
// If we have a fallthrough and no other need for the cleanup,
// emit it directly.
- if (HasFallthrough && !HasFixups && !HasExistingBranches) {
- EmitLazyCleanup(CGF, Fn, /*ForEH*/ false);
+ if (HasFallthrough && !HasPrebranchedFallthrough &&
+ !HasFixups && !HasExistingBranches) {
+
+ // Fixups can cause us to optimistically create a normal block,
+ // only to later have no real uses for it. Just delete it in
+ // this case.
+ // TODO: we can potentially simplify all the uses after this.
+ if (Scope.getNormalBlock()) {
+ Scope.getNormalBlock()->replaceAllUsesWith(getUnreachableBlock());
+ delete Scope.getNormalBlock();
+ }
+
+ EHStack.popCleanup();
+
+ EmitCleanup(*this, Fn, /*ForEH*/ false);
// Otherwise, the best approach is to thread everything through
// the cleanup block and then try to clean up after ourselves.
} else {
// Force the entry block to exist.
- if (!HasExistingBranches) {
- NormalEntry = CGF.createBasicBlock("cleanup");
- CreateCleanupSwitch(CGF, NormalEntry);
+ llvm::BasicBlock *NormalEntry = CreateNormalEntry(*this, Scope);
+
+ // If there's a fallthrough, we need to store the cleanup
+ // destination index. For fall-throughs this is always zero.
+ if (HasFallthrough && !HasPrebranchedFallthrough)
+ Builder.CreateStore(Builder.getInt32(0), getNormalCleanupDestSlot());
+
+ // Emit the entry block. This implicitly branches to it if we
+ // have fallthrough. All the fixups and existing branches should
+ // already be branched to it.
+ EmitBlock(NormalEntry);
+
+ bool HasEnclosingCleanups =
+ (Scope.getEnclosingNormalCleanup() != EHStack.stable_end());
+
+ // Compute the branch-through dest if we need it:
+ // - if there are branch-throughs threaded through the scope
+ // - if fall-through is a branch-through
+ // - if there are fixups that will be optimistically forwarded
+ // to the enclosing cleanup
+ llvm::BasicBlock *BranchThroughDest = 0;
+ if (Scope.hasBranchThroughs() ||
+ (HasFallthrough && FallthroughIsBranchThrough) ||
+ (HasFixups && HasEnclosingCleanups)) {
+ assert(HasEnclosingCleanups);
+ EHScope &S = *EHStack.find(Scope.getEnclosingNormalCleanup());
+ BranchThroughDest = CreateNormalEntry(*this, cast<EHCleanupScope>(S));
}
- CGF.EmitBlock(NormalEntry);
-
- // Thread the fallthrough edge through the (momentarily trivial)
- // cleanup.
- llvm::BasicBlock *FallthroughDestination = 0;
- if (HasFallthrough) {
- assert(isa<llvm::BranchInst>(FallthroughSource->getTerminator()));
- FallthroughDestination = CGF.createBasicBlock("cleanup.cont");
-
- BranchFixup Fix;
- Fix.Destination = FallthroughDestination;
- Fix.LatestBranch = FallthroughSource->getTerminator();
- Fix.LatestBranchIndex = 0;
- Fix.Origin = Fix.LatestBranch;
+ llvm::BasicBlock *FallthroughDest = 0;
+ llvm::SmallVector<llvm::Instruction*, 2> InstsToAppend;
+
+ // If there's exactly one branch-after and no other threads,
+ // we can route it without a switch.
+ if (!Scope.hasBranchThroughs() && !HasFixups && !HasFallthrough &&
+ Scope.getNumBranchAfters() == 1) {
+ assert(!BranchThroughDest);
+
+ // TODO: clean up the possibly dead stores to the cleanup dest slot.
+ llvm::BasicBlock *BranchAfter = Scope.getBranchAfterBlock(0);
+ InstsToAppend.push_back(llvm::BranchInst::Create(BranchAfter));
+
+ // Build a switch-out if we need it:
+ // - if there are branch-afters threaded through the scope
+ // - if fall-through is a branch-after
+ // - if there are fixups that have nowhere left to go and
+ // so must be immediately resolved
+ } else if (Scope.getNumBranchAfters() ||
+ (HasFallthrough && !FallthroughIsBranchThrough) ||
+ (HasFixups && !HasEnclosingCleanups)) {
+
+ llvm::BasicBlock *Default =
+ (BranchThroughDest ? BranchThroughDest : getUnreachableBlock());
+
+ // TODO: base this on the number of branch-afters and fixups
+ const unsigned SwitchCapacity = 10;
+
+ llvm::LoadInst *Load =
+ new llvm::LoadInst(getNormalCleanupDestSlot(), "cleanup.dest");
+ llvm::SwitchInst *Switch =
+ llvm::SwitchInst::Create(Load, Default, SwitchCapacity);
+
+ InstsToAppend.push_back(Load);
+ InstsToAppend.push_back(Switch);
+
+ // Branch-after fallthrough.
+ if (HasFallthrough && !FallthroughIsBranchThrough) {
+ FallthroughDest = createBasicBlock("cleanup.cont");
+ Switch->addCase(Builder.getInt32(0), FallthroughDest);
+ }
- // Restore fixup invariant. EmitBlock added a branch to the
- // cleanup which we need to redirect to the destination.
- cast<llvm::BranchInst>(Fix.LatestBranch)
- ->setSuccessor(0, Fix.Destination);
+ for (unsigned I = 0, E = Scope.getNumBranchAfters(); I != E; ++I) {
+ Switch->addCase(Scope.getBranchAfterIndex(I),
+ Scope.getBranchAfterBlock(I));
+ }
- ThreadFixupThroughCleanup(CGF, Fix, NormalEntry, NormalEntry);
+ if (HasFixups && !HasEnclosingCleanups)
+ ResolveAllBranchFixups(Switch);
+ } else {
+ // We should always have a branch-through destination in this case.
+ assert(BranchThroughDest);
+ InstsToAppend.push_back(llvm::BranchInst::Create(BranchThroughDest));
}
- // Thread any "real" fixups we need to thread.
- for (unsigned I = FixupDepth, E = CGF.EHStack.getNumBranchFixups();
- I != E; ++I)
- if (CGF.EHStack.getBranchFixup(I).Destination)
- ThreadFixupThroughCleanup(CGF, CGF.EHStack.getBranchFixup(I),
- NormalEntry, NormalEntry);
-
- SplitAndEmitLazyCleanup(CGF, Fn, /*ForEH*/ false, NormalEntry);
-
- if (HasFallthrough)
- CGF.EmitBlock(FallthroughDestination);
+ // We're finally ready to pop the cleanup.
+ EHStack.popCleanup();
+ assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups);
+
+ EmitCleanup(*this, Fn, /*ForEH*/ false);
+
+ // Append the prepared cleanup prologue from above.
+ llvm::BasicBlock *NormalExit = Builder.GetInsertBlock();
+ for (unsigned I = 0, E = InstsToAppend.size(); I != E; ++I)
+ NormalExit->getInstList().push_back(InstsToAppend[I]);
+
+ // Optimistically hope that any fixups will continue falling through.
+ for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
+ I < E; ++I) {
+      BranchFixup &Fixup = EHStack.getBranchFixup(I);
+ if (!Fixup.Destination) continue;
+ if (!Fixup.OptimisticBranchBlock) {
+ new llvm::StoreInst(Builder.getInt32(Fixup.DestinationIndex),
+ getNormalCleanupDestSlot(),
+ Fixup.InitialBranch);
+ Fixup.InitialBranch->setSuccessor(0, NormalEntry);
+ }
+ Fixup.OptimisticBranchBlock = NormalExit;
+ }
+
+ if (FallthroughDest)
+ EmitBlock(FallthroughDest);
+ else if (!HasFallthrough)
+ Builder.ClearInsertionPoint();
+
+ // Check whether we can merge NormalEntry into a single predecessor.
+ // This might invalidate (non-IR) pointers to NormalEntry.
+ llvm::BasicBlock *NewNormalEntry =
+ SimplifyCleanupEntry(*this, NormalEntry);
+
+ // If it did invalidate those pointers, and NormalEntry was the same
+ // as NormalExit, go back and patch up the fixups.
+ if (NewNormalEntry != NormalEntry && NormalEntry == NormalExit)
+ for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
+ I < E; ++I)
+          EHStack.getBranchFixup(I).OptimisticBranchBlock = NewNormalEntry;
}
}
+ assert(EHStack.hasNormalCleanups() || EHStack.getNumBranchFixups() == 0);
+
// Emit the EH cleanup if required.
if (RequiresEHCleanup) {
- CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();
- CGF.EmitBlock(EHEntry);
- SplitAndEmitLazyCleanup(CGF, Fn, /*ForEH*/ true, EHEntry);
- CGF.Builder.restoreIP(SavedIP);
- }
-}
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
-/// Pops a cleanup block. If the block includes a normal cleanup, the
-/// current insertion point is threaded through the cleanup, as are
-/// any branch fixups on the cleanup.
-void CodeGenFunction::PopCleanupBlock() {
- assert(!EHStack.empty() && "cleanup stack is empty!");
- if (isa<EHLazyCleanupScope>(*EHStack.begin()))
- return PopLazyCleanupBlock(*this);
+ EmitBlock(EHEntry);
+ EmitCleanup(*this, Fn, /*ForEH*/ true);
- assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!");
- EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
- assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups());
+ // Append the prepared cleanup prologue from above.
+ llvm::BasicBlock *EHExit = Builder.GetInsertBlock();
+ for (unsigned I = 0, E = EHInstsToAppend.size(); I != E; ++I)
+ EHExit->getInstList().push_back(EHInstsToAppend[I]);
- // Handle the EH cleanup if (1) there is one and (2) it's different
- // from the normal cleanup.
- if (Scope.isEHCleanup() &&
- Scope.getEHEntry() != Scope.getNormalEntry()) {
- llvm::BasicBlock *EHEntry = Scope.getEHEntry();
- llvm::BasicBlock *EHExit = Scope.getEHExit();
-
- if (EHEntry->use_empty()) {
- DestroyCleanup(*this, EHEntry, EHExit);
- } else {
- // TODO: this isn't really the ideal location to put this EH
- // cleanup, but lazy emission is a better solution than trying
- // to pick a better spot.
- CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
- EmitBlock(EHEntry);
- Builder.restoreIP(SavedIP);
-
- SimplifyCleanupEdges(*this, EHEntry, EHExit);
- }
- }
+ Builder.restoreIP(SavedIP);
- // If we only have an EH cleanup, we don't really need to do much
- // here. Branch fixups just naturally drop down to the enclosing
- // cleanup scope.
- if (!Scope.isNormalCleanup()) {
- EHStack.popCleanup();
- assert(EHStack.getNumBranchFixups() == 0 || EHStack.hasNormalCleanups());
- return;
+ SimplifyCleanupEntry(*this, EHEntry);
}
+}
- // Check whether the scope has any fixups that need to be threaded.
- unsigned FixupDepth = Scope.getFixupDepth();
- bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth;
+/// Terminate the current block by emitting a branch which might leave
+/// the current cleanup-protected scope. The target scope may not yet
+/// be known, in which case this will require a fixup.
+///
+/// As a side-effect, this method clears the insertion point.
+void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
+ assert(Dest.getScopeDepth().encloses(EHStack.getInnermostNormalCleanup())
+ && "stale jump destination");
- // Grab the entry and exit blocks.
- llvm::BasicBlock *Entry = Scope.getNormalEntry();
- llvm::BasicBlock *Exit = Scope.getNormalExit();
+ if (!HaveInsertPoint())
+ return;
- // Check whether anything's been threaded through the cleanup already.
- assert((Exit->getTerminator() == 0) == Entry->use_empty() &&
- "cleanup entry/exit mismatch");
- bool HasExistingBranches = !Entry->use_empty();
+ // Create the branch.
+ llvm::BranchInst *BI = Builder.CreateBr(Dest.getBlock());
- // Check whether we need to emit a "fallthrough" branch through the
- // cleanup for the current insertion point.
- llvm::BasicBlock *FallThrough = Builder.GetInsertBlock();
- if (FallThrough && FallThrough->getTerminator())
- FallThrough = 0;
+ // Calculate the innermost active normal cleanup.
+ EHScopeStack::stable_iterator
+ TopCleanup = EHStack.getInnermostActiveNormalCleanup();
- // If *nothing* is using the cleanup, kill it.
- if (!FallThrough && !HasFixups && !HasExistingBranches) {
- EHStack.popCleanup();
- DestroyCleanup(*this, Entry, Exit);
+ // If we're not in an active normal cleanup scope, or if the
+ // destination scope is within the innermost active normal cleanup
+ // scope, we don't need to worry about fixups.
+ if (TopCleanup == EHStack.stable_end() ||
+ TopCleanup.encloses(Dest.getScopeDepth())) { // works for invalid
+ Builder.ClearInsertionPoint();
return;
}
- // Otherwise, add the block to the function.
- EmitBlock(Entry);
+ // If we can't resolve the destination cleanup scope, just add this
+ // to the current cleanup scope as a branch fixup.
+ if (!Dest.getScopeDepth().isValid()) {
+ BranchFixup &Fixup = EHStack.addBranchFixup();
+ Fixup.Destination = Dest.getBlock();
+ Fixup.DestinationIndex = Dest.getDestIndex();
+ Fixup.InitialBranch = BI;
+ Fixup.OptimisticBranchBlock = 0;
- if (FallThrough)
- Builder.SetInsertPoint(Exit);
- else
Builder.ClearInsertionPoint();
-
- // Fast case: if we don't have to add any fixups, and either
- // we don't have a fallthrough or the cleanup wasn't previously
- // used, then the setup above is sufficient.
- if (!HasFixups) {
- if (!FallThrough) {
- assert(HasExistingBranches && "no reason for cleanup but didn't kill before");
- EHStack.popCleanup();
- SimplifyCleanupEdges(*this, Entry, Exit);
- return;
- } else if (!HasExistingBranches) {
- assert(FallThrough && "no reason for cleanup but didn't kill before");
- // We can't simplify the exit edge in this case because we're
- // already inserting at the end of the exit block.
- EHStack.popCleanup();
- SimplifyCleanupEntry(*this, Entry);
- return;
- }
+ return;
}
- // Otherwise we're going to have to thread things through the cleanup.
- llvm::SmallVector<BranchFixup*, 8> Fixups;
-
- // Synthesize a fixup for the current insertion point.
- BranchFixup Cur;
- if (FallThrough) {
- Cur.Destination = createBasicBlock("cleanup.cont");
- Cur.LatestBranch = FallThrough->getTerminator();
- Cur.LatestBranchIndex = 0;
- Cur.Origin = Cur.LatestBranch;
-
- // Restore fixup invariant. EmitBlock added a branch to the cleanup
- // which we need to redirect to the destination.
- cast<llvm::BranchInst>(Cur.LatestBranch)->setSuccessor(0, Cur.Destination);
+ // Otherwise, thread through all the normal cleanups in scope.
- Fixups.push_back(&Cur);
- } else {
- Cur.Destination = 0;
- }
+ // Store the index at the start.
+ llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex());
+ new llvm::StoreInst(Index, getNormalCleanupDestSlot(), BI);
- // Collect any "real" fixups we need to thread.
- for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
- I != E; ++I)
- if (EHStack.getBranchFixup(I).Destination)
- Fixups.push_back(&EHStack.getBranchFixup(I));
-
- assert(!Fixups.empty() && "no fixups, invariants broken!");
-
- // If there's only a single fixup to thread through, do so with
- // unconditional branches. This only happens if there's a single
- // branch and no fallthrough.
- if (Fixups.size() == 1 && !HasExistingBranches) {
- Fixups[0]->LatestBranch->setSuccessor(Fixups[0]->LatestBranchIndex, Entry);
- llvm::BranchInst *Br =
- llvm::BranchInst::Create(Fixups[0]->Destination, Exit);
- Fixups[0]->LatestBranch = Br;
- Fixups[0]->LatestBranchIndex = 0;
-
- // Otherwise, force a switch statement and thread everything through
- // the switch.
- } else {
- CreateCleanupSwitch(*this, Exit);
- for (unsigned I = 0, E = Fixups.size(); I != E; ++I)
- ThreadFixupThroughCleanup(*this, *Fixups[I], Entry, Exit);
+ // Adjust BI to point to the first cleanup block.
+ {
+ EHCleanupScope &Scope =
+ cast<EHCleanupScope>(*EHStack.find(TopCleanup));
+ BI->setSuccessor(0, CreateNormalEntry(*this, Scope));
}
- // Emit the fallthrough destination block if necessary.
- if (Cur.Destination)
- EmitBlock(Cur.Destination);
+ // Add this destination to all the scopes involved.
+ EHScopeStack::stable_iterator I = TopCleanup;
+ EHScopeStack::stable_iterator E = Dest.getScopeDepth();
+ if (E.strictlyEncloses(I)) {
+ while (true) {
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I));
+ assert(Scope.isNormalCleanup());
+ I = Scope.getEnclosingNormalCleanup();
+
+ // If this is the last cleanup we're propagating through, tell it
+ // that there's a resolved jump moving through it.
+ if (!E.strictlyEncloses(I)) {
+ Scope.addBranchAfter(Index, Dest.getBlock());
+ break;
+ }
- // We're finally done with the cleanup.
- EHStack.popCleanup();
+      // Otherwise, tell the scope that there's a jump propagating
+ // through it. If this isn't new information, all the rest of
+ // the work has been done before.
+ if (!Scope.addBranchThrough(Dest.getBlock()))
+ break;
+ }
+ }
+
+ Builder.ClearInsertionPoint();
}
-void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
+void CodeGenFunction::EmitBranchThroughEHCleanup(UnwindDest Dest) {
+ // We should never get invalid scope depths for an UnwindDest; that
+ // implies that the destination wasn't set up correctly.
+ assert(Dest.getScopeDepth().isValid() && "invalid scope depth on EH dest?");
+
if (!HaveInsertPoint())
return;
// Create the branch.
- llvm::BranchInst *BI = Builder.CreateBr(Dest.Block);
+ llvm::BranchInst *BI = Builder.CreateBr(Dest.getBlock());
- // If we're not in a cleanup scope, we don't need to worry about
- // fixups.
- if (!EHStack.hasNormalCleanups()) {
+ // Calculate the innermost active cleanup.
+ EHScopeStack::stable_iterator
+ InnermostCleanup = EHStack.getInnermostActiveEHCleanup();
+
+ // If the destination is in the same EH cleanup scope as us, we
+ // don't need to thread through anything.
+ if (InnermostCleanup.encloses(Dest.getScopeDepth())) {
Builder.ClearInsertionPoint();
return;
}
+ assert(InnermostCleanup != EHStack.stable_end());
- // Initialize a fixup.
- BranchFixup Fixup;
- Fixup.Destination = Dest.Block;
- Fixup.Origin = BI;
- Fixup.LatestBranch = BI;
- Fixup.LatestBranchIndex = 0;
+ // Store the index at the start.
+ llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex());
+ new llvm::StoreInst(Index, getEHCleanupDestSlot(), BI);
- // If we can't resolve the destination cleanup scope, just add this
- // to the current cleanup scope.
- if (!Dest.ScopeDepth.isValid()) {
- EHStack.addBranchFixup() = Fixup;
- Builder.ClearInsertionPoint();
- return;
+ // Adjust BI to point to the first cleanup block.
+ {
+ EHCleanupScope &Scope =
+ cast<EHCleanupScope>(*EHStack.find(InnermostCleanup));
+ BI->setSuccessor(0, CreateEHEntry(*this, Scope));
}
-
- for (EHScopeStack::iterator I = EHStack.begin(),
- E = EHStack.find(Dest.ScopeDepth); I != E; ++I) {
- if (isa<EHCleanupScope>(*I)) {
- EHCleanupScope &Scope = cast<EHCleanupScope>(*I);
- if (Scope.isNormalCleanup())
- ThreadFixupThroughCleanup(*this, Fixup, Scope.getNormalEntry(),
- Scope.getNormalExit());
- } else if (isa<EHLazyCleanupScope>(*I)) {
- EHLazyCleanupScope &Scope = cast<EHLazyCleanupScope>(*I);
- if (Scope.isNormalCleanup()) {
- llvm::BasicBlock *Block = Scope.getNormalBlock();
- if (!Block) {
- Block = createBasicBlock("cleanup");
- Scope.setNormalBlock(Block);
- }
- ThreadFixupThroughCleanup(*this, Fixup, Block, Block);
- }
+
+ // Add this destination to all the scopes involved.
+ for (EHScopeStack::stable_iterator
+ I = InnermostCleanup, E = Dest.getScopeDepth(); ; ) {
+ assert(E.strictlyEncloses(I));
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I));
+ assert(Scope.isEHCleanup());
+ I = Scope.getEnclosingEHCleanup();
+
+ // If this is the last cleanup we're propagating through, add this
+ // as a branch-after.
+ if (I == E) {
+ Scope.addEHBranchAfter(Index, Dest.getBlock());
+ break;
}
+
+ // Otherwise, add it as a branch-through. If this isn't new
+ // information, all the rest of the work has been done before.
+ if (!Scope.addEHBranchThrough(Dest.getBlock()))
+ break;
}
Builder.ClearInsertionPoint();
}
-void CodeGenFunction::EmitBranchThroughEHCleanup(JumpDest Dest) {
- if (!HaveInsertPoint())
- return;
+/// All the branch fixups on the EH stack have propagated out past the
+/// outermost normal cleanup; resolve them all by adding cases to the
+/// given switch instruction.
+void CodeGenFunction::ResolveAllBranchFixups(llvm::SwitchInst *Switch) {
+ llvm::SmallPtrSet<llvm::BasicBlock*, 4> CasesAdded;
+
+ for (unsigned I = 0, E = EHStack.getNumBranchFixups(); I != E; ++I) {
+ // Skip this fixup if its destination isn't set or if we've
+ // already treated it.
+ BranchFixup &Fixup = EHStack.getBranchFixup(I);
+ if (Fixup.Destination == 0) continue;
+ if (!CasesAdded.insert(Fixup.Destination)) continue;
+
+ Switch->addCase(Builder.getInt32(Fixup.DestinationIndex),
+ Fixup.Destination);
+ }
- // Create the branch.
- llvm::BranchInst *BI = Builder.CreateBr(Dest.Block);
+ EHStack.clearFixups();
+}
- // If we're not in a cleanup scope, we don't need to worry about
- // fixups.
- if (!EHStack.hasEHCleanups()) {
- Builder.ClearInsertionPoint();
- return;
+void CodeGenFunction::ResolveBranchFixups(llvm::BasicBlock *Block) {
+ assert(Block && "resolving a null target block");
+ if (!EHStack.getNumBranchFixups()) return;
+
+ assert(EHStack.hasNormalCleanups() &&
+ "branch fixups exist with no normal cleanups on stack");
+
+ llvm::SmallPtrSet<llvm::BasicBlock*, 4> ModifiedOptimisticBlocks;
+ bool ResolvedAny = false;
+
+ for (unsigned I = 0, E = EHStack.getNumBranchFixups(); I != E; ++I) {
+ // Skip this fixup if its destination doesn't match.
+ BranchFixup &Fixup = EHStack.getBranchFixup(I);
+ if (Fixup.Destination != Block) continue;
+
+ Fixup.Destination = 0;
+ ResolvedAny = true;
+
+ // If it doesn't have an optimistic branch block, LatestBranch is
+ // already pointing to the right place.
+ llvm::BasicBlock *BranchBB = Fixup.OptimisticBranchBlock;
+ if (!BranchBB)
+ continue;
+
+ // Don't process the same optimistic branch block twice.
+ if (!ModifiedOptimisticBlocks.insert(BranchBB))
+ continue;
+
+ llvm::SwitchInst *Switch = TransitionToCleanupSwitch(*this, BranchBB);
+
+ // Add a case to the switch.
+ Switch->addCase(Builder.getInt32(Fixup.DestinationIndex), Block);
}
- // Initialize a fixup.
- BranchFixup Fixup;
- Fixup.Destination = Dest.Block;
- Fixup.Origin = BI;
- Fixup.LatestBranch = BI;
- Fixup.LatestBranchIndex = 0;
-
- // We should never get invalid scope depths for these: invalid scope
- // depths only arise for as-yet-unemitted labels, and we can't do an
- // EH-unwind to one of those.
- assert(Dest.ScopeDepth.isValid() && "invalid scope depth on EH dest?");
-
- for (EHScopeStack::iterator I = EHStack.begin(),
- E = EHStack.find(Dest.ScopeDepth); I != E; ++I) {
- if (isa<EHCleanupScope>(*I)) {
- EHCleanupScope &Scope = cast<EHCleanupScope>(*I);
- if (Scope.isEHCleanup())
- ThreadFixupThroughCleanup(*this, Fixup, Scope.getEHEntry(),
- Scope.getEHExit());
- } else if (isa<EHLazyCleanupScope>(*I)) {
- EHLazyCleanupScope &Scope = cast<EHLazyCleanupScope>(*I);
- if (Scope.isEHCleanup()) {
- llvm::BasicBlock *Block = Scope.getEHBlock();
- if (!Block) {
- Block = createBasicBlock("eh.cleanup");
- Scope.setEHBlock(Block);
+ if (ResolvedAny)
+ EHStack.popNullFixups();
+}
+
+/// Activate a cleanup that was created in an inactive state.
+void CodeGenFunction::ActivateCleanup(EHScopeStack::stable_iterator C) {
+ assert(C != EHStack.stable_end() && "activating bottom of stack?");
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
+ assert(!Scope.isActive() && "double activation");
+
+ // Calculate whether the cleanup was used:
+ bool Used = false;
+
+ // - as a normal cleanup
+ if (Scope.isNormalCleanup()) {
+ bool NormalUsed = false;
+ if (Scope.getNormalBlock()) {
+ NormalUsed = true;
+ } else {
+ // Check whether any enclosed cleanups were needed.
+ for (EHScopeStack::stable_iterator
+ I = EHStack.getInnermostNormalCleanup(); I != C; ) {
+ assert(C.strictlyEncloses(I));
+ EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I));
+ if (S.getNormalBlock()) {
+ NormalUsed = true;
+ break;
}
- ThreadFixupThroughCleanup(*this, Fixup, Block, Block);
+ I = S.getEnclosingNormalCleanup();
}
}
+
+ if (NormalUsed)
+ Used = true;
+ else
+ Scope.setActivatedBeforeNormalUse(true);
+ }
+
+ // - as an EH cleanup
+ if (Scope.isEHCleanup()) {
+ bool EHUsed = false;
+ if (Scope.getEHBlock()) {
+ EHUsed = true;
+ } else {
+ // Check whether any enclosed cleanups were needed.
+ for (EHScopeStack::stable_iterator
+ I = EHStack.getInnermostEHCleanup(); I != C; ) {
+ assert(C.strictlyEncloses(I));
+ EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I));
+ if (S.getEHBlock()) {
+ EHUsed = true;
+ break;
+ }
+ I = S.getEnclosingEHCleanup();
+ }
+ }
+
+ if (EHUsed)
+ Used = true;
+ else
+ Scope.setActivatedBeforeEHUse(true);
}
- Builder.ClearInsertionPoint();
+ llvm::AllocaInst *Var = EHCleanupScope::activeSentinel();
+ if (Used) {
+ Var = CreateTempAlloca(Builder.getInt1Ty());
+ InitTempAlloca(Var, Builder.getFalse());
+ }
+ Scope.setActiveVar(Var);
+}
+
+llvm::Value *CodeGenFunction::getNormalCleanupDestSlot() {
+ if (!NormalCleanupDest)
+ NormalCleanupDest =
+ CreateTempAlloca(Builder.getInt32Ty(), "cleanup.dest.slot");
+ return NormalCleanupDest;
+}
+
+llvm::Value *CodeGenFunction::getEHCleanupDestSlot() {
+ if (!EHCleanupDest)
+ EHCleanupDest =
+ CreateTempAlloca(Builder.getInt32Ty(), "eh.cleanup.dest.slot");
+ return EHCleanupDest;
+}
+
+void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
+ llvm::ConstantInt *Init) {
+ assert (Init && "Invalid DeclRefExpr initializer!");
+ if (CGDebugInfo *Dbg = getDebugInfo())
+ Dbg->EmitGlobalVariable(E->getDecl(), Init, Builder);
}
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index 5ee3db08eea0..4f0420536ad2 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -41,6 +41,7 @@ namespace llvm {
}
namespace clang {
+ class APValue;
class ASTContext;
class CXXDestructorDecl;
class CXXTryStmt;
@@ -69,6 +70,7 @@ namespace CodeGen {
class CGFunctionInfo;
class CGRecordLayout;
class CGBlockInfo;
+ class CGCXXABI;
/// A branch fixup. These are required when emitting a goto to a
/// label which hasn't been emitted yet. The goto is optimistically
@@ -77,25 +79,34 @@ namespace CodeGen {
/// the innermost cleanup. When a (normal) cleanup is popped, any
/// unresolved fixups in that scope are threaded through the cleanup.
struct BranchFixup {
- /// The origin of the branch. Any switch-index stores required by
- /// cleanup threading are added before this instruction.
- llvm::Instruction *Origin;
+ /// The block containing the terminator which needs to be modified
+ /// into a switch if this fixup is resolved into the current scope.
+ /// If null, LatestBranch points directly to the destination.
+ llvm::BasicBlock *OptimisticBranchBlock;
- /// The destination of the branch.
+ /// The ultimate destination of the branch.
///
/// This can be set to null to indicate that this fixup was
/// successfully resolved.
llvm::BasicBlock *Destination;
- /// The last branch of the fixup. It is an invariant that
- /// LatestBranch->getSuccessor(LatestBranchIndex) == Destination.
- ///
- /// The branch is always either a BranchInst or a SwitchInst.
- llvm::TerminatorInst *LatestBranch;
- unsigned LatestBranchIndex;
+ /// The destination index value.
+ unsigned DestinationIndex;
+
+ /// The initial branch of the fixup.
+ llvm::BranchInst *InitialBranch;
};
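
These fields are filled in by EmitBranchThroughCleanup when the destination's scope depth is not yet known; a condensed sketch of that initialization, mirroring the CodeGenFunction.cpp hunk above:

// Condensed from the EmitBranchThroughCleanup changes above; illustration only.
llvm::BranchInst *BI = Builder.CreateBr(Dest.getBlock());

BranchFixup &Fixup = EHStack.addBranchFixup();
Fixup.Destination           = Dest.getBlock();     // where the goto wants to land
Fixup.DestinationIndex      = Dest.getDestIndex(); // index stored in cleanup.dest
Fixup.InitialBranch         = BI;                  // rewritten to aim at a cleanup
Fixup.OptimisticBranchBlock = 0;                   // no cleanup threaded through yet
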
-enum CleanupKind { NormalAndEHCleanup, EHCleanup, NormalCleanup };
+enum CleanupKind {
+ EHCleanup = 0x1,
+ NormalCleanup = 0x2,
+ NormalAndEHCleanup = EHCleanup | NormalCleanup,
+
+ InactiveCleanup = 0x4,
+ InactiveEHCleanup = EHCleanup | InactiveCleanup,
+ InactiveNormalCleanup = NormalCleanup | InactiveCleanup,
+ InactiveNormalAndEHCleanup = NormalAndEHCleanup | InactiveCleanup
+};
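
Since CleanupKind is now a bitmask rather than a three-way enum, a push site can combine the EH, normal, and active bits freely; a minimal sketch of the kind of query this enables (these helpers are illustrative, not part of the patch):

// Illustrative helpers only; the names are made up.
static bool isEHCleanupKind(CleanupKind K)     { return K & EHCleanup; }
static bool isNormalCleanupKind(CleanupKind K) { return K & NormalCleanup; }
static bool isActiveCleanupKind(CleanupKind K) { return !(K & InactiveCleanup); }
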
/// A stack of scopes which respond to exceptions, including cleanups
/// and catch blocks.
@@ -117,6 +128,17 @@ public:
bool isValid() const { return Size >= 0; }
+ /// Returns true if this scope encloses I.
+ /// Returns false if I is invalid.
+ /// This scope must be valid.
+ bool encloses(stable_iterator I) const { return Size <= I.Size; }
+
+ /// Returns true if this scope strictly encloses I: that is,
+ /// if it encloses I and is not I.
+    /// Returns false if I is invalid.
+ /// This scope must be valid.
+ bool strictlyEncloses(stable_iterator I) const { return Size < I.Size; }
+
friend bool operator==(stable_iterator A, stable_iterator B) {
return A.Size == B.Size;
}
@@ -125,13 +147,14 @@ public:
}
};
- /// A lazy cleanup. Subclasses must be POD-like: cleanups will
- /// not be destructed, and they will be allocated on the cleanup
- /// stack and freely copied and moved around.
+ /// Information for lazily generating a cleanup. Subclasses must be
+ /// POD-like: cleanups will not be destructed, and they will be
+ /// allocated on the cleanup stack and freely copied and moved
+ /// around.
///
- /// LazyCleanup implementations should generally be declared in an
+ /// Cleanup implementations should generally be declared in an
/// anonymous namespace.
- class LazyCleanup {
+ class Cleanup {
public:
// Anchor the construction vtable. We use the destructor because
// gcc gives an obnoxious warning if there are virtual methods
@@ -140,7 +163,7 @@ public:
// doesn't seem to be any other way around this warning.
//
// This destructor will never be called.
- virtual ~LazyCleanup();
+ virtual ~Cleanup();
/// Emit the cleanup. For normal cleanups, this is run in the
/// same EH context as when the cleanup was pushed, i.e. the
@@ -177,6 +200,11 @@ private:
/// The number of catches on the stack.
unsigned CatchDepth;
+  /// The current EH destination index.  Reset to FirstEHDestIndex
+ /// whenever the last EH cleanup is popped.
+ unsigned NextEHDestIndex;
+ enum { FirstEHDestIndex = 1 };
+
/// The current set of branch fixups. A branch fixup is a jump to
/// an as-yet unemitted label, i.e. a label for which we don't yet
/// know the EH stack depth. Whenever we pop a cleanup, we have
@@ -198,64 +226,64 @@ private:
char *allocate(size_t Size);
- void popNullFixups();
-
- void *pushLazyCleanup(CleanupKind K, size_t DataSize);
+ void *pushCleanup(CleanupKind K, size_t DataSize);
public:
EHScopeStack() : StartOfBuffer(0), EndOfBuffer(0), StartOfData(0),
InnermostNormalCleanup(stable_end()),
InnermostEHCleanup(stable_end()),
- CatchDepth(0) {}
+ CatchDepth(0), NextEHDestIndex(FirstEHDestIndex) {}
~EHScopeStack() { delete[] StartOfBuffer; }
// Variadic templates would make this not terrible.
/// Push a lazily-created cleanup on the stack.
template <class T>
- void pushLazyCleanup(CleanupKind Kind) {
- void *Buffer = pushLazyCleanup(Kind, sizeof(T));
- LazyCleanup *Obj = new(Buffer) T();
+ void pushCleanup(CleanupKind Kind) {
+ void *Buffer = pushCleanup(Kind, sizeof(T));
+ Cleanup *Obj = new(Buffer) T();
(void) Obj;
}
/// Push a lazily-created cleanup on the stack.
template <class T, class A0>
- void pushLazyCleanup(CleanupKind Kind, A0 a0) {
- void *Buffer = pushLazyCleanup(Kind, sizeof(T));
- LazyCleanup *Obj = new(Buffer) T(a0);
+ void pushCleanup(CleanupKind Kind, A0 a0) {
+ void *Buffer = pushCleanup(Kind, sizeof(T));
+ Cleanup *Obj = new(Buffer) T(a0);
(void) Obj;
}
/// Push a lazily-created cleanup on the stack.
template <class T, class A0, class A1>
- void pushLazyCleanup(CleanupKind Kind, A0 a0, A1 a1) {
- void *Buffer = pushLazyCleanup(Kind, sizeof(T));
- LazyCleanup *Obj = new(Buffer) T(a0, a1);
+ void pushCleanup(CleanupKind Kind, A0 a0, A1 a1) {
+ void *Buffer = pushCleanup(Kind, sizeof(T));
+ Cleanup *Obj = new(Buffer) T(a0, a1);
(void) Obj;
}
/// Push a lazily-created cleanup on the stack.
template <class T, class A0, class A1, class A2>
- void pushLazyCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2) {
- void *Buffer = pushLazyCleanup(Kind, sizeof(T));
- LazyCleanup *Obj = new(Buffer) T(a0, a1, a2);
+ void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2) {
+ void *Buffer = pushCleanup(Kind, sizeof(T));
+ Cleanup *Obj = new(Buffer) T(a0, a1, a2);
(void) Obj;
}
/// Push a lazily-created cleanup on the stack.
template <class T, class A0, class A1, class A2, class A3>
- void pushLazyCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2, A3 a3) {
- void *Buffer = pushLazyCleanup(Kind, sizeof(T));
- LazyCleanup *Obj = new(Buffer) T(a0, a1, a2, a3);
+ void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2, A3 a3) {
+ void *Buffer = pushCleanup(Kind, sizeof(T));
+ Cleanup *Obj = new(Buffer) T(a0, a1, a2, a3);
(void) Obj;
}
- /// Push a cleanup on the stack.
- void pushCleanup(llvm::BasicBlock *NormalEntry,
- llvm::BasicBlock *NormalExit,
- llvm::BasicBlock *EHEntry,
- llvm::BasicBlock *EHExit);
+ /// Push a lazily-created cleanup on the stack.
+ template <class T, class A0, class A1, class A2, class A3, class A4>
+ void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2, A3 a3, A4 a4) {
+ void *Buffer = pushCleanup(Kind, sizeof(T));
+ Cleanup *Obj = new(Buffer) T(a0, a1, a2, a3, a4);
+ (void) Obj;
+ }
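
A caller-side sketch of the renamed API, assuming the usual pattern of a POD-like Cleanup subclass in an anonymous namespace (CallObjDtor and its Emit body are hypothetical; the Emit signature is inferred from the EmitCleanup helper in CodeGenFunction.cpp above):

// Hypothetical cleanup type, shown only to illustrate pushCleanup<T>.
namespace {
  struct CallObjDtor : EHScopeStack::Cleanup {
    llvm::Value *Addr;
    CallObjDtor(llvm::Value *Addr) : Addr(Addr) {}

    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      // Emit whatever IR tears down the object at Addr; runs on both the
      // normal and the EH path when pushed as NormalAndEHCleanup.
    }
  };
}

// At the point where the object becomes live, inside a CodeGenFunction member:
//   EHStack.pushCleanup<CallObjDtor>(NormalAndEHCleanup, Addr);
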
/// Pops a cleanup scope off the stack. This should only be called
/// by CodeGenFunction::PopCleanupBlock.
@@ -298,6 +326,7 @@ public:
stable_iterator getInnermostNormalCleanup() const {
return InnermostNormalCleanup;
}
+ stable_iterator getInnermostActiveNormalCleanup() const; // CGException.h
/// Determines whether there are any EH cleanups on the stack.
bool hasEHCleanups() const {
@@ -309,6 +338,7 @@ public:
stable_iterator getInnermostEHCleanup() const {
return InnermostEHCleanup;
}
+ stable_iterator getInnermostActiveEHCleanup() const; // CGException.h
/// An unstable reference to a scope-stack depth. Invalidated by
/// pushes but not pops.
@@ -359,8 +389,17 @@ public:
return BranchFixups[I];
}
- /// Mark any branch fixups leading to the given block as resolved.
- void resolveBranchFixups(llvm::BasicBlock *Dest);
+ /// Pops lazily-removed fixups from the end of the list. This
+ /// should only be called by procedures which have just popped a
+ /// cleanup or resolved one or more fixups.
+ void popNullFixups();
+
+ /// Clears the branch-fixups list. This should only be called by
+ /// CodeGenFunction::ResolveAllBranchFixups.
+ void clearFixups() { BranchFixups.clear(); }
+
+ /// Gets the next EH destination index.
+ unsigned getNextEHDestIndex() { return NextEHDestIndex++; }
};
/// CodeGenFunction - This class organizes the per-function state that is used
@@ -368,17 +407,47 @@ public:
class CodeGenFunction : public BlockFunction {
CodeGenFunction(const CodeGenFunction&); // DO NOT IMPLEMENT
void operator=(const CodeGenFunction&); // DO NOT IMPLEMENT
+
+ friend class CGCXXABI;
public:
- /// A jump destination is a pair of a basic block and a cleanup
- /// depth. They are used to implement direct jumps across cleanup
- /// scopes, e.g. goto, break, continue, and return.
+ /// A jump destination is an abstract label, branching to which may
+ /// require a jump out through normal cleanups.
struct JumpDest {
- JumpDest() : Block(0), ScopeDepth() {}
- JumpDest(llvm::BasicBlock *Block, EHScopeStack::stable_iterator Depth)
- : Block(Block), ScopeDepth(Depth) {}
+ JumpDest() : Block(0), ScopeDepth(), Index(0) {}
+ JumpDest(llvm::BasicBlock *Block,
+ EHScopeStack::stable_iterator Depth,
+ unsigned Index)
+ : Block(Block), ScopeDepth(Depth), Index(Index) {}
+
+ bool isValid() const { return Block != 0; }
+ llvm::BasicBlock *getBlock() const { return Block; }
+ EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
+ unsigned getDestIndex() const { return Index; }
+ private:
+ llvm::BasicBlock *Block;
+ EHScopeStack::stable_iterator ScopeDepth;
+ unsigned Index;
+ };
+
+ /// An unwind destination is an abstract label, branching to which
+ /// may require a jump out through EH cleanups.
+ struct UnwindDest {
+ UnwindDest() : Block(0), ScopeDepth(), Index(0) {}
+ UnwindDest(llvm::BasicBlock *Block,
+ EHScopeStack::stable_iterator Depth,
+ unsigned Index)
+ : Block(Block), ScopeDepth(Depth), Index(Index) {}
+
+ bool isValid() const { return Block != 0; }
+ llvm::BasicBlock *getBlock() const { return Block; }
+ EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
+ unsigned getDestIndex() const { return Index; }
+
+ private:
llvm::BasicBlock *Block;
EHScopeStack::stable_iterator ScopeDepth;
+ unsigned Index;
};
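
A minimal sketch of how a JumpDest is meant to flow through the cleanup-aware branch emitters declared further down in this class (the block name and placement are arbitrary):

// Inside some CodeGenFunction member; illustration only.
JumpDest Done = getJumpDestInCurrentScope("done");

// ... push cleanups, emit the protected region ...

EmitBranchThroughCleanup(Done);   // records branch-afters/branch-throughs
                                  // on any intervening normal cleanups

// ... once those cleanups have been popped, at the destination scope:
EmitBlock(Done.getBlock());
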
CodeGenModule &CGM; // Per-module state.
@@ -406,6 +475,9 @@ public:
/// iff the function has no return value.
llvm::Value *ReturnValue;
+ /// RethrowBlock - Unified rethrow block.
+ UnwindDest RethrowBlock;
+
/// AllocaInsertPoint - This is an instruction in the entry block before which
/// we prefer to insert allocas.
llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
@@ -423,6 +495,12 @@ public:
EHScopeStack EHStack;
+ /// i32s containing the indexes of the cleanup destinations.
+ llvm::AllocaInst *NormalCleanupDest;
+ llvm::AllocaInst *EHCleanupDest;
+
+ unsigned NextCleanupDestIndex;
+
/// The exception slot. All landing pads write the current
/// exception pointer into this alloca.
llvm::Value *ExceptionSlot;
@@ -454,30 +532,17 @@ public:
/// non-trivial destructor.
void PushDestructorCleanup(QualType T, llvm::Value *Addr);
+ /// PushDestructorCleanup - Push a cleanup to call the
+ /// complete-object variant of the given destructor on the object at
+ /// the given address.
+ void PushDestructorCleanup(const CXXDestructorDecl *Dtor,
+ llvm::Value *Addr);
+
/// PopCleanupBlock - Will pop the cleanup entry on the stack and
/// process all branch fixups.
- void PopCleanupBlock();
-
- /// CleanupBlock - RAII object that will create a cleanup block and
- /// set the insert point to that block. When destructed, it sets the
- /// insert point to the previous block and pushes a new cleanup
- /// entry on the stack.
- class CleanupBlock {
- CodeGenFunction &CGF;
- CGBuilderTy::InsertPoint SavedIP;
- llvm::BasicBlock *NormalCleanupEntryBB;
- llvm::BasicBlock *NormalCleanupExitBB;
- llvm::BasicBlock *EHCleanupEntryBB;
-
- public:
- CleanupBlock(CodeGenFunction &CGF, CleanupKind Kind);
+ void PopCleanupBlock(bool FallThroughIsBranchThrough = false);
- /// If we're currently writing a normal cleanup, tie that off and
- /// start writing an EH cleanup.
- void beginEHCleanup();
-
- ~CleanupBlock();
- };
+ void ActivateCleanup(EHScopeStack::stable_iterator Cleanup);
/// \brief Enters a new scope for capturing cleanups, all of which
/// will be executed once the scope is exited.
@@ -528,18 +593,23 @@ public:
/// the cleanup blocks that have been added.
void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize);
+ void ResolveAllBranchFixups(llvm::SwitchInst *Switch);
+ void ResolveBranchFixups(llvm::BasicBlock *Target);
+
/// The given basic block lies in the current EH scope, but may be a
/// target of a potentially scope-crossing jump; get a stable handle
/// to which we can perform this jump later.
- JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target) const {
- return JumpDest(Target, EHStack.stable_begin());
+ JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target) {
+ return JumpDest(Target,
+ EHStack.getInnermostNormalCleanup(),
+ NextCleanupDestIndex++);
}
/// The given basic block lies in the current EH scope, but may be a
/// target of a potentially scope-crossing jump; get a stable handle
/// to which we can perform this jump later.
JumpDest getJumpDestInCurrentScope(const char *Name = 0) {
- return JumpDest(createBasicBlock(Name), EHStack.stable_begin());
+ return getJumpDestInCurrentScope(createBasicBlock(Name));
}
/// EmitBranchThroughCleanup - Emit a branch from the current insert
@@ -550,7 +620,11 @@ public:
/// EmitBranchThroughEHCleanup - Emit a branch from the current
/// insert block through the EH cleanup handling code (if any) and
/// then on to \arg Dest.
- void EmitBranchThroughEHCleanup(JumpDest Dest);
+ void EmitBranchThroughEHCleanup(UnwindDest Dest);
+
+ /// getRethrowDest - Returns the unified outermost-scope rethrow
+ /// destination.
+ UnwindDest getRethrowDest();
/// BeginConditionalBranch - Should be called before a conditional part of an
/// expression is emitted. For example, before the RHS of the expression below
@@ -608,10 +682,6 @@ private:
/// statement range in current switch instruction.
llvm::BasicBlock *CaseRangeBlock;
- /// InvokeDest - This is the nearest exception target for calls
- /// which can unwind, when exceptions are being used.
- llvm::BasicBlock *InvokeDest;
-
// VLASizeMap - This keeps track of the associated size for each VLA type.
// We track this by the size expression rather than the type itself because
// in certain situations, like a const qualifier applied to an VLA typedef,
@@ -661,6 +731,7 @@ private:
public:
CodeGenFunction(CodeGenModule &cgm);
+ CodeGenTypes &getTypes() const { return CGM.getTypes(); }
ASTContext &getContext() const;
CGDebugInfo *getDebugInfo() { return DebugInfo; }
@@ -668,6 +739,9 @@ public:
/// is assigned in every landing pad.
llvm::Value *getExceptionSlot();
+ llvm::Value *getNormalCleanupDestSlot();
+ llvm::Value *getEHCleanupDestSlot();
+
llvm::BasicBlock *getUnreachableBlock() {
if (!UnreachableBlock) {
UnreachableBlock = createBasicBlock("unreachable");
@@ -711,15 +785,16 @@ public:
llvm::Value *BuildBlockLiteralTmp(const BlockExpr *);
llvm::Constant *BuildDescriptorBlockDecl(const BlockExpr *,
- bool BlockHasCopyDispose,
- CharUnits Size,
+ const CGBlockInfo &Info,
const llvm::StructType *,
+ llvm::Constant *BlockVarLayout,
std::vector<HelperInfo> *);
llvm::Function *GenerateBlockFunction(GlobalDecl GD,
const BlockExpr *BExpr,
CGBlockInfo &Info,
const Decl *OuterFuncDecl,
+ llvm::Constant *& BlockVarLayout,
llvm::DenseMap<const Decl*, llvm::Value*> ldm);
llvm::Value *LoadBlockStruct();
@@ -777,11 +852,11 @@ public:
void InitializeVTablePointers(const CXXRecordDecl *ClassDecl);
- /// EmitDtorEpilogue - Emit all code that comes at the end of class's
- /// destructor. This is to call destructors on members and base classes in
- /// reverse order of their construction.
- void EmitDtorEpilogue(const CXXDestructorDecl *Dtor,
- CXXDtorType Type);
+ /// EnterDtorCleanups - Enter the cleanups necessary to complete the
+ /// given phase of destruction for a destructor. The end result
+ /// should call destructors on members and base classes in reverse
+ /// order of their construction.
+ void EnterDtorCleanups(const CXXDestructorDecl *Dtor, CXXDtorType Type);
/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls
@@ -898,10 +973,8 @@ public:
// Helpers
//===--------------------------------------------------------------------===//
- Qualifiers MakeQualifiers(QualType T) {
- Qualifiers Quals = getContext().getCanonicalType(T).getQualifiers();
- Quals.setObjCGCAttr(getContext().getObjCGCAttrKind(T));
- return Quals;
+ LValue MakeAddrLValue(llvm::Value *V, QualType T, unsigned Alignment = 0) {
+ return LValue::MakeAddr(V, T, Alignment, getContext());
}
/// CreateTempAlloca - This creates a alloca and inserts it into the entry
@@ -965,10 +1038,16 @@ public:
void StartBlock(const char *N);
/// GetAddrOfStaticLocalVar - Return the address of a static local variable.
- llvm::Constant *GetAddrOfStaticLocalVar(const VarDecl *BVD);
+ llvm::Constant *GetAddrOfStaticLocalVar(const VarDecl *BVD) {
+ return cast<llvm::Constant>(GetAddrOfLocalVar(BVD));
+ }
/// GetAddrOfLocalVar - Return the address of a local variable.
- llvm::Value *GetAddrOfLocalVar(const VarDecl *VD);
+ llvm::Value *GetAddrOfLocalVar(const VarDecl *VD) {
+ llvm::Value *Res = LocalDeclMap[VD];
+ assert(Res && "Invalid argument to GetAddrOfLocalVar(), no decl!");
+ return Res;
+ }
/// getAccessedFieldNo - Given an encoded value and a result number, return
/// the input field number being accessed.
@@ -1025,12 +1104,14 @@ public:
/// load of 'this' and returns address of the base class.
llvm::Value *GetAddressOfBaseClass(llvm::Value *Value,
const CXXRecordDecl *Derived,
- const CXXBaseSpecifierArray &BasePath,
+ CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd,
bool NullCheckValue);
llvm::Value *GetAddressOfDerivedClass(llvm::Value *Value,
const CXXRecordDecl *Derived,
- const CXXBaseSpecifierArray &BasePath,
+ CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd,
bool NullCheckValue);
llvm::Value *GetVirtualBaseClassOffset(llvm::Value *This,
@@ -1049,13 +1130,15 @@ public:
const ConstantArrayType *ArrayTy,
llvm::Value *ArrayPtr,
CallExpr::const_arg_iterator ArgBeg,
- CallExpr::const_arg_iterator ArgEnd);
+ CallExpr::const_arg_iterator ArgEnd,
+ bool ZeroInitialization = false);
void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
llvm::Value *NumElements,
llvm::Value *ArrayPtr,
CallExpr::const_arg_iterator ArgBeg,
- CallExpr::const_arg_iterator ArgEnd);
+ CallExpr::const_arg_iterator ArgEnd,
+ bool ZeroInitialization = false);
void EmitCXXAggrDestructorCall(const CXXDestructorDecl *D,
const ArrayType *Array,
@@ -1224,13 +1307,13 @@ public:
/// care to appropriately convert from the memory representation to
/// the LLVM value representation.
llvm::Value *EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
- QualType Ty);
+ unsigned Alignment, QualType Ty);
/// EmitStoreOfScalar - Store a scalar value to an address, taking
/// care to appropriately convert from the memory representation to
/// the LLVM value representation.
void EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
- bool Volatile, QualType Ty);
+ bool Volatile, unsigned Alignment, QualType Ty);
/// EmitLoadOfLValue - Given an expression that represents a value lvalue,
/// this method emits the address of the lvalue, then loads the result as an
@@ -1270,7 +1353,6 @@ public:
LValue EmitDeclRefLValue(const DeclRefExpr *E);
LValue EmitStringLiteralLValue(const StringLiteral *E);
LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E);
- LValue EmitPredefinedFunctionName(unsigned Type);
LValue EmitPredefinedLValue(const PredefinedExpr *E);
LValue EmitUnaryOpLValue(const UnaryOperator *E);
LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E);
@@ -1319,7 +1401,7 @@ public:
LValue EmitStmtExprLValue(const StmtExpr *E);
LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E);
LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E);
-
+ void EmitDeclRefExprDbgValue(const DeclRefExpr *E, llvm::ConstantInt *Init);
//===--------------------------------------------------------------------===//
// Scalar Expression Emission
//===--------------------------------------------------------------------===//
@@ -1386,7 +1468,8 @@ public:
llvm::SmallVectorImpl<llvm::Value*> &O,
const char *name, bool splat = false,
unsigned shift = 0, bool rightshift = false);
- llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx);
+ llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx,
+ bool widen = false);
llvm::Value *EmitNeonShiftVector(llvm::Value *V, const llvm::Type *Ty,
bool negateForRightShift);
@@ -1542,7 +1625,7 @@ public:
/// getTrapBB - Create a basic block that will call the trap intrinsic. We'll
/// generate a branch around the created basic block as necessary.
- llvm::BasicBlock* getTrapBB();
+ llvm::BasicBlock *getTrapBB();
/// EmitCallArg - Emit a single call argument.
RValue EmitCallArg(const Expr *E, QualType ArgType);
@@ -1575,6 +1658,11 @@ private:
const TargetInfo::ConstraintInfo &Info,
const Expr *InputExpr, std::string &ConstraintStr);
+ llvm::Value* EmitAsmInputLValue(const AsmStmt &S,
+ const TargetInfo::ConstraintInfo &Info,
+ LValue InputValue, QualType InputType,
+ std::string &ConstraintStr);
+
/// EmitCallArgs - Emit call arguments for a function.
/// The CallArgTypeInfo parameter is used for iterating over the known
/// argument types of the function being called.
@@ -1622,7 +1710,36 @@ private:
void EmitDeclMetadata();
};
-
+/// CGBlockInfo - Information to generate a block literal.
+class CGBlockInfo {
+public:
+  /// Name - The name of the block, kind of.
+ const char *Name;
+
+ /// DeclRefs - Variables from parent scopes that have been
+ /// imported into this block.
+ llvm::SmallVector<const BlockDeclRefExpr *, 8> DeclRefs;
+
+ /// InnerBlocks - This block and the blocks it encloses.
+ llvm::SmallPtrSet<const DeclContext *, 4> InnerBlocks;
+
+ /// CXXThisRef - Non-null if 'this' was required somewhere, in
+ /// which case this is that expression.
+ const CXXThisExpr *CXXThisRef;
+
+ /// NeedsObjCSelf - True if something in this block has an implicit
+ /// reference to 'self'.
+ bool NeedsObjCSelf;
+
+ /// These are initialized by GenerateBlockFunction.
+ bool BlockHasCopyDispose;
+ CharUnits BlockSize;
+ CharUnits BlockAlign;
+ llvm::SmallVector<const Expr*, 8> BlockLayout;
+
+ CGBlockInfo(const char *Name);
+};
+
} // end namespace CodeGen
} // end namespace clang
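
[Editor's note on the hunks above, not part of the patch.] The reworked JumpDest pairs a block with a cleanup depth and a small destination index; a branch that must leave cleanup scopes stores that index in the NormalCleanupDest slot, runs the cleanup once, and the cleanup's exit dispatches on the index (see ResolveAllBranchFixups taking a llvm::SwitchInst). A minimal, stand-alone sketch of that dispatch idea in plain C++ (not Clang's IR-generation API; dest and resource are illustrative names):

    #include <cstdio>

    int main() {
      int dest = 0;          // stands in for the NormalCleanupDest slot
      bool resource = true;  // stands in for something a cleanup must release

      for (int i = 0; i < 3; ++i) {
        if (i == 1) { dest = 1; goto cleanup; }  // a "break" leaving the scope
        if (i == 2) { dest = 2; goto cleanup; }  // a "return" leaving the scope
      }
      dest = 0;
    cleanup:
      if (resource) { std::puts("cleanup runs exactly once"); resource = false; }
      switch (dest) {  // dispatch to the real destination by its index
      case 1: std::puts("...then on to the break target"); break;
      case 2: std::puts("...then on to the return target"); break;
      default: std::puts("...then falls through normally"); break;
      }
      return 0;
    }
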
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
index bf606a616582..d125b370a07a 100644
--- a/lib/CodeGen/CodeGenModule.cpp
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -15,6 +15,7 @@
#include "CGDebugInfo.h"
#include "CodeGenFunction.h"
#include "CGCall.h"
+#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "Mangle.h"
#include "TargetInfo.h"
@@ -41,6 +42,17 @@
using namespace clang;
using namespace CodeGen;
+static CGCXXABI &createCXXABI(CodeGenModule &CGM) {
+ switch (CGM.getContext().Target.getCXXABI()) {
+ case CXXABI_ARM: return *CreateARMCXXABI(CGM);
+ case CXXABI_Itanium: return *CreateItaniumCXXABI(CGM);
+ case CXXABI_Microsoft: return *CreateMicrosoftCXXABI(CGM);
+ }
+
+ llvm_unreachable("invalid C++ ABI kind");
+ return *CreateItaniumCXXABI(CGM);
+}
+
CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
llvm::Module &M, const llvm::TargetData &TD,
@@ -48,11 +60,15 @@ CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
: BlockModule(C, M, TD, Types, *this), Context(C),
Features(C.getLangOptions()), CodeGenOpts(CGO), TheModule(M),
TheTargetData(TD), TheTargetCodeGenInfo(0), Diags(diags),
- Types(C, M, TD, getTargetCodeGenInfo().getABIInfo()),
- VTables(*this), Runtime(0), ABI(0),
- CFConstantStringClassRef(0),
- NSConstantStringClassRef(0),
- VMContext(M.getContext()) {
+ ABI(createCXXABI(*this)),
+ Types(C, M, TD, getTargetCodeGenInfo().getABIInfo(), ABI),
+ VTables(*this), Runtime(0),
+ CFConstantStringClassRef(0), NSConstantStringClassRef(0),
+ VMContext(M.getContext()),
+ NSConcreteGlobalBlockDecl(0), NSConcreteStackBlockDecl(0),
+ NSConcreteGlobalBlock(0), NSConcreteStackBlock(0),
+ BlockObjectAssignDecl(0), BlockObjectDisposeDecl(0),
+ BlockObjectAssign(0), BlockObjectDispose(0){
if (!Features.ObjC1)
Runtime = 0;
@@ -63,17 +79,13 @@ CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
else
Runtime = CreateMacObjCRuntime(*this);
- if (!Features.CPlusPlus)
- ABI = 0;
- else createCXXABI();
-
// If debug info generation is enabled, create the CGDebugInfo object.
DebugInfo = CodeGenOpts.DebugInfo ? new CGDebugInfo(*this) : 0;
}
CodeGenModule::~CodeGenModule() {
delete Runtime;
- delete ABI;
+ delete &ABI;
delete DebugInfo;
}
@@ -86,13 +98,6 @@ void CodeGenModule::createObjCRuntime() {
Runtime = CreateMacObjCRuntime(*this);
}
-void CodeGenModule::createCXXABI() {
- if (Context.Target.getCXXABI() == "microsoft")
- ABI = CreateMicrosoftCXXABI(*this);
- else
- ABI = CreateItaniumCXXABI(*this);
-}
-
void CodeGenModule::Release() {
EmitDeferred();
EmitCXXGlobalInitFunc();
@@ -141,17 +146,17 @@ void CodeGenModule::ErrorUnsupported(const Decl *D, const char *Type,
LangOptions::VisibilityMode
CodeGenModule::getDeclVisibilityMode(const Decl *D) const {
if (const VarDecl *VD = dyn_cast<VarDecl>(D))
- if (VD->getStorageClass() == VarDecl::PrivateExtern)
+ if (VD->getStorageClass() == SC_PrivateExtern)
return LangOptions::Hidden;
if (const VisibilityAttr *attr = D->getAttr<VisibilityAttr>()) {
switch (attr->getVisibility()) {
default: assert(0 && "Unknown visibility!");
- case VisibilityAttr::DefaultVisibility:
+ case VisibilityAttr::Default:
return LangOptions::Default;
- case VisibilityAttr::HiddenVisibility:
+ case VisibilityAttr::Hidden:
return LangOptions::Hidden;
- case VisibilityAttr::ProtectedVisibility:
+ case VisibilityAttr::Protected:
return LangOptions::Protected;
}
}
@@ -187,9 +192,11 @@ CodeGenModule::getDeclVisibilityMode(const Decl *D) const {
return LangOptions::Hidden;
}
- // This decl should have the same visibility as its parent.
+ // If this decl is contained in a class, it should have the same visibility
+ // as the parent class.
if (const DeclContext *DC = D->getDeclContext())
- return getDeclVisibilityMode(cast<Decl>(DC));
+ if (DC->isRecord())
+ return getDeclVisibilityMode(cast<Decl>(DC));
return getLangOptions().getVisibilityMode();
}
@@ -213,6 +220,67 @@ void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV,
}
}
+/// Set the symbol visibility of type information (vtable and RTTI)
+/// associated with the given type.
+void CodeGenModule::setTypeVisibility(llvm::GlobalValue *GV,
+ const CXXRecordDecl *RD,
+ bool IsForRTTI) const {
+ setGlobalVisibility(GV, RD);
+
+ if (!CodeGenOpts.HiddenWeakVTables)
+ return;
+
+ // We want to drop the visibility to hidden for weak type symbols.
+ // This isn't possible if there might be unresolved references
+ // elsewhere that rely on this symbol being visible.
+
+ // This should be kept roughly in sync with setThunkVisibility
+ // in CGVTables.cpp.
+
+ // Preconditions.
+ if (GV->getLinkage() != llvm::GlobalVariable::WeakODRLinkage ||
+ GV->getVisibility() != llvm::GlobalVariable::DefaultVisibility)
+ return;
+
+ // Don't override an explicit visibility attribute.
+ if (RD->hasAttr<VisibilityAttr>())
+ return;
+
+ switch (RD->getTemplateSpecializationKind()) {
+ // We have to disable the optimization if this is an EI definition
+ // because there might be EI declarations in other shared objects.
+ case TSK_ExplicitInstantiationDefinition:
+ case TSK_ExplicitInstantiationDeclaration:
+ return;
+
+ // Every use of a non-template class's type information has to emit it.
+ case TSK_Undeclared:
+ break;
+
+ // In theory, implicit instantiations can ignore the possibility of
+ // an explicit instantiation declaration because there necessarily
+ // must be an EI definition somewhere with default visibility. In
+ // practice, it's possible to have an explicit instantiation for
+ // an arbitrary template class, and linkers aren't necessarily able
+ // to deal with mixed-visibility symbols.
+ case TSK_ExplicitSpecialization:
+ case TSK_ImplicitInstantiation:
+ if (!CodeGenOpts.HiddenWeakTemplateVTables)
+ return;
+ break;
+ }
+
+ // If there's a key function, there may be translation units
+ // that don't have the key function's definition. But ignore
+ // this if we're emitting RTTI under -fno-rtti.
+ if (!IsForRTTI || Features.RTTI)
+ if (Context.getKeyFunction(RD))
+ return;
+
+ // Otherwise, drop the visibility to hidden.
+ GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+}
+
llvm::StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
const NamedDecl *ND = cast<NamedDecl>(GD.getDecl());
@@ -220,7 +288,7 @@ llvm::StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
if (!Str.empty())
return Str;
- if (!getMangleContext().shouldMangleDeclName(ND)) {
+ if (!getCXXABI().getMangleContext().shouldMangleDeclName(ND)) {
IdentifierInfo *II = ND->getIdentifier();
assert(II && "Attempt to mangle unnamed decl.");
@@ -230,13 +298,13 @@ llvm::StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
llvm::SmallString<256> Buffer;
if (const CXXConstructorDecl *D = dyn_cast<CXXConstructorDecl>(ND))
- getMangleContext().mangleCXXCtor(D, GD.getCtorType(), Buffer);
+ getCXXABI().getMangleContext().mangleCXXCtor(D, GD.getCtorType(), Buffer);
else if (const CXXDestructorDecl *D = dyn_cast<CXXDestructorDecl>(ND))
- getMangleContext().mangleCXXDtor(D, GD.getDtorType(), Buffer);
+ getCXXABI().getMangleContext().mangleCXXDtor(D, GD.getDtorType(), Buffer);
else if (const BlockDecl *BD = dyn_cast<BlockDecl>(ND))
- getMangleContext().mangleBlock(GD, BD, Buffer);
+ getCXXABI().getMangleContext().mangleBlock(GD, BD, Buffer);
else
- getMangleContext().mangleName(ND, Buffer);
+ getCXXABI().getMangleContext().mangleName(ND, Buffer);
// Allocate space for the mangled name.
size_t Length = Buffer.size();
@@ -250,7 +318,7 @@ llvm::StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
void CodeGenModule::getMangledName(GlobalDecl GD, MangleBuffer &Buffer,
const BlockDecl *BD) {
- getMangleContext().mangleBlock(GD, BD, Buffer.getBuffer());
+ getCXXABI().getMangleContext().mangleBlock(GD, BD, Buffer.getBuffer());
}
llvm::GlobalValue *CodeGenModule::GetGlobalValue(llvm::StringRef Name) {
@@ -319,68 +387,9 @@ void CodeGenModule::EmitAnnotations() {
gv->setSection("llvm.metadata");
}
-static CodeGenModule::GVALinkage
-GetLinkageForFunction(ASTContext &Context, const FunctionDecl *FD,
- const LangOptions &Features) {
- CodeGenModule::GVALinkage External = CodeGenModule::GVA_StrongExternal;
-
- Linkage L = FD->getLinkage();
- if (L == ExternalLinkage && Context.getLangOptions().CPlusPlus &&
- FD->getType()->getLinkage() == UniqueExternalLinkage)
- L = UniqueExternalLinkage;
-
- switch (L) {
- case NoLinkage:
- case InternalLinkage:
- case UniqueExternalLinkage:
- return CodeGenModule::GVA_Internal;
-
- case ExternalLinkage:
- switch (FD->getTemplateSpecializationKind()) {
- case TSK_Undeclared:
- case TSK_ExplicitSpecialization:
- External = CodeGenModule::GVA_StrongExternal;
- break;
-
- case TSK_ExplicitInstantiationDefinition:
- return CodeGenModule::GVA_ExplicitTemplateInstantiation;
-
- case TSK_ExplicitInstantiationDeclaration:
- case TSK_ImplicitInstantiation:
- External = CodeGenModule::GVA_TemplateInstantiation;
- break;
- }
- }
-
- if (!FD->isInlined())
- return External;
-
- if (!Features.CPlusPlus || FD->hasAttr<GNUInlineAttr>()) {
- // GNU or C99 inline semantics. Determine whether this symbol should be
- // externally visible.
- if (FD->isInlineDefinitionExternallyVisible())
- return External;
-
- // C99 inline semantics, where the symbol is not externally visible.
- return CodeGenModule::GVA_C99Inline;
- }
-
- // C++0x [temp.explicit]p9:
- // [ Note: The intent is that an inline function that is the subject of
- // an explicit instantiation declaration will still be implicitly
- // instantiated when used so that the body can be considered for
- // inlining, but that no out-of-line copy of the inline function would be
- // generated in the translation unit. -- end note ]
- if (FD->getTemplateSpecializationKind()
- == TSK_ExplicitInstantiationDeclaration)
- return CodeGenModule::GVA_C99Inline;
-
- return CodeGenModule::GVA_CXXInline;
-}
-
llvm::GlobalValue::LinkageTypes
CodeGenModule::getFunctionLinkage(const FunctionDecl *D) {
- GVALinkage Linkage = GetLinkageForFunction(getContext(), D, Features);
+ GVALinkage Linkage = getContext().GetGVALinkageForFunction(D);
if (Linkage == GVA_Internal)
return llvm::Function::InternalLinkage;
@@ -454,12 +463,10 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
else if (Features.getStackProtectorMode() == LangOptions::SSPReq)
F->addFnAttr(llvm::Attribute::StackProtectReq);
- if (const AlignedAttr *AA = D->getAttr<AlignedAttr>()) {
- unsigned width = Context.Target.getCharWidth();
- F->setAlignment(AA->getAlignment() / width);
- while ((AA = AA->getNext<AlignedAttr>()))
- F->setAlignment(std::max(F->getAlignment(), AA->getAlignment() / width));
- }
+ unsigned alignment = D->getMaxAlignment() / Context.getCharWidth();
+ if (alignment)
+ F->setAlignment(alignment);
+
// C++ ABI requires 2-byte alignment for member functions.
if (F->getAlignment() < 2 && isa<CXXMethodDecl>(D))
F->setAlignment(2);
@@ -638,102 +645,12 @@ llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV,
return llvm::ConstantStruct::get(VMContext, Fields, 4, false);
}
-static CodeGenModule::GVALinkage
-GetLinkageForVariable(ASTContext &Context, const VarDecl *VD) {
- // If this is a static data member, compute the kind of template
- // specialization. Otherwise, this variable is not part of a
- // template.
- TemplateSpecializationKind TSK = TSK_Undeclared;
- if (VD->isStaticDataMember())
- TSK = VD->getTemplateSpecializationKind();
-
- Linkage L = VD->getLinkage();
- if (L == ExternalLinkage && Context.getLangOptions().CPlusPlus &&
- VD->getType()->getLinkage() == UniqueExternalLinkage)
- L = UniqueExternalLinkage;
-
- switch (L) {
- case NoLinkage:
- case InternalLinkage:
- case UniqueExternalLinkage:
- return CodeGenModule::GVA_Internal;
-
- case ExternalLinkage:
- switch (TSK) {
- case TSK_Undeclared:
- case TSK_ExplicitSpecialization:
- return CodeGenModule::GVA_StrongExternal;
-
- case TSK_ExplicitInstantiationDeclaration:
- llvm_unreachable("Variable should not be instantiated");
- // Fall through to treat this like any other instantiation.
-
- case TSK_ExplicitInstantiationDefinition:
- return CodeGenModule::GVA_ExplicitTemplateInstantiation;
-
- case TSK_ImplicitInstantiation:
- return CodeGenModule::GVA_TemplateInstantiation;
- }
- }
-
- return CodeGenModule::GVA_StrongExternal;
-}
-
bool CodeGenModule::MayDeferGeneration(const ValueDecl *Global) {
- // Never defer when EmitAllDecls is specified or the decl has
- // attribute used.
- if (Features.EmitAllDecls || Global->hasAttr<UsedAttr>())
- return false;
-
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Global)) {
- // Constructors and destructors should never be deferred.
- if (FD->hasAttr<ConstructorAttr>() ||
- FD->hasAttr<DestructorAttr>())
- return false;
-
- // The key function for a class must never be deferred.
- if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Global)) {
- const CXXRecordDecl *RD = MD->getParent();
- if (MD->isOutOfLine() && RD->isDynamicClass()) {
- const CXXMethodDecl *KeyFunction = getContext().getKeyFunction(RD);
- if (KeyFunction &&
- KeyFunction->getCanonicalDecl() == MD->getCanonicalDecl())
- return false;
- }
- }
-
- GVALinkage Linkage = GetLinkageForFunction(getContext(), FD, Features);
-
- // static, static inline, always_inline, and extern inline functions can
- // always be deferred. Normal inline functions can be deferred in C99/C++.
- // Implicit template instantiations can also be deferred in C++.
- if (Linkage == GVA_Internal || Linkage == GVA_C99Inline ||
- Linkage == GVA_CXXInline || Linkage == GVA_TemplateInstantiation)
- return true;
+ // Never defer when EmitAllDecls is specified.
+ if (Features.EmitAllDecls)
return false;
- }
- const VarDecl *VD = cast<VarDecl>(Global);
- assert(VD->isFileVarDecl() && "Invalid decl");
-
- // We never want to defer structs that have non-trivial constructors or
- // destructors.
-
- // FIXME: Handle references.
- if (const RecordType *RT = VD->getType()->getAs<RecordType>()) {
- if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
- if (!RD->hasTrivialConstructor() || !RD->hasTrivialDestructor())
- return false;
- }
- }
-
- GVALinkage L = GetLinkageForVariable(getContext(), VD);
- if (L == GVA_Internal || L == GVA_TemplateInstantiation) {
- if (!(VD->getInit() && VD->getInit()->HasSideEffects(Context)))
- return true;
- }
-
- return false;
+ return !getContext().DeclMustBeEmitted(Global);
}
llvm::Constant *CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
@@ -774,6 +691,15 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
// Ignore declarations, they will be emitted on their first use.
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Global)) {
+ if (FD->getIdentifier()) {
+ llvm::StringRef Name = FD->getName();
+ if (Name == "_Block_object_assign") {
+ BlockObjectAssignDecl = FD;
+ } else if (Name == "_Block_object_dispose") {
+ BlockObjectDisposeDecl = FD;
+ }
+ }
+
// Forward declarations are emitted lazily on first use.
if (!FD->isThisDeclarationADefinition())
return;
@@ -781,6 +707,16 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
const VarDecl *VD = cast<VarDecl>(Global);
assert(VD->isFileVarDecl() && "Cannot emit local var decl as global.");
+ if (VD->getIdentifier()) {
+ llvm::StringRef Name = VD->getName();
+ if (Name == "_NSConcreteGlobalBlock") {
+ NSConcreteGlobalBlockDecl = VD;
+ } else if (Name == "_NSConcreteStackBlock") {
+ NSConcreteStackBlockDecl = VD;
+ }
+ }
+
+
if (VD->isThisDeclarationADefinition() != VarDecl::Definition)
return;
}
@@ -792,6 +728,14 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
EmitGlobalDefinition(GD);
return;
}
+
+ // If we're deferring emission of a C++ variable with an
+ // initializer, remember the order in which it appeared in the file.
+ if (getLangOptions().CPlusPlus && isa<VarDecl>(Global) &&
+ cast<VarDecl>(Global)->hasInit()) {
+ DelayedCXXInitPosition[Global] = CXXGlobalInits.size();
+ CXXGlobalInits.push_back(0);
+ }
// If the value has already been used, add it directly to the
// DeferredDeclsToEmit list.
@@ -816,7 +760,8 @@ void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD) {
if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D)) {
// At -O0, don't generate IR for functions with available_externally
// linkage.
- if (CodeGenOpts.OptimizationLevel == 0 &&
+ if (CodeGenOpts.OptimizationLevel == 0 &&
+ !Function->hasAttr<AlwaysInlineAttr>() &&
getFunctionLinkage(Function)
== llvm::Function::AvailableExternallyLinkage)
return;
@@ -1021,7 +966,7 @@ CodeGenModule::GetOrCreateLLVMGlobal(llvm::StringRef MangledName,
GV->setConstant(DeclIsConstantGlobal(Context, D));
// FIXME: Merge with other attribute handling code.
- if (D->getStorageClass() == VarDecl::PrivateExtern)
+ if (D->getStorageClass() == SC_PrivateExtern)
GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
if (D->hasAttr<WeakAttr>() ||
@@ -1174,6 +1119,11 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
ErrorUnsupported(D, "static initializer");
Init = llvm::UndefValue::get(getTypes().ConvertType(T));
}
+ } else {
+ // We don't need an initializer, so remove the entry for the delayed
+ // initializer position (just in case this entry was delayed).
+ if (getLangOptions().CPlusPlus)
+ DelayedCXXInitPosition.erase(D);
}
}
@@ -1235,7 +1185,7 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
GV->setAlignment(getContext().getDeclAlign(D).getQuantity());
// Set the llvm linkage type as appropriate.
- GVALinkage Linkage = GetLinkageForVariable(getContext(), D);
+ GVALinkage Linkage = getContext().GetGVALinkageForVariable(D);
if (Linkage == GVA_Internal)
GV->setLinkage(llvm::Function::InternalLinkage);
else if (D->hasAttr<DLLImportAttr>())
@@ -1254,7 +1204,8 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
GV->setLinkage(llvm::GlobalVariable::WeakAnyLinkage);
else if (!getLangOptions().CPlusPlus && !CodeGenOpts.NoCommon &&
!D->hasExternalStorage() && !D->getInit() &&
- !D->getAttr<SectionAttr>()) {
+ !D->getAttr<SectionAttr>() && !D->isThreadSpecified()) {
+ // Thread local vars aren't considered common linkage.
GV->setLinkage(llvm::GlobalVariable::CommonLinkage);
// common vars aren't constant even if declared const.
GV->setConstant(false);
@@ -1293,6 +1244,7 @@ static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
// TODO: Do invokes ever occur in C code? If so, we should handle them too.
llvm::Value::use_iterator I = UI++; // Increment before the CI is erased.
llvm::CallInst *CI = dyn_cast<llvm::CallInst>(*I);
+ if (!CI) continue; // FIXME: when we allow Invoke, just do CallSite CS(*I)
llvm::CallSite CS(CI);
if (!CI || !CS.isCallee(I)) continue;
@@ -1343,7 +1295,7 @@ static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD) {
const FunctionDecl *D = cast<FunctionDecl>(GD.getDecl());
const llvm::FunctionType *Ty = getTypes().GetFunctionType(GD);
- getMangleContext().mangleInitDiscriminator();
+ getCXXABI().getMangleContext().mangleInitDiscriminator();
// Get or create the prototype for the function.
llvm::Constant *Entry = GetAddrOfFunction(GD, Ty);
@@ -1528,18 +1480,18 @@ GetConstantCFStringEntry(llvm::StringMap<llvm::Constant*> &Map,
bool TargetIsLSB,
bool &IsUTF16,
unsigned &StringLength) {
- unsigned NumBytes = Literal->getByteLength();
+ llvm::StringRef String = Literal->getString();
+ unsigned NumBytes = String.size();
// Check for simple case.
if (!Literal->containsNonAsciiOrNull()) {
StringLength = NumBytes;
- return Map.GetOrCreateValue(llvm::StringRef(Literal->getStrData(),
- StringLength));
+ return Map.GetOrCreateValue(String);
}
// Otherwise, convert the UTF8 literals into a byte string.
llvm::SmallVector<UTF16, 128> ToBuf(NumBytes);
- const UTF8 *FromPtr = (UTF8 *)Literal->getStrData();
+ const UTF8 *FromPtr = (UTF8 *)String.data();
UTF16 *ToPtr = &ToBuf[0];
ConversionResult Result = ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes,
@@ -1552,8 +1504,7 @@ GetConstantCFStringEntry(llvm::StringMap<llvm::Constant*> &Map,
// this duplicate code.
assert(Result == sourceIllegal && "UTF-8 to UTF-16 conversion failed");
StringLength = NumBytes;
- return Map.GetOrCreateValue(llvm::StringRef(Literal->getStrData(),
- StringLength));
+ return Map.GetOrCreateValue(String);
}
// ConvertUTF8toUTF16 returns the length in ToPtr.
@@ -1753,20 +1704,17 @@ CodeGenModule::GetAddrOfConstantNSString(const StringLiteral *Literal) {
/// GetStringForStringLiteral - Return the appropriate bytes for a
/// string literal, properly padded to match the literal type.
std::string CodeGenModule::GetStringForStringLiteral(const StringLiteral *E) {
- const char *StrData = E->getStrData();
- unsigned Len = E->getByteLength();
-
const ConstantArrayType *CAT =
getContext().getAsConstantArrayType(E->getType());
assert(CAT && "String isn't pointer or array!");
// Resize the string to the right size.
- std::string Str(StrData, StrData+Len);
uint64_t RealLen = CAT->getSize().getZExtValue();
if (E->isWide())
RealLen *= getContext().Target.getWCharWidth()/8;
+ std::string Str = E->getString().str();
Str.resize(RealLen, '\0');
return Str;
@@ -1894,7 +1842,7 @@ void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) {
D->getLocation(),
D->getLocation(), cxxSelector,
getContext().VoidTy, 0,
- DC, true, false, true,
+ DC, true, false, true, false,
ObjCMethodDecl::Required);
D->addInstanceMethod(DTORMethod);
CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, DTORMethod, false);
@@ -1906,7 +1854,7 @@ void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) {
D->getLocation(),
D->getLocation(), cxxSelector,
getContext().getObjCIdType(), 0,
- DC, true, false, true,
+ DC, true, false, true, false,
ObjCMethodDecl::Required);
D->addInstanceMethod(CTORMethod);
CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, CTORMethod, true);
@@ -1993,9 +1941,16 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
// Forward declarations, no (immediate) code generation.
case Decl::ObjCClass:
case Decl::ObjCForwardProtocol:
- case Decl::ObjCCategory:
case Decl::ObjCInterface:
break;
+
+ case Decl::ObjCCategory: {
+ ObjCCategoryDecl *CD = cast<ObjCCategoryDecl>(D);
+ if (CD->IsClassExtension() && CD->hasSynthBitfield())
+ Context.ResetObjCLayout(CD->getClassInterface());
+ break;
+ }
+
case Decl::ObjCProtocol:
Runtime->GenerateProtocol(cast<ObjCProtocolDecl>(D));
@@ -2009,6 +1964,8 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
case Decl::ObjCImplementation: {
ObjCImplementationDecl *OMD = cast<ObjCImplementationDecl>(D);
+ if (Features.ObjCNonFragileABI2 && OMD->hasSynthBitfield())
+ Context.ResetObjCLayout(OMD->getClassInterface());
EmitObjCPropertyImplementations(OMD);
EmitObjCIvarInitializations(OMD);
Runtime->GenerateClass(OMD);
@@ -2118,3 +2075,88 @@ void CodeGenFunction::EmitDeclMetadata() {
}
}
}
+
+///@name Custom Runtime Function Interfaces
+///@{
+//
+// FIXME: These can be eliminated once we can have clients just get the required
+// AST nodes from the builtin tables.
+
+llvm::Constant *CodeGenModule::getBlockObjectDispose() {
+ if (BlockObjectDispose)
+ return BlockObjectDispose;
+
+ // If we saw an explicit decl, use that.
+ if (BlockObjectDisposeDecl) {
+ return BlockObjectDispose = GetAddrOfFunction(
+ BlockObjectDisposeDecl,
+ getTypes().GetFunctionType(BlockObjectDisposeDecl));
+ }
+
+ // Otherwise construct the function by hand.
+ const llvm::FunctionType *FTy;
+ std::vector<const llvm::Type*> ArgTys;
+ const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
+ ArgTys.push_back(PtrToInt8Ty);
+ ArgTys.push_back(llvm::Type::getInt32Ty(VMContext));
+ FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
+ return BlockObjectDispose =
+ CreateRuntimeFunction(FTy, "_Block_object_dispose");
+}
+
+llvm::Constant *CodeGenModule::getBlockObjectAssign() {
+ if (BlockObjectAssign)
+ return BlockObjectAssign;
+
+ // If we saw an explicit decl, use that.
+ if (BlockObjectAssignDecl) {
+ return BlockObjectAssign = GetAddrOfFunction(
+ BlockObjectAssignDecl,
+ getTypes().GetFunctionType(BlockObjectAssignDecl));
+ }
+
+ // Otherwise construct the function by hand.
+ const llvm::FunctionType *FTy;
+ std::vector<const llvm::Type*> ArgTys;
+ const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
+ ArgTys.push_back(PtrToInt8Ty);
+ ArgTys.push_back(PtrToInt8Ty);
+ ArgTys.push_back(llvm::Type::getInt32Ty(VMContext));
+ FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
+ return BlockObjectAssign =
+ CreateRuntimeFunction(FTy, "_Block_object_assign");
+}
+
+llvm::Constant *CodeGenModule::getNSConcreteGlobalBlock() {
+ if (NSConcreteGlobalBlock)
+ return NSConcreteGlobalBlock;
+
+ // If we saw an explicit decl, use that.
+ if (NSConcreteGlobalBlockDecl) {
+ return NSConcreteGlobalBlock = GetAddrOfGlobalVar(
+ NSConcreteGlobalBlockDecl,
+ getTypes().ConvertType(NSConcreteGlobalBlockDecl->getType()));
+ }
+
+ // Otherwise construct the variable by hand.
+ return NSConcreteGlobalBlock = CreateRuntimeVariable(
+ PtrToInt8Ty, "_NSConcreteGlobalBlock");
+}
+
+llvm::Constant *CodeGenModule::getNSConcreteStackBlock() {
+ if (NSConcreteStackBlock)
+ return NSConcreteStackBlock;
+
+ // If we saw an explicit decl, use that.
+ if (NSConcreteStackBlockDecl) {
+ return NSConcreteStackBlock = GetAddrOfGlobalVar(
+ NSConcreteStackBlockDecl,
+ getTypes().ConvertType(NSConcreteStackBlockDecl->getType()));
+ }
+
+ // Otherwise construct the variable by hand.
+ return NSConcreteStackBlock = CreateRuntimeVariable(
+ PtrToInt8Ty, "_NSConcreteStackBlock");
+}
+
+///@}
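
[Editor's note, not part of the patch.] The getters added above all follow one memoized shape: return the cached constant, else reuse an explicit declaration recorded earlier in EmitGlobal, else synthesize the expected signature by hand (void(i8*, i32) for _Block_object_dispose, void(i8*, i8*, i32) for _Block_object_assign). A small self-contained sketch of that get-or-create pattern (illustrative only; BlocksRuntimeCache and the strings are placeholders, not Clang or LLVM API):

    #include <iostream>
    #include <string>

    // Toy stand-ins for the cached llvm::Constant* and the decl seen in the TU.
    struct BlocksRuntimeCache {
      std::string Fn;    // empty => not created yet
      std::string Decl;  // empty => no explicit declaration was seen

      const std::string &getBlockObjectDispose() {
        if (!Fn.empty()) return Fn;           // cached from an earlier call
        if (!Decl.empty()) return Fn = Decl;  // prefer the declaration we saw
        // Otherwise build the expected signature by hand.
        return Fn = "declare void @_Block_object_dispose(i8*, i32)";
      }
    };

    int main() {
      BlocksRuntimeCache A;
      std::cout << A.getBlockObjectDispose() << "\n";  // synthesized by hand

      BlocksRuntimeCache B;
      B.Decl = "declare void @_Block_object_dispose(i8*, i32)";
      std::cout << B.getBlockObjectDispose() << "\n";  // reuses the declaration
    }
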
diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h
index 27f15fc018cd..cabff9e1cad5 100644
--- a/lib/CodeGen/CodeGenModule.h
+++ b/lib/CodeGen/CodeGenModule.h
@@ -22,7 +22,6 @@
#include "CGCall.h"
#include "CGCXX.h"
#include "CGVTables.h"
-#include "CGCXXABI.h"
#include "CodeGenTypes.h"
#include "GlobalDecl.h"
#include "Mangle.h"
@@ -71,6 +70,7 @@ namespace clang {
namespace CodeGen {
class CodeGenFunction;
+ class CGCXXABI;
class CGDebugInfo;
class CGObjCRuntime;
class MangleBuffer;
@@ -109,6 +109,7 @@ class CodeGenModule : public BlockModule {
const llvm::TargetData &TheTargetData;
mutable const TargetCodeGenInfo *TheTargetCodeGenInfo;
Diagnostic &Diags;
+ CGCXXABI &ABI;
CodeGenTypes Types;
/// VTables - Holds information about C++ vtables.
@@ -116,7 +117,6 @@ class CodeGenModule : public BlockModule {
friend class CodeGenVTables;
CGObjCRuntime* Runtime;
- CXXABI* ABI;
CGDebugInfo* DebugInfo;
// WeakRefReferences - A set of references that have only been seen via
@@ -162,6 +162,12 @@ class CodeGenModule : public BlockModule {
/// CXXGlobalInits - Global variables with initializers that need to run
/// before main.
std::vector<llvm::Constant*> CXXGlobalInits;
+
+ /// When a C++ decl with an initializer is deferred, null is
+ /// appended to CXXGlobalInits, and the index of that null is placed
+ /// here so that the initializer will be performed in the correct
+ /// order.
+ llvm::DenseMap<const Decl*, unsigned> DelayedCXXInitPosition;
/// - Global variables with initializers whose order of initialization
/// is set by init_priority attribute.
@@ -183,10 +189,23 @@ class CodeGenModule : public BlockModule {
/// Lazily create the Objective-C runtime
void createObjCRuntime();
- /// Lazily create the C++ ABI
- void createCXXABI();
llvm::LLVMContext &VMContext;
+
+ /// @name Cache for Blocks Runtime Globals
+ /// @{
+
+ const VarDecl *NSConcreteGlobalBlockDecl;
+ const VarDecl *NSConcreteStackBlockDecl;
+ llvm::Constant *NSConcreteGlobalBlock;
+ llvm::Constant *NSConcreteStackBlock;
+
+ const FunctionDecl *BlockObjectAssignDecl;
+ const FunctionDecl *BlockObjectDisposeDecl;
+ llvm::Constant *BlockObjectAssign;
+ llvm::Constant *BlockObjectDispose;
+
+ /// @}
public:
CodeGenModule(ASTContext &C, const CodeGenOptions &CodeGenOpts,
llvm::Module &M, const llvm::TargetData &TD, Diagnostic &Diags);
@@ -207,15 +226,8 @@ public:
/// been configured.
bool hasObjCRuntime() { return !!Runtime; }
- /// getCXXABI() - Return a reference to the configured
- /// C++ ABI.
- CXXABI &getCXXABI() {
- if (!ABI) createCXXABI();
- return *ABI;
- }
-
- /// hasCXXABI() - Return true iff a C++ ABI has been configured.
- bool hasCXXABI() { return !!ABI; }
+ /// getCXXABI() - Return a reference to the configured C++ ABI.
+ CGCXXABI &getCXXABI() { return ABI; }
llvm::Value *getStaticLocalDeclAddress(const VarDecl *VD) {
return StaticLocalDeclMap[VD];
@@ -231,15 +243,11 @@ public:
const LangOptions &getLangOptions() const { return Features; }
llvm::Module &getModule() const { return TheModule; }
CodeGenTypes &getTypes() { return Types; }
- MangleContext &getMangleContext() {
- if (!ABI) createCXXABI();
- return ABI->getMangleContext();
- }
CodeGenVTables &getVTables() { return VTables; }
Diagnostic &getDiags() const { return Diags; }
const llvm::TargetData &getTargetData() const { return TheTargetData; }
llvm::LLVMContext &getLLVMContext() { return VMContext; }
- const TargetCodeGenInfo &getTargetCodeGenInfo() const;
+ const TargetCodeGenInfo &getTargetCodeGenInfo();
bool isTargetDarwin() const;
/// getDeclVisibilityMode - Compute the visibility of the decl \arg D.
@@ -249,6 +257,11 @@ public:
/// GlobalValue.
void setGlobalVisibility(llvm::GlobalValue *GV, const Decl *D) const;
+ /// setTypeVisibility - Set the visibility for the given global
+ /// value which holds information about a type.
+ void setTypeVisibility(llvm::GlobalValue *GV, const CXXRecordDecl *D,
+ bool IsForRTTI) const;
+
llvm::Constant *GetAddrOfGlobal(GlobalDecl GD) {
if (isa<CXXConstructorDecl>(GD.getDecl()))
return GetAddrOfCXXConstructor(cast<CXXConstructorDecl>(GD.getDecl()),
@@ -289,7 +302,8 @@ public:
/// a class. Returns null if the offset is 0.
llvm::Constant *
GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
- const CXXBaseSpecifierArray &BasePath);
+ CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd);
/// GetStringForStringLiteral - Return the appropriate bytes for a string
/// literal, properly padded to match the literal type. If only the address of
@@ -344,10 +358,6 @@ public:
llvm::GlobalValue *GetAddrOfCXXDestructor(const CXXDestructorDecl *D,
CXXDtorType Type);
- // GetCXXMemberFunctionPointerValue - Given a method declaration, return the
- // integer used in a member function pointer to refer to that value.
- llvm::Constant *GetCXXMemberFunctionPointerValue(const CXXMethodDecl *MD);
-
/// getBuiltinLibFunction - Given a builtin id for a function like
/// "__builtin_fabsf", return a Function* for "fabsf".
llvm::Value *getBuiltinLibFunction(const FunctionDecl *FD,
@@ -392,6 +402,16 @@ public:
llvm::Constant *CreateRuntimeVariable(const llvm::Type *Ty,
llvm::StringRef Name);
+ ///@name Custom Blocks Runtime Interfaces
+ ///@{
+
+ llvm::Constant *getNSConcreteGlobalBlock();
+ llvm::Constant *getNSConcreteStackBlock();
+ llvm::Constant *getBlockObjectAssign();
+ llvm::Constant *getBlockObjectDispose();
+
+ ///@}
+
void UpdateCompletedType(const TagDecl *TD) {
// Make sure that this type is translated.
Types.UpdateCompletedType(TD);
@@ -411,8 +431,6 @@ public:
llvm::Constant *EmitAnnotateAttr(llvm::GlobalValue *GV,
const AnnotateAttr *AA, unsigned LineNo);
- llvm::Constant *EmitPointerToDataMember(const FieldDecl *FD);
-
/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified stmt yet.
/// \param OmitOnError - If true, then this error should only be emitted if no
@@ -473,15 +491,6 @@ public:
void EmitVTable(CXXRecordDecl *Class, bool DefinitionRequired);
- enum GVALinkage {
- GVA_Internal,
- GVA_C99Inline,
- GVA_CXXInline,
- GVA_StrongExternal,
- GVA_TemplateInstantiation,
- GVA_ExplicitTemplateInstantiation
- };
-
llvm::GlobalVariable::LinkageTypes
getFunctionLinkage(const FunctionDecl *FD);
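
[Editor's note, not part of the patch.] DelayedCXXInitPosition above keeps dynamic initialization in declaration order even when emission of a variable is deferred: a null placeholder is pushed onto CXXGlobalInits right away and its index is recorded, so the real initializer can later be written into that same slot. A self-contained sketch of the placeholder-slot technique (plain C++; the strings stand in for emitted initializer functions):

    #include <cstdio>
    #include <map>
    #include <vector>

    int main() {
      std::vector<const char *> inits;   // plays the role of CXXGlobalInits
      std::map<int, size_t> delayedPos;  // plays the role of DelayedCXXInitPosition

      inits.push_back("init A");         // A emitted immediately
      delayedPos[1] = inits.size();      // B deferred: reserve its slot now
      inits.push_back(nullptr);
      inits.push_back("init C");         // C emitted immediately

      inits[delayedPos[1]] = "init B";   // B emitted later: fill the slot

      for (const char *s : inits)        // still runs A, B, C in order
        if (s) std::printf("%s\n", s);
    }
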
diff --git a/lib/CodeGen/CodeGenTypes.cpp b/lib/CodeGen/CodeGenTypes.cpp
index d469b906fca1..5ab65c5779b6 100644
--- a/lib/CodeGen/CodeGenTypes.cpp
+++ b/lib/CodeGen/CodeGenTypes.cpp
@@ -13,6 +13,7 @@
#include "CodeGenTypes.h"
#include "CGCall.h"
+#include "CGCXXABI.h"
#include "CGRecordLayout.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
@@ -26,9 +27,10 @@ using namespace clang;
using namespace CodeGen;
CodeGenTypes::CodeGenTypes(ASTContext &Ctx, llvm::Module& M,
- const llvm::TargetData &TD, const ABIInfo &Info)
+ const llvm::TargetData &TD, const ABIInfo &Info,
+ CGCXXABI &CXXABI)
: Context(Ctx), Target(Ctx.Target), TheModule(M), TheTargetData(TD),
- TheABIInfo(Info) {
+ TheABIInfo(Info), TheCXXABI(CXXABI) {
}
CodeGenTypes::~CodeGenTypes() {
@@ -400,17 +402,7 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
}
case Type::MemberPointer: {
- // FIXME: This is ABI dependent. We use the Itanium C++ ABI.
- // http://www.codesourcery.com/public/cxx-abi/abi.html#member-pointers
- // If we ever want to support other ABIs this needs to be abstracted.
-
- QualType ETy = cast<MemberPointerType>(Ty).getPointeeType();
- const llvm::Type *PtrDiffTy =
- ConvertTypeRecursive(Context.getPointerDiffType());
- if (ETy->isFunctionType())
- return llvm::StructType::get(TheModule.getContext(), PtrDiffTy, PtrDiffTy,
- NULL);
- return PtrDiffTy;
+ return getCXXABI().ConvertMemberPointerType(cast<MemberPointerType>(&Ty));
}
}
@@ -491,31 +483,34 @@ CodeGenTypes::getCGRecordLayout(const RecordDecl *TD) const {
return *Layout;
}
-bool CodeGenTypes::ContainsPointerToDataMember(QualType T) {
+bool CodeGenTypes::isZeroInitializable(QualType T) {
// No need to check for member pointers when not compiling C++.
if (!Context.getLangOptions().CPlusPlus)
- return false;
+ return true;
T = Context.getBaseElementType(T);
+ // Records are non-zero-initializable if they contain any
+ // non-zero-initializable subobjects.
if (const RecordType *RT = T->getAs<RecordType>()) {
const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
-
- return ContainsPointerToDataMember(RD);
+ return isZeroInitializable(RD);
}
-
+
+ // We have to ask the ABI about member pointers.
if (const MemberPointerType *MPT = T->getAs<MemberPointerType>())
- return !MPT->getPointeeType()->isFunctionType();
+ return getCXXABI().isZeroInitializable(MPT);
- return false;
+ // Everything else is okay.
+ return true;
}
-bool CodeGenTypes::ContainsPointerToDataMember(const CXXRecordDecl *RD) {
+bool CodeGenTypes::isZeroInitializable(const CXXRecordDecl *RD) {
// FIXME: It would be better if there was a way to explicitly compute the
// record layout instead of converting to a type.
ConvertTagDeclType(RD);
const CGRecordLayout &Layout = getCGRecordLayout(RD);
- return Layout.containsPointerToDataMember();
+ return Layout.isZeroInitializable();
}
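
[Editor's note, not part of the patch.] The rename above matters because whether an LLVM zeroinitializer is a correct "null" state now depends on the C++ ABI: under the Itanium ABI a null pointer to data member is encoded as the offset -1, so all-zero bytes are a valid offset (the first field), not null. A small check of that fact (illustrative; the behavior shown is Itanium-specific, which is exactly why the query is delegated to CGCXXABI):

    #include <cstring>
    #include <iostream>

    struct S { int x; };

    int main() {
      int S::*null_mp = nullptr;  // Itanium encodes this as the offset -1
      int S::*zeroed_mp;
      std::memset(&zeroed_mp, 0, sizeof zeroed_mp);  // what zeroinitializer gives

      std::cout << (null_mp == nullptr) << "\n";    // 1
      std::cout << (zeroed_mp == nullptr) << "\n";  // 0 on Itanium: offset 0 is
                                                    // a real member (S::x), not null
    }
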
diff --git a/lib/CodeGen/CodeGenTypes.h b/lib/CodeGen/CodeGenTypes.h
index c7f48e6c9dd4..1fc2153fcadb 100644
--- a/lib/CodeGen/CodeGenTypes.h
+++ b/lib/CodeGen/CodeGenTypes.h
@@ -14,13 +14,12 @@
#ifndef CLANG_CODEGEN_CODEGENTYPES_H
#define CLANG_CODEGEN_CODEGENTYPES_H
+#include "CGCall.h"
+#include "GlobalDecl.h"
#include "llvm/Module.h"
#include "llvm/ADT/DenseMap.h"
#include <vector>
-#include "CGCall.h"
-#include "GlobalDecl.h"
-
namespace llvm {
class FunctionType;
class Module;
@@ -51,6 +50,7 @@ namespace clang {
typedef CanQual<Type> CanQualType;
namespace CodeGen {
+ class CGCXXABI;
class CGRecordLayout;
/// CodeGenTypes - This class organizes the cross-module state that is used
@@ -61,6 +61,7 @@ class CodeGenTypes {
llvm::Module& TheModule;
const llvm::TargetData& TheTargetData;
const ABIInfo& TheABIInfo;
+ CGCXXABI &TheCXXABI;
llvm::SmallVector<std::pair<QualType,
llvm::OpaqueType *>, 8> PointersToResolve;
@@ -102,13 +103,14 @@ private:
public:
CodeGenTypes(ASTContext &Ctx, llvm::Module &M, const llvm::TargetData &TD,
- const ABIInfo &Info);
+ const ABIInfo &Info, CGCXXABI &CXXABI);
~CodeGenTypes();
const llvm::TargetData &getTargetData() const { return TheTargetData; }
const TargetInfo &getTarget() const { return Target; }
ASTContext &getContext() const { return Context; }
const ABIInfo &getABIInfo() const { return TheABIInfo; }
+ CGCXXABI &getCXXABI() const { return TheCXXABI; }
llvm::LLVMContext &getLLVMContext() { return TheModule.getContext(); }
/// ConvertType - Convert type T into a llvm::Type.
@@ -139,7 +141,7 @@ public:
/// GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable,
/// given a CXXMethodDecl. If the method to has an incomplete return type,
/// and/or incomplete argument types, this will return the opaque type.
- const llvm::Type *GetFunctionTypeForVTable(const CXXMethodDecl *MD);
+ const llvm::Type *GetFunctionTypeForVTable(GlobalDecl GD);
const CGRecordLayout &getCGRecordLayout(const RecordDecl*) const;
@@ -169,7 +171,9 @@ public:
const CGFunctionInfo &getFunctionInfo(CanQual<FunctionNoProtoType> Ty,
bool IsRecursive = false);
- // getFunctionInfo - Get the function info for a member function.
+ /// getFunctionInfo - Get the function info for a member function of
+ /// the given type. This is used for calls through member function
+ /// pointers.
const CGFunctionInfo &getFunctionInfo(const CXXRecordDecl *RD,
const FunctionProtoType *FTP);
@@ -205,13 +209,13 @@ public: // These are internal details of CGT that shouldn't be used externally.
void GetExpandedTypes(QualType Ty, std::vector<const llvm::Type*> &ArgTys,
bool IsRecursive);
- /// ContainsPointerToDataMember - Return whether the given type contains a
- /// pointer to a data member.
- bool ContainsPointerToDataMember(QualType T);
+ /// IsZeroInitializable - Return whether a type can be
+ /// zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
+ bool isZeroInitializable(QualType T);
- /// ContainsPointerToDataMember - Return whether the record decl contains a
- /// pointer to a data member.
- bool ContainsPointerToDataMember(const CXXRecordDecl *RD);
+ /// IsZeroInitializable - Return whether a record type can be
+ /// zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
+ bool isZeroInitializable(const CXXRecordDecl *RD);
};
} // end namespace CodeGen
diff --git a/lib/CodeGen/ItaniumCXXABI.cpp b/lib/CodeGen/ItaniumCXXABI.cpp
index 98db75ea2b46..eefc530ccf18 100644
--- a/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/lib/CodeGen/ItaniumCXXABI.cpp
@@ -12,28 +12,1012 @@
// documented at:
// http://www.codesourcery.com/public/cxx-abi/abi.html
// http://www.codesourcery.com/public/cxx-abi/abi-eh.html
+//
+// It also supports the closely-related ARM ABI, documented at:
+// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0041c/IHI0041C_cppabi.pdf
+//
//===----------------------------------------------------------------------===//
#include "CGCXXABI.h"
+#include "CGRecordLayout.h"
+#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "Mangle.h"
+#include <clang/AST/Type.h>
+#include <llvm/Target/TargetData.h>
+#include <llvm/Value.h>
using namespace clang;
+using namespace CodeGen;
namespace {
-class ItaniumCXXABI : public CodeGen::CXXABI {
+class ItaniumCXXABI : public CodeGen::CGCXXABI {
+private:
+ const llvm::IntegerType *PtrDiffTy;
+protected:
CodeGen::MangleContext MangleCtx;
+ bool IsARM;
+
+ // It's a little silly for us to cache this.
+ const llvm::IntegerType *getPtrDiffTy() {
+ if (!PtrDiffTy) {
+ QualType T = getContext().getPointerDiffType();
+ const llvm::Type *Ty = CGM.getTypes().ConvertTypeRecursive(T);
+ PtrDiffTy = cast<llvm::IntegerType>(Ty);
+ }
+ return PtrDiffTy;
+ }
+
+ bool NeedsArrayCookie(QualType ElementType);
+
public:
- ItaniumCXXABI(CodeGen::CodeGenModule &CGM) :
- MangleCtx(CGM.getContext(), CGM.getDiags()) { }
+ ItaniumCXXABI(CodeGen::CodeGenModule &CGM, bool IsARM = false) :
+ CGCXXABI(CGM), PtrDiffTy(0), MangleCtx(getContext(), CGM.getDiags()),
+ IsARM(IsARM) { }
CodeGen::MangleContext &getMangleContext() {
return MangleCtx;
}
+
+ bool isZeroInitializable(const MemberPointerType *MPT);
+
+ const llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT);
+
+ llvm::Value *EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
+ llvm::Value *&This,
+ llvm::Value *MemFnPtr,
+ const MemberPointerType *MPT);
+
+ llvm::Value *EmitMemberDataPointerAddress(CodeGenFunction &CGF,
+ llvm::Value *Base,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT);
+
+ llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
+ const CastExpr *E,
+ llvm::Value *Src);
+
+ llvm::Constant *EmitMemberPointerConversion(llvm::Constant *C,
+ const CastExpr *E);
+
+ llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT);
+
+ llvm::Constant *EmitMemberPointer(const CXXMethodDecl *MD);
+ llvm::Constant *EmitMemberPointer(const FieldDecl *FD);
+
+ llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
+ llvm::Value *L,
+ llvm::Value *R,
+ const MemberPointerType *MPT,
+ bool Inequality);
+
+ llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
+ llvm::Value *Addr,
+ const MemberPointerType *MPT);
+
+ void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
+ CXXCtorType T,
+ CanQualType &ResTy,
+ llvm::SmallVectorImpl<CanQualType> &ArgTys);
+
+ void BuildDestructorSignature(const CXXDestructorDecl *Dtor,
+ CXXDtorType T,
+ CanQualType &ResTy,
+ llvm::SmallVectorImpl<CanQualType> &ArgTys);
+
+ void BuildInstanceFunctionParams(CodeGenFunction &CGF,
+ QualType &ResTy,
+ FunctionArgList &Params);
+
+ void EmitInstanceFunctionProlog(CodeGenFunction &CGF);
+
+ CharUnits GetArrayCookieSize(QualType ElementType);
+ llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
+ llvm::Value *NewPtr,
+ llvm::Value *NumElements,
+ QualType ElementType);
+ void ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
+ QualType ElementType, llvm::Value *&NumElements,
+ llvm::Value *&AllocPtr, CharUnits &CookieSize);
+};
+
+class ARMCXXABI : public ItaniumCXXABI {
+public:
+ ARMCXXABI(CodeGen::CodeGenModule &CGM) : ItaniumCXXABI(CGM, /*ARM*/ true) {}
+
+ void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
+ CXXCtorType T,
+ CanQualType &ResTy,
+ llvm::SmallVectorImpl<CanQualType> &ArgTys);
+
+ void BuildDestructorSignature(const CXXDestructorDecl *Dtor,
+ CXXDtorType T,
+ CanQualType &ResTy,
+ llvm::SmallVectorImpl<CanQualType> &ArgTys);
+
+ void BuildInstanceFunctionParams(CodeGenFunction &CGF,
+ QualType &ResTy,
+ FunctionArgList &Params);
+
+ void EmitInstanceFunctionProlog(CodeGenFunction &CGF);
+
+ void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV, QualType ResTy);
+
+ CharUnits GetArrayCookieSize(QualType ElementType);
+ llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
+ llvm::Value *NewPtr,
+ llvm::Value *NumElements,
+ QualType ElementType);
+ void ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
+ QualType ElementType, llvm::Value *&NumElements,
+ llvm::Value *&AllocPtr, CharUnits &CookieSize);
+
+private:
+ /// \brief Returns true if the given instance method is one of the
+ /// kinds that the ARM ABI says returns 'this'.
+ static bool HasThisReturn(GlobalDecl GD) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+ return ((isa<CXXDestructorDecl>(MD) && GD.getDtorType() != Dtor_Deleting) ||
+ (isa<CXXConstructorDecl>(MD)));
+ }
};
}
-CodeGen::CXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
+CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
return new ItaniumCXXABI(CGM);
}
+CodeGen::CGCXXABI *CodeGen::CreateARMCXXABI(CodeGenModule &CGM) {
+ return new ARMCXXABI(CGM);
+}
+
+const llvm::Type *
+ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
+ if (MPT->isMemberDataPointer())
+ return getPtrDiffTy();
+ else
+ return llvm::StructType::get(CGM.getLLVMContext(),
+ getPtrDiffTy(), getPtrDiffTy(), NULL);
+}
+
+/// In the Itanium and ARM ABIs, method pointers have the form:
+/// struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
+///
+/// In the Itanium ABI:
+/// - method pointers are virtual if (memptr.ptr & 1) is nonzero
+/// - the this-adjustment is (memptr.adj)
+/// - the virtual offset is (memptr.ptr - 1)
+///
+/// In the ARM ABI:
+/// - method pointers are virtual if (memptr.adj & 1) is nonzero
+/// - the this-adjustment is (memptr.adj >> 1)
+/// - the virtual offset is (memptr.ptr)
+/// ARM uses 'adj' for the virtual flag because Thumb functions
+/// may be only single-byte aligned.
+///
+/// If the member is virtual, the adjusted 'this' pointer points
+/// to a vtable pointer from which the virtual offset is applied.
+///
+/// If the member is non-virtual, memptr.ptr is the address of
+/// the function to call.
+llvm::Value *
+ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
+ llvm::Value *&This,
+ llvm::Value *MemFnPtr,
+ const MemberPointerType *MPT) {
+ CGBuilderTy &Builder = CGF.Builder;
+
+ const FunctionProtoType *FPT =
+ MPT->getPointeeType()->getAs<FunctionProtoType>();
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
+
+ const llvm::FunctionType *FTy =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(RD, FPT),
+ FPT->isVariadic());
+
+ const llvm::IntegerType *ptrdiff = getPtrDiffTy();
+ llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(ptrdiff, 1);
+
+ llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
+ llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
+ llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");
+
+ // Extract memptr.adj, which is in the second field.
+ llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");
+
+ // Compute the true adjustment.
+ llvm::Value *Adj = RawAdj;
+ if (IsARM)
+ Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");
+
+ // Apply the adjustment and cast back to the original struct type
+ // for consistency.
+ llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
+ Ptr = Builder.CreateInBoundsGEP(Ptr, Adj);
+ This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
+
+ // Load the function pointer.
+ llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");
+
+ // If the LSB in the function pointer is 1, the function pointer points to
+ // a virtual function.
+ llvm::Value *IsVirtual;
+ if (IsARM)
+ IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
+ else
+ IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
+ IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
+ Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
+
+ // In the virtual path, the adjustment left 'This' pointing to the
+ // vtable of the correct base subobject. The "function pointer" is an
+ // offset within the vtable (+1 for the virtual flag on non-ARM).
+ CGF.EmitBlock(FnVirtual);
+
+ // Cast the adjusted this to a pointer to vtable pointer and load.
+ const llvm::Type *VTableTy = Builder.getInt8PtrTy();
+ llvm::Value *VTable = Builder.CreateBitCast(This, VTableTy->getPointerTo());
+ VTable = Builder.CreateLoad(VTable, "memptr.vtable");
+
+ // Apply the offset.
+ llvm::Value *VTableOffset = FnAsInt;
+ if (!IsARM) VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
+ VTable = Builder.CreateGEP(VTable, VTableOffset);
+
+ // Load the virtual function to call.
+ VTable = Builder.CreateBitCast(VTable, FTy->getPointerTo()->getPointerTo());
+ llvm::Value *VirtualFn = Builder.CreateLoad(VTable, "memptr.virtualfn");
+ CGF.EmitBranch(FnEnd);
+
+  // In the non-virtual path, memptr.ptr already holds the address of the
+  // function to call; just convert it to a function pointer.
+ CGF.EmitBlock(FnNonVirtual);
+ llvm::Value *NonVirtualFn =
+ Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn");
+
+ // We're done.
+ CGF.EmitBlock(FnEnd);
+ llvm::PHINode *Callee = Builder.CreatePHI(FTy->getPointerTo());
+ Callee->reserveOperandSpace(2);
+ Callee->addIncoming(VirtualFn, FnVirtual);
+ Callee->addIncoming(NonVirtualFn, FnNonVirtual);
+ return Callee;
+}
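+// Illustrative sketch of what the lowering above produces for a hypothetical
+//   struct A { virtual void f(); void g(); };
+//   void call(A *a, void (A::*mp)()) { (a->*mp)(); }
+// 'a' is adjusted by memptr.adj (shifted right by one on ARM), the virtual
+// bit is tested, and the call target is either the vtable entry at byte
+// offset memptr.ptr (minus 1 on Itanium) or memptr.ptr itself, converted
+// to a function pointer.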
+
+/// Compute an l-value by applying the given pointer-to-member to a
+/// base object.
+llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF,
+ llvm::Value *Base,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT) {
+ assert(MemPtr->getType() == getPtrDiffTy());
+
+ CGBuilderTy &Builder = CGF.Builder;
+
+ unsigned AS = cast<llvm::PointerType>(Base->getType())->getAddressSpace();
+
+ // Cast to char*.
+ Base = Builder.CreateBitCast(Base, Builder.getInt8Ty()->getPointerTo(AS));
+
+ // Apply the offset, which we assume is non-null.
+ llvm::Value *Addr = Builder.CreateInBoundsGEP(Base, MemPtr, "memptr.offset");
+
+ // Cast the address to the appropriate pointer type, adopting the
+ // address space of the base pointer.
+ const llvm::Type *PType
+ = CGF.ConvertTypeForMem(MPT->getPointeeType())->getPointerTo(AS);
+ return Builder.CreateBitCast(Addr, PType);
+}
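+// Illustrative example (hypothetical types): given
+//   struct S { int a; int b; };
+//   int S::*mp = &S::b;
+// the access 'obj.*mp' is lowered by adding the ptrdiff_t stored in 'mp'
+// (here, the byte offset of 'b' within S) to the address of 'obj' and then
+// using the result as an int lvalue.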
+
+/// Perform a derived-to-base or base-to-derived member pointer conversion.
+///
+/// Obligatory offset/adjustment diagram:
+/// <-- offset --> <-- adjustment -->
+/// |--------------------------|----------------------|--------------------|
+/// ^Derived address point ^Base address point ^Member address point
+///
+/// So when converting a base member pointer to a derived member pointer,
+/// we add the offset to the adjustment because the address point has
+/// decreased; and conversely, when converting a derived MP to a base MP
+/// we subtract the offset from the adjustment because the address point
+/// has increased.
+///
+/// The standard forbids (at compile time) conversion to and from
+/// virtual bases, which is why we don't have to consider them here.
+///
+/// The standard forbids (at run time) casting a derived MP to a base
+/// MP when the derived MP does not point to a member of the base.
+/// This is why -1 is a reasonable choice for null data member
+/// pointers.
+llvm::Value *
+ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
+ const CastExpr *E,
+ llvm::Value *Src) {
+ assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
+ E->getCastKind() == CK_BaseToDerivedMemberPointer);
+
+ if (isa<llvm::Constant>(Src))
+ return EmitMemberPointerConversion(cast<llvm::Constant>(Src), E);
+
+ CGBuilderTy &Builder = CGF.Builder;
+
+ const MemberPointerType *SrcTy =
+ E->getSubExpr()->getType()->getAs<MemberPointerType>();
+ const MemberPointerType *DestTy = E->getType()->getAs<MemberPointerType>();
+
+ const CXXRecordDecl *SrcDecl = SrcTy->getClass()->getAsCXXRecordDecl();
+ const CXXRecordDecl *DestDecl = DestTy->getClass()->getAsCXXRecordDecl();
+
+ bool DerivedToBase =
+ E->getCastKind() == CK_DerivedToBaseMemberPointer;
+
+ const CXXRecordDecl *BaseDecl, *DerivedDecl;
+ if (DerivedToBase)
+ DerivedDecl = SrcDecl, BaseDecl = DestDecl;
+ else
+ BaseDecl = SrcDecl, DerivedDecl = DestDecl;
+
+ llvm::Constant *Adj =
+ CGF.CGM.GetNonVirtualBaseClassOffset(DerivedDecl,
+ E->path_begin(),
+ E->path_end());
+ if (!Adj) return Src;
+
+ // For member data pointers, this is just a matter of adding the
+ // offset if the source is non-null.
+ if (SrcTy->isMemberDataPointer()) {
+ llvm::Value *Dst;
+ if (DerivedToBase)
+ Dst = Builder.CreateNSWSub(Src, Adj, "adj");
+ else
+ Dst = Builder.CreateNSWAdd(Src, Adj, "adj");
+
+ // Null check.
+ llvm::Value *Null = llvm::Constant::getAllOnesValue(Src->getType());
+ llvm::Value *IsNull = Builder.CreateICmpEQ(Src, Null, "memptr.isnull");
+ return Builder.CreateSelect(IsNull, Src, Dst);
+ }
+
+ // The this-adjustment is left-shifted by 1 on ARM.
+ if (IsARM) {
+ uint64_t Offset = cast<llvm::ConstantInt>(Adj)->getZExtValue();
+ Offset <<= 1;
+ Adj = llvm::ConstantInt::get(Adj->getType(), Offset);
+ }
+
+ llvm::Value *SrcAdj = Builder.CreateExtractValue(Src, 1, "src.adj");
+ llvm::Value *DstAdj;
+ if (DerivedToBase)
+ DstAdj = Builder.CreateNSWSub(SrcAdj, Adj, "adj");
+ else
+ DstAdj = Builder.CreateNSWAdd(SrcAdj, Adj, "adj");
+
+ return Builder.CreateInsertValue(Src, DstAdj, 1);
+}
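+// Illustrative example (hypothetical hierarchy, assuming 4-byte int and no
+// padding):
+//   struct A { int a; }; struct B { int b; }; struct D : A, B {};
+// Converting an 'int B::*' to an 'int D::*' adds the byte offset of the B
+// subobject within D (4 here) to the stored value, unless the source is
+// the null value -1, which the select above preserves.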
+
+llvm::Constant *
+ItaniumCXXABI::EmitMemberPointerConversion(llvm::Constant *C,
+ const CastExpr *E) {
+ const MemberPointerType *SrcTy =
+ E->getSubExpr()->getType()->getAs<MemberPointerType>();
+ const MemberPointerType *DestTy =
+ E->getType()->getAs<MemberPointerType>();
+
+ bool DerivedToBase =
+ E->getCastKind() == CK_DerivedToBaseMemberPointer;
+
+ const CXXRecordDecl *DerivedDecl;
+ if (DerivedToBase)
+ DerivedDecl = SrcTy->getClass()->getAsCXXRecordDecl();
+ else
+ DerivedDecl = DestTy->getClass()->getAsCXXRecordDecl();
+
+ // Calculate the offset to the base class.
+ llvm::Constant *Offset =
+ CGM.GetNonVirtualBaseClassOffset(DerivedDecl,
+ E->path_begin(),
+ E->path_end());
+ // If there's no offset, we're done.
+ if (!Offset) return C;
+
+ // If the source is a member data pointer, we have to do a null
+ // check and then add the offset. In the common case, we can fold
+ // away the offset.
+ if (SrcTy->isMemberDataPointer()) {
+ assert(C->getType() == getPtrDiffTy());
+
+ // If it's a constant int, just create a new constant int.
+ if (llvm::ConstantInt *CI = dyn_cast<llvm::ConstantInt>(C)) {
+ int64_t Src = CI->getSExtValue();
+
+ // Null converts to null.
+ if (Src == -1) return CI;
+
+ // Otherwise, just add the offset.
+ int64_t OffsetV = cast<llvm::ConstantInt>(Offset)->getSExtValue();
+ int64_t Dst = (DerivedToBase ? Src - OffsetV : Src + OffsetV);
+ return llvm::ConstantInt::get(CI->getType(), Dst, /*signed*/ true);
+ }
+
+ // Otherwise, we have to form a constant select expression.
+ llvm::Constant *Null = llvm::Constant::getAllOnesValue(C->getType());
+
+ llvm::Constant *IsNull =
+ llvm::ConstantExpr::getICmp(llvm::ICmpInst::ICMP_EQ, C, Null);
+
+ llvm::Constant *Dst;
+ if (DerivedToBase)
+ Dst = llvm::ConstantExpr::getNSWSub(C, Offset);
+ else
+ Dst = llvm::ConstantExpr::getNSWAdd(C, Offset);
+
+ return llvm::ConstantExpr::getSelect(IsNull, Null, Dst);
+ }
+
+ // The this-adjustment is left-shifted by 1 on ARM.
+ if (IsARM) {
+ int64_t OffsetV = cast<llvm::ConstantInt>(Offset)->getSExtValue();
+ OffsetV <<= 1;
+ Offset = llvm::ConstantInt::get(Offset->getType(), OffsetV);
+ }
+
+ llvm::ConstantStruct *CS = cast<llvm::ConstantStruct>(C);
+
+ llvm::Constant *Values[2] = { CS->getOperand(0), 0 };
+ if (DerivedToBase)
+ Values[1] = llvm::ConstantExpr::getSub(CS->getOperand(1), Offset);
+ else
+ Values[1] = llvm::ConstantExpr::getAdd(CS->getOperand(1), Offset);
+
+ return llvm::ConstantStruct::get(CGM.getLLVMContext(), Values, 2,
+ /*Packed=*/false);
+}
+
+
+llvm::Constant *
+ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
+ const llvm::Type *ptrdiff_t = getPtrDiffTy();
+
+ // Itanium C++ ABI 2.3:
+ // A NULL pointer is represented as -1.
+ if (MPT->isMemberDataPointer())
+ return llvm::ConstantInt::get(ptrdiff_t, -1ULL, /*isSigned=*/true);
+
+ llvm::Constant *Zero = llvm::ConstantInt::get(ptrdiff_t, 0);
+ llvm::Constant *Values[2] = { Zero, Zero };
+ return llvm::ConstantStruct::get(CGM.getLLVMContext(), Values, 2,
+ /*Packed=*/false);
+}
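+// For example, a null data member pointer 'int S::*' (for some class S) is
+// emitted as the ptrdiff_t constant -1, while a null member function
+// pointer is the aggregate { 0, 0 }, which also satisfies the ARM rule
+// that a null 'adj' has its low bit clear.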
+
+llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const FieldDecl *FD) {
+ // Itanium C++ ABI 2.3:
+ // A pointer to data member is an offset from the base address of
+ // the class object containing it, represented as a ptrdiff_t
+
+ QualType ClassType = getContext().getTypeDeclType(FD->getParent());
+ const llvm::StructType *ClassLTy =
+ cast<llvm::StructType>(CGM.getTypes().ConvertType(ClassType));
+
+ const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(FD->getParent());
+ unsigned FieldNo = RL.getLLVMFieldNo(FD);
+ uint64_t Offset =
+ CGM.getTargetData().getStructLayout(ClassLTy)->getElementOffset(FieldNo);
+
+ return llvm::ConstantInt::get(getPtrDiffTy(), Offset);
+}
+
+llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
+ assert(MD->isInstance() && "Member function must not be static!");
+ MD = MD->getCanonicalDecl();
+
+ CodeGenTypes &Types = CGM.getTypes();
+ const llvm::Type *ptrdiff_t = getPtrDiffTy();
+
+ // Get the function pointer (or index if this is a virtual function).
+ llvm::Constant *MemPtr[2];
+ if (MD->isVirtual()) {
+ uint64_t Index = CGM.getVTables().getMethodVTableIndex(MD);
+
+ // FIXME: We shouldn't use / 8 here.
+ uint64_t PointerWidthInBytes =
+ getContext().Target.getPointerWidth(0) / 8;
+ uint64_t VTableOffset = (Index * PointerWidthInBytes);
+
+ if (IsARM) {
+ // ARM C++ ABI 3.2.1:
+ // This ABI specifies that adj contains twice the this
+ // adjustment, plus 1 if the member function is virtual. The
+ // least significant bit of adj then makes exactly the same
+ // discrimination as the least significant bit of ptr does for
+ // Itanium.
+ MemPtr[0] = llvm::ConstantInt::get(ptrdiff_t, VTableOffset);
+ MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t, 1);
+ } else {
+ // Itanium C++ ABI 2.3:
+ // For a virtual function, [the pointer field] is 1 plus the
+ // virtual table offset (in bytes) of the function,
+ // represented as a ptrdiff_t.
+ MemPtr[0] = llvm::ConstantInt::get(ptrdiff_t, VTableOffset + 1);
+ MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t, 0);
+ }
+ } else {
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+ const llvm::Type *Ty;
+ // Check whether the function has a computable LLVM signature.
+ if (!CodeGenTypes::VerifyFuncTypeComplete(FPT)) {
+ // The function has a computable LLVM signature; use the correct type.
+ Ty = Types.GetFunctionType(Types.getFunctionInfo(MD), FPT->isVariadic());
+ } else {
+ // Use an arbitrary non-function type to tell GetAddrOfFunction that the
+ // function type is incomplete.
+ Ty = ptrdiff_t;
+ }
+
+ llvm::Constant *Addr = CGM.GetAddrOfFunction(MD, Ty);
+ MemPtr[0] = llvm::ConstantExpr::getPtrToInt(Addr, ptrdiff_t);
+ MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t, 0);
+ }
+
+ return llvm::ConstantStruct::get(CGM.getLLVMContext(),
+ MemPtr, 2, /*Packed=*/false);
+}
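+// Rough worked example: with 8-byte pointers, a virtual method at vtable
+// index 2 yields { 17, 0 } under Itanium and { 16, 1 } under ARM, while a
+// non-virtual method yields { ptrtoint(&method), 0 } under both.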
+
+/// The comparison algorithm is pretty easy: the member pointers are
+/// the same if they're either bitwise identical *or* both null.
+///
+/// ARM is different here only because null-ness is more complicated.
+llvm::Value *
+ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
+ llvm::Value *L,
+ llvm::Value *R,
+ const MemberPointerType *MPT,
+ bool Inequality) {
+ CGBuilderTy &Builder = CGF.Builder;
+
+ llvm::ICmpInst::Predicate Eq;
+ llvm::Instruction::BinaryOps And, Or;
+ if (Inequality) {
+ Eq = llvm::ICmpInst::ICMP_NE;
+ And = llvm::Instruction::Or;
+ Or = llvm::Instruction::And;
+ } else {
+ Eq = llvm::ICmpInst::ICMP_EQ;
+ And = llvm::Instruction::And;
+ Or = llvm::Instruction::Or;
+ }
+
+ // Member data pointers are easy because there's a unique null
+ // value, so it just comes down to bitwise equality.
+ if (MPT->isMemberDataPointer())
+ return Builder.CreateICmp(Eq, L, R);
+
+ // For member function pointers, the tautologies are more complex.
+ // The Itanium tautology is:
+ // (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
+ // The ARM tautology is:
+ // (L == R) <==> (L.ptr == R.ptr &&
+ // (L.adj == R.adj ||
+ // (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
+  // The inequality tautologies have exactly the same structure, except
+  // with De Morgan's laws applied.
+
+ llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
+ llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");
+
+ // This condition tests whether L.ptr == R.ptr. This must always be
+ // true for equality to hold.
+ llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");
+
+ // This condition, together with the assumption that L.ptr == R.ptr,
+ // tests whether the pointers are both null. ARM imposes an extra
+ // condition.
+ llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
+ llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");
+
+ // This condition tests whether L.adj == R.adj. If this isn't
+ // true, the pointers are unequal unless they're both null.
+ llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
+ llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
+ llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");
+
+ // Null member function pointers on ARM clear the low bit of Adj,
+ // so the zero condition has to check that neither low bit is set.
+ if (IsARM) {
+ llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);
+
+ // Compute (l.adj | r.adj) & 1 and test it against zero.
+ llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
+ llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
+ llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
+ "cmp.or.adj");
+ EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
+ }
+
+ // Tie together all our conditions.
+ llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
+ Result = Builder.CreateBinOp(And, PtrEq, Result,
+ Inequality ? "memptr.ne" : "memptr.eq");
+ return Result;
+}
+
+llvm::Value *
+ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT) {
+ CGBuilderTy &Builder = CGF.Builder;
+
+ /// For member data pointers, this is just a check against -1.
+ if (MPT->isMemberDataPointer()) {
+ assert(MemPtr->getType() == getPtrDiffTy());
+ llvm::Value *NegativeOne =
+ llvm::Constant::getAllOnesValue(MemPtr->getType());
+ return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
+ }
+
+ // In Itanium, a member function pointer is null if 'ptr' is null.
+ llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");
+
+ llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
+ llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");
+
+  // On ARM, the same must hold, and in addition the low bit of 'adj'
+  // must be zero.
+ if (IsARM) {
+ llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
+ llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
+ llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
+ llvm::Value *IsNotVirtual = Builder.CreateICmpEQ(VirtualBit, Zero,
+ "memptr.notvirtual");
+ Result = Builder.CreateAnd(Result, IsNotVirtual);
+ }
+
+ return Result;
+}
+
+/// The Itanium ABI requires non-zero initialization only for data
+/// member pointers, for which '0' is a valid offset.
+bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
+ return MPT->getPointeeType()->isFunctionType();
+}
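+// For example, a global 'void (S::*gf)();' (for some class S) can live in
+// zero-initialized storage, whereas a global 'int S::*gp;' needs an
+// explicit all-ones (-1) initializer to represent null.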
+
+/// The generic ABI passes 'this', plus a VTT if it's initializing a
+/// base subobject.
+void ItaniumCXXABI::BuildConstructorSignature(const CXXConstructorDecl *Ctor,
+ CXXCtorType Type,
+ CanQualType &ResTy,
+ llvm::SmallVectorImpl<CanQualType> &ArgTys) {
+ ASTContext &Context = getContext();
+
+ // 'this' is already there.
+
+ // Check if we need to add a VTT parameter (which has type void **).
+ if (Type == Ctor_Base && Ctor->getParent()->getNumVBases() != 0)
+ ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));
+}
+
+/// The ARM ABI does the same as the Itanium ABI, but returns 'this'.
+void ARMCXXABI::BuildConstructorSignature(const CXXConstructorDecl *Ctor,
+ CXXCtorType Type,
+ CanQualType &ResTy,
+ llvm::SmallVectorImpl<CanQualType> &ArgTys) {
+ ItaniumCXXABI::BuildConstructorSignature(Ctor, Type, ResTy, ArgTys);
+ ResTy = ArgTys[0];
+}
+
+/// The generic ABI passes 'this', plus a VTT if it's destroying a
+/// base subobject.
+void ItaniumCXXABI::BuildDestructorSignature(const CXXDestructorDecl *Dtor,
+ CXXDtorType Type,
+ CanQualType &ResTy,
+ llvm::SmallVectorImpl<CanQualType> &ArgTys) {
+ ASTContext &Context = getContext();
+
+ // 'this' is already there.
+
+ // Check if we need to add a VTT parameter (which has type void **).
+ if (Type == Dtor_Base && Dtor->getParent()->getNumVBases() != 0)
+ ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));
+}
+
+/// The ARM ABI does the same as the Itanium ABI, but returns 'this'
+/// for non-deleting destructors.
+void ARMCXXABI::BuildDestructorSignature(const CXXDestructorDecl *Dtor,
+ CXXDtorType Type,
+ CanQualType &ResTy,
+ llvm::SmallVectorImpl<CanQualType> &ArgTys) {
+ ItaniumCXXABI::BuildDestructorSignature(Dtor, Type, ResTy, ArgTys);
+
+ if (Type != Dtor_Deleting)
+ ResTy = ArgTys[0];
+}
+
+void ItaniumCXXABI::BuildInstanceFunctionParams(CodeGenFunction &CGF,
+ QualType &ResTy,
+ FunctionArgList &Params) {
+ /// Create the 'this' variable.
+ BuildThisParam(CGF, Params);
+
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
+ assert(MD->isInstance());
+
+ // Check if we need a VTT parameter as well.
+ if (CodeGenVTables::needsVTTParameter(CGF.CurGD)) {
+ ASTContext &Context = getContext();
+
+ // FIXME: avoid the fake decl
+ QualType T = Context.getPointerType(Context.VoidPtrTy);
+ ImplicitParamDecl *VTTDecl
+ = ImplicitParamDecl::Create(Context, 0, MD->getLocation(),
+ &Context.Idents.get("vtt"), T);
+ Params.push_back(std::make_pair(VTTDecl, VTTDecl->getType()));
+ getVTTDecl(CGF) = VTTDecl;
+ }
+}
+
+void ARMCXXABI::BuildInstanceFunctionParams(CodeGenFunction &CGF,
+ QualType &ResTy,
+ FunctionArgList &Params) {
+ ItaniumCXXABI::BuildInstanceFunctionParams(CGF, ResTy, Params);
+
+ // Return 'this' from certain constructors and destructors.
+ if (HasThisReturn(CGF.CurGD))
+ ResTy = Params[0].second;
+}
+
+void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
+ /// Initialize the 'this' slot.
+ EmitThisParam(CGF);
+
+ /// Initialize the 'vtt' slot if needed.
+ if (getVTTDecl(CGF)) {
+ getVTTValue(CGF)
+ = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(getVTTDecl(CGF)),
+ "vtt");
+ }
+}
+
+void ARMCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
+ ItaniumCXXABI::EmitInstanceFunctionProlog(CGF);
+
+ /// Initialize the return slot to 'this' at the start of the
+ /// function.
+ if (HasThisReturn(CGF.CurGD))
+ CGF.Builder.CreateStore(CGF.LoadCXXThis(), CGF.ReturnValue);
+}
+
+void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
+ RValue RV, QualType ResultType) {
+ if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
+ return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
+
+ // Destructor thunks in the ARM ABI have indeterminate results.
+ const llvm::Type *T =
+ cast<llvm::PointerType>(CGF.ReturnValue->getType())->getElementType();
+ RValue Undef = RValue::get(llvm::UndefValue::get(T));
+ return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
+}
+
+/************************** Array allocation cookies **************************/
+
+bool ItaniumCXXABI::NeedsArrayCookie(QualType ElementType) {
+ ElementType = getContext().getBaseElementType(ElementType);
+ const RecordType *RT = ElementType->getAs<RecordType>();
+ if (!RT) return false;
+
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+
+ // If the class has a non-trivial destructor, it always needs a cookie.
+ if (!RD->hasTrivialDestructor()) return true;
+
+ // If the class's usual deallocation function takes two arguments,
+ // it needs a cookie. Otherwise we don't need a cookie.
+ const CXXMethodDecl *UsualDeallocationFunction = 0;
+
+ // Usual deallocation functions of this form are always found on the
+ // class.
+ //
+ // FIXME: what exactly is this code supposed to do if there's an
+ // ambiguity? That's possible with using declarations.
+ DeclarationName OpName =
+ getContext().DeclarationNames.getCXXOperatorName(OO_Array_Delete);
+ DeclContext::lookup_const_iterator Op, OpEnd;
+ for (llvm::tie(Op, OpEnd) = RD->lookup(OpName); Op != OpEnd; ++Op) {
+ const CXXMethodDecl *Delete =
+ cast<CXXMethodDecl>((*Op)->getUnderlyingDecl());
+
+ if (Delete->isUsualDeallocationFunction()) {
+ UsualDeallocationFunction = Delete;
+ break;
+ }
+ }
+
+ // No usual deallocation function, we don't need a cookie.
+ if (!UsualDeallocationFunction)
+ return false;
+
+ // The usual deallocation function doesn't take a size_t argument,
+ // so we don't need a cookie.
+ if (UsualDeallocationFunction->getNumParams() == 1)
+ return false;
+
+ assert(UsualDeallocationFunction->getNumParams() == 2 &&
+ "Unexpected deallocation function type!");
+ return true;
+}
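+// Illustrative cases: 'new int[n]' and arrays of plain structs with
+// trivial destructors need no cookie; a class with a non-trivial
+// destructor does, as does one whose usual array deallocation function is
+//   void operator delete[](void *, std::size_t);
+// since the allocation size must then be recoverable at delete[] time.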
+
+CharUnits ItaniumCXXABI::GetArrayCookieSize(QualType ElementType) {
+ if (!NeedsArrayCookie(ElementType))
+ return CharUnits::Zero();
+
+ // Padding is the maximum of sizeof(size_t) and alignof(ElementType)
+ ASTContext &Ctx = getContext();
+ return std::max(Ctx.getTypeSizeInChars(Ctx.getSizeType()),
+ Ctx.getTypeAlignInChars(ElementType));
+}
+
+llvm::Value *ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
+ llvm::Value *NewPtr,
+ llvm::Value *NumElements,
+ QualType ElementType) {
+ assert(NeedsArrayCookie(ElementType));
+
+ unsigned AS = cast<llvm::PointerType>(NewPtr->getType())->getAddressSpace();
+
+ ASTContext &Ctx = getContext();
+ QualType SizeTy = Ctx.getSizeType();
+ CharUnits SizeSize = Ctx.getTypeSizeInChars(SizeTy);
+
+ // The size of the cookie.
+ CharUnits CookieSize =
+ std::max(SizeSize, Ctx.getTypeAlignInChars(ElementType));
+
+ // Compute an offset to the cookie.
+ llvm::Value *CookiePtr = NewPtr;
+ CharUnits CookieOffset = CookieSize - SizeSize;
+ if (!CookieOffset.isZero())
+ CookiePtr = CGF.Builder.CreateConstInBoundsGEP1_64(CookiePtr,
+ CookieOffset.getQuantity());
+
+ // Write the number of elements into the appropriate slot.
+ llvm::Value *NumElementsPtr
+ = CGF.Builder.CreateBitCast(CookiePtr,
+ CGF.ConvertType(SizeTy)->getPointerTo(AS));
+ CGF.Builder.CreateStore(NumElements, NumElementsPtr);
+
+ // Finally, compute a pointer to the actual data buffer by skipping
+ // over the cookie completely.
+ return CGF.Builder.CreateConstInBoundsGEP1_64(NewPtr,
+ CookieSize.getQuantity());
+}
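+// Rough buffer layout for 'new T[n]' when alignof(T) <= sizeof(size_t):
+//   [ size_t n ][ T[0] ][ T[1] ] ...
+// with extra padding placed before the count if T is more strictly
+// aligned; the pointer returned above addresses T[0].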
+
+void ItaniumCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
+ llvm::Value *Ptr,
+ QualType ElementType,
+ llvm::Value *&NumElements,
+ llvm::Value *&AllocPtr,
+ CharUnits &CookieSize) {
+ // Derive a char* in the same address space as the pointer.
+ unsigned AS = cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
+ const llvm::Type *CharPtrTy = CGF.Builder.getInt8Ty()->getPointerTo(AS);
+
+ // If we don't need an array cookie, bail out early.
+ if (!NeedsArrayCookie(ElementType)) {
+ AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
+ NumElements = 0;
+ CookieSize = CharUnits::Zero();
+ return;
+ }
+
+ QualType SizeTy = getContext().getSizeType();
+ CharUnits SizeSize = getContext().getTypeSizeInChars(SizeTy);
+ const llvm::Type *SizeLTy = CGF.ConvertType(SizeTy);
+
+ CookieSize
+ = std::max(SizeSize, getContext().getTypeAlignInChars(ElementType));
+
+ CharUnits NumElementsOffset = CookieSize - SizeSize;
+
+ // Compute the allocated pointer.
+ AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
+ AllocPtr = CGF.Builder.CreateConstInBoundsGEP1_64(AllocPtr,
+ -CookieSize.getQuantity());
+
+ llvm::Value *NumElementsPtr = AllocPtr;
+ if (!NumElementsOffset.isZero())
+ NumElementsPtr =
+ CGF.Builder.CreateConstInBoundsGEP1_64(NumElementsPtr,
+ NumElementsOffset.getQuantity());
+ NumElementsPtr =
+ CGF.Builder.CreateBitCast(NumElementsPtr, SizeLTy->getPointerTo(AS));
+ NumElements = CGF.Builder.CreateLoad(NumElementsPtr);
+}
+
+CharUnits ARMCXXABI::GetArrayCookieSize(QualType ElementType) {
+ if (!NeedsArrayCookie(ElementType))
+ return CharUnits::Zero();
+
+ // On ARM, the cookie is always:
+ // struct array_cookie {
+ // std::size_t element_size; // element_size != 0
+ // std::size_t element_count;
+ // };
+ // TODO: what should we do if the allocated type actually wants
+ // greater alignment?
+ return getContext().getTypeSizeInChars(getContext().getSizeType()) * 2;
+}
+
+llvm::Value *ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
+ llvm::Value *NewPtr,
+ llvm::Value *NumElements,
+ QualType ElementType) {
+ assert(NeedsArrayCookie(ElementType));
+
+ // NewPtr is a char*.
+
+ unsigned AS = cast<llvm::PointerType>(NewPtr->getType())->getAddressSpace();
+
+ ASTContext &Ctx = getContext();
+ CharUnits SizeSize = Ctx.getTypeSizeInChars(Ctx.getSizeType());
+ const llvm::IntegerType *SizeTy =
+ cast<llvm::IntegerType>(CGF.ConvertType(Ctx.getSizeType()));
+
+ // The cookie is always at the start of the buffer.
+ llvm::Value *CookiePtr = NewPtr;
+
+ // The first element is the element size.
+ CookiePtr = CGF.Builder.CreateBitCast(CookiePtr, SizeTy->getPointerTo(AS));
+ llvm::Value *ElementSize = llvm::ConstantInt::get(SizeTy,
+ Ctx.getTypeSizeInChars(ElementType).getQuantity());
+ CGF.Builder.CreateStore(ElementSize, CookiePtr);
+
+ // The second element is the element count.
+ CookiePtr = CGF.Builder.CreateConstInBoundsGEP1_32(CookiePtr, 1);
+ CGF.Builder.CreateStore(NumElements, CookiePtr);
+
+ // Finally, compute a pointer to the actual data buffer by skipping
+ // over the cookie completely.
+ CharUnits CookieSize = 2 * SizeSize;
+ return CGF.Builder.CreateConstInBoundsGEP1_64(NewPtr,
+ CookieSize.getQuantity());
+}
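+// Rough buffer layout for 'new T[n]' under this ABI:
+//   [ size_t sizeof(T) ][ size_t n ][ T[0] ] ...
+// and, as above, the pointer returned addresses T[0].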
+
+void ARMCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
+ llvm::Value *Ptr,
+ QualType ElementType,
+ llvm::Value *&NumElements,
+ llvm::Value *&AllocPtr,
+ CharUnits &CookieSize) {
+ // Derive a char* in the same address space as the pointer.
+ unsigned AS = cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
+ const llvm::Type *CharPtrTy = CGF.Builder.getInt8Ty()->getPointerTo(AS);
+
+ // If we don't need an array cookie, bail out early.
+ if (!NeedsArrayCookie(ElementType)) {
+ AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
+ NumElements = 0;
+ CookieSize = CharUnits::Zero();
+ return;
+ }
+
+ QualType SizeTy = getContext().getSizeType();
+ CharUnits SizeSize = getContext().getTypeSizeInChars(SizeTy);
+ const llvm::Type *SizeLTy = CGF.ConvertType(SizeTy);
+
+ // The cookie size is always 2 * sizeof(size_t).
+ CookieSize = 2 * SizeSize;
+ CharUnits NumElementsOffset = CookieSize - SizeSize;
+
+ // The allocated pointer is the input ptr, minus that amount.
+ AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
+ AllocPtr = CGF.Builder.CreateConstInBoundsGEP1_64(AllocPtr,
+ -CookieSize.getQuantity());
+
+ // The number of elements is at offset sizeof(size_t) relative to that.
+ llvm::Value *NumElementsPtr
+ = CGF.Builder.CreateConstInBoundsGEP1_64(AllocPtr,
+ SizeSize.getQuantity());
+ NumElementsPtr =
+ CGF.Builder.CreateBitCast(NumElementsPtr, SizeLTy->getPointerTo(AS));
+ NumElements = CGF.Builder.CreateLoad(NumElementsPtr);
+}
+
diff --git a/lib/CodeGen/Makefile b/lib/CodeGen/Makefile
index 4b93524267ae..6032dffec1a5 100644
--- a/lib/CodeGen/Makefile
+++ b/lib/CodeGen/Makefile
@@ -14,7 +14,6 @@
CLANG_LEVEL := ../..
LIBRARYNAME := clangCodeGen
-BUILD_ARCHIVE = 1
include $(CLANG_LEVEL)/Makefile
diff --git a/lib/CodeGen/Mangle.cpp b/lib/CodeGen/Mangle.cpp
index 30ee541c00fa..e1988743b7f9 100644
--- a/lib/CodeGen/Mangle.cpp
+++ b/lib/CodeGen/Mangle.cpp
@@ -241,11 +241,11 @@ private:
NestedNameSpecifier *Qualifier,
DeclarationName Name,
unsigned KnownArity);
- void mangleCalledExpression(const Expr *E, unsigned KnownArity);
- void mangleExpression(const Expr *E);
+ void mangleExpression(const Expr *E, unsigned Arity = UnknownArity);
void mangleCXXCtorType(CXXCtorType T);
void mangleCXXDtorType(CXXDtorType T);
+ void mangleTemplateArgs(const ExplicitTemplateArgumentList &TemplateArgs);
void mangleTemplateArgs(TemplateName Template,
const TemplateArgument *TemplateArgs,
unsigned NumTemplateArgs);
@@ -304,6 +304,10 @@ bool MangleContext::shouldMangleDeclName(const NamedDecl *D) {
return false;
}
+ // Class members are always mangled.
+ if (D->getDeclContext()->isRecord())
+ return true;
+
// C functions and "main" are not mangled.
if ((FD && FD->isMain()) || isInCLinkageSpecification(D))
return false;
@@ -695,7 +699,11 @@ void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
// a program to refer to the anonymous union, and there is therefore no
// need to mangle its name.
const FieldDecl *FD = FindFirstNamedDataMember(RD);
- assert(FD && "Didn't find a named data member!");
+
+ // It's actually possible for various reasons for us to get here
+ // with an empty anonymous struct / union. Fortunately, it
+ // doesn't really matter what name we generate.
+ if (!FD) break;
assert(FD->getIdentifier() && "Data member name isn't an identifier!");
mangleSourceName(FD->getIdentifier());
@@ -1016,24 +1024,21 @@ CXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO, unsigned Arity) {
// ::= da # delete[]
case OO_Array_Delete: Out << "da"; break;
// ::= ps # + (unary)
- // ::= pl # +
+ // ::= pl # + (binary or unknown)
case OO_Plus:
- assert((Arity == 1 || Arity == 2) && "Invalid arity!");
Out << (Arity == 1? "ps" : "pl"); break;
// ::= ng # - (unary)
- // ::= mi # -
+ // ::= mi # - (binary or unknown)
case OO_Minus:
- assert((Arity == 1 || Arity == 2) && "Invalid arity!");
Out << (Arity == 1? "ng" : "mi"); break;
// ::= ad # & (unary)
- // ::= an # &
+ // ::= an # & (binary or unknown)
case OO_Amp:
- assert((Arity == 1 || Arity == 2) && "Invalid arity!");
Out << (Arity == 1? "ad" : "an"); break;
// ::= de # * (unary)
- // ::= ml # *
+ // ::= ml # * (binary or unknown)
case OO_Star:
- assert((Arity == 1 || Arity == 2) && "Invalid arity!");
+ // Use binary when unknown.
Out << (Arity == 1? "de" : "ml"); break;
// ::= co # ~
case OO_Tilde: Out << "co"; break;
@@ -1275,7 +1280,7 @@ void CXXNameMangler::mangleBareFunctionType(const FunctionType *T,
mangleType(Proto->getResultType());
if (Proto->getNumArgs() == 0 && !Proto->isVariadic()) {
- // <builtin-type> ::= v # void
+ // <builtin-type> ::= v # void
Out << 'v';
return;
}
@@ -1536,25 +1541,6 @@ void CXXNameMangler::mangleIntegerLiteral(QualType T,
}
-void CXXNameMangler::mangleCalledExpression(const Expr *E, unsigned Arity) {
- if (E->getType() != getASTContext().OverloadTy)
- mangleExpression(E);
- // propagate arity to dependent overloads?
-
- llvm::PointerIntPair<OverloadExpr*,1> R
- = OverloadExpr::find(const_cast<Expr*>(E));
- if (R.getInt())
- Out << "an"; // &
- const OverloadExpr *Ovl = R.getPointer();
- if (const UnresolvedMemberExpr *ME = dyn_cast<UnresolvedMemberExpr>(Ovl)) {
- mangleMemberExpr(ME->getBase(), ME->isArrow(), ME->getQualifier(),
- ME->getMemberName(), Arity);
- return;
- }
-
- mangleUnresolvedName(Ovl->getQualifier(), Ovl->getName(), Arity);
-}
-
/// Mangles a member expression. Implicit accesses are not handled,
/// but that should be okay, because you shouldn't be able to
/// make an implicit access in a function template declaration.
@@ -1570,14 +1556,14 @@ void CXXNameMangler::mangleMemberExpr(const Expr *Base,
mangleUnresolvedName(Qualifier, Member, Arity);
}
-void CXXNameMangler::mangleExpression(const Expr *E) {
+void CXXNameMangler::mangleExpression(const Expr *E, unsigned Arity) {
// <expression> ::= <unary operator-name> <expression>
// ::= <binary operator-name> <expression> <expression>
// ::= <trinary operator-name> <expression> <expression> <expression>
- // ::= cl <expression>* E # call
+ // ::= cl <expression>* E # call
// ::= cv <type> expression # conversion with one argument
// ::= cv <type> _ <expression>* E # conversion with a different number of arguments
- // ::= st <type> # sizeof (a type)
+ // ::= st <type> # sizeof (a type)
// ::= at <type> # alignof (a type)
// ::= <template-param>
// ::= <function-param>
@@ -1644,14 +1630,14 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
}
case Expr::CXXDefaultArgExprClass:
- mangleExpression(cast<CXXDefaultArgExpr>(E)->getExpr());
+ mangleExpression(cast<CXXDefaultArgExpr>(E)->getExpr(), Arity);
break;
case Expr::CXXMemberCallExprClass: // fallthrough
case Expr::CallExprClass: {
const CallExpr *CE = cast<CallExpr>(E);
Out << "cl";
- mangleCalledExpression(CE->getCallee(), CE->getNumArgs());
+ mangleExpression(CE->getCallee(), CE->getNumArgs());
for (unsigned I = 0, N = CE->getNumArgs(); I != N; ++I)
mangleExpression(CE->getArg(I));
Out << 'E';
@@ -1682,7 +1668,7 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
const MemberExpr *ME = cast<MemberExpr>(E);
mangleMemberExpr(ME->getBase(), ME->isArrow(),
ME->getQualifier(), ME->getMemberDecl()->getDeclName(),
- UnknownArity);
+ Arity);
break;
}
@@ -1690,7 +1676,9 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
const UnresolvedMemberExpr *ME = cast<UnresolvedMemberExpr>(E);
mangleMemberExpr(ME->getBase(), ME->isArrow(),
ME->getQualifier(), ME->getMemberName(),
- UnknownArity);
+ Arity);
+ if (ME->hasExplicitTemplateArgs())
+ mangleTemplateArgs(ME->getExplicitTemplateArgs());
break;
}
@@ -1699,7 +1687,9 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
= cast<CXXDependentScopeMemberExpr>(E);
mangleMemberExpr(ME->getBase(), ME->isArrow(),
ME->getQualifier(), ME->getMember(),
- UnknownArity);
+ Arity);
+ if (ME->hasExplicitTemplateArgs())
+ mangleTemplateArgs(ME->getExplicitTemplateArgs());
break;
}
@@ -1708,7 +1698,9 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
// using something as close as possible to the original lookup
// expression.
const UnresolvedLookupExpr *ULE = cast<UnresolvedLookupExpr>(E);
- mangleUnresolvedName(ULE->getQualifier(), ULE->getName(), UnknownArity);
+ mangleUnresolvedName(ULE->getQualifier(), ULE->getName(), Arity);
+ if (ULE->hasExplicitTemplateArgs())
+ mangleTemplateArgs(ULE->getExplicitTemplateArgs());
break;
}
@@ -1821,13 +1813,13 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
const ConditionalOperator *CO = cast<ConditionalOperator>(E);
mangleOperatorName(OO_Conditional, /*Arity=*/3);
mangleExpression(CO->getCond());
- mangleExpression(CO->getLHS());
- mangleExpression(CO->getRHS());
+ mangleExpression(CO->getLHS(), Arity);
+ mangleExpression(CO->getRHS(), Arity);
break;
}
case Expr::ImplicitCastExprClass: {
- mangleExpression(cast<ImplicitCastExpr>(E)->getSubExpr());
+ mangleExpression(cast<ImplicitCastExpr>(E)->getSubExpr(), Arity);
break;
}
@@ -1855,7 +1847,7 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
}
case Expr::ParenExprClass:
- mangleExpression(cast<ParenExpr>(E)->getSubExpr());
+ mangleExpression(cast<ParenExpr>(E)->getSubExpr(), Arity);
break;
case Expr::DeclRefExprClass: {
@@ -1869,6 +1861,12 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
Out << 'E';
break;
+ case Decl::EnumConstant: {
+ const EnumConstantDecl *ED = cast<EnumConstantDecl>(D);
+ mangleIntegerLiteral(ED->getType(), ED->getInitVal());
+ break;
+ }
+
case Decl::NonTypeTemplateParm: {
const NonTypeTemplateParmDecl *PD = cast<NonTypeTemplateParmDecl>(D);
mangleTemplateParameter(PD->getIndex());
@@ -1897,27 +1895,23 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
}
assert(QTy && "Qualifier was not type!");
- // ::= sr <type> <unqualified-name> # dependent name
+ // ::= sr <type> <unqualified-name> # dependent name
+ // ::= sr <type> <unqualified-name> <template-args> # dependent template-id
Out << "sr";
mangleType(QualType(QTy, 0));
-
- assert(DRE->getDeclName().getNameKind() == DeclarationName::Identifier &&
- "Unhandled decl name kind!");
- mangleSourceName(DRE->getDeclName().getAsIdentifierInfo());
+ mangleUnqualifiedName(0, DRE->getDeclName(), Arity);
+ if (DRE->hasExplicitTemplateArgs())
+ mangleTemplateArgs(DRE->getExplicitTemplateArgs());
break;
}
- case Expr::CXXBindReferenceExprClass:
- mangleExpression(cast<CXXBindReferenceExpr>(E)->getSubExpr());
- break;
-
case Expr::CXXBindTemporaryExprClass:
mangleExpression(cast<CXXBindTemporaryExpr>(E)->getSubExpr());
break;
case Expr::CXXExprWithTemporariesClass:
- mangleExpression(cast<CXXExprWithTemporaries>(E)->getSubExpr());
+ mangleExpression(cast<CXXExprWithTemporaries>(E)->getSubExpr(), Arity);
break;
case Expr::FloatingLiteralClass: {
@@ -1974,13 +1968,10 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
}
case Expr::StringLiteralClass: {
- // Proposal from David Vandervoorde, 2010.06.30.
- // I've sent a comment off asking whether this needs to also
- // represent the length of the string.
+ // Revised proposal from David Vandervoorde, 2010.07.15.
Out << 'L';
- const ConstantArrayType *T = cast<ConstantArrayType>(E->getType());
- QualType CharTy = T->getElementType().getUnqualifiedType();
- mangleType(CharTy);
+ assert(isa<ConstantArrayType>(E->getType()));
+ mangleType(E->getType());
Out << 'E';
break;
}
@@ -2035,6 +2026,15 @@ void CXXNameMangler::mangleCXXDtorType(CXXDtorType T) {
}
}
+void CXXNameMangler::mangleTemplateArgs(
+ const ExplicitTemplateArgumentList &TemplateArgs) {
+ // <template-args> ::= I <template-arg>+ E
+ Out << 'I';
+ for (unsigned I = 0, E = TemplateArgs.NumTemplateArgs; I != E; ++I)
+ mangleTemplateArg(0, TemplateArgs.getTemplateArgs()[I].getArgument());
+ Out << 'E';
+}
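+// For instance, the explicit arguments in a dependent call written as
+// 'f<int>(x)' should come out as "IiE" appended to the mangled form of
+// the unresolved name.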
+
void CXXNameMangler::mangleTemplateArgs(TemplateName Template,
const TemplateArgument *TemplateArgs,
unsigned NumTemplateArgs) {
diff --git a/lib/CodeGen/MicrosoftCXXABI.cpp b/lib/CodeGen/MicrosoftCXXABI.cpp
index da0fdb616d6c..9407335e3283 100644
--- a/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -41,8 +41,6 @@ public:
MicrosoftCXXNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res)
: Context(C), Out(Res) { }
- llvm::raw_svector_ostream &getStream() { return Out; }
-
void mangle(const NamedDecl *D, llvm::StringRef Prefix = "?");
void mangleName(const NamedDecl *ND);
void mangleFunctionEncoding(const FunctionDecl *FD);
@@ -110,15 +108,43 @@ public:
llvm::SmallVectorImpl<char> &);
};
-class MicrosoftCXXABI : public CXXABI {
+class MicrosoftCXXABI : public CGCXXABI {
MicrosoftMangleContext MangleCtx;
public:
MicrosoftCXXABI(CodeGenModule &CGM)
- : MangleCtx(CGM.getContext(), CGM.getDiags()) {}
+ : CGCXXABI(CGM), MangleCtx(CGM.getContext(), CGM.getDiags()) {}
MicrosoftMangleContext &getMangleContext() {
return MangleCtx;
}
+
+ void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
+ CXXCtorType Type,
+ CanQualType &ResTy,
+ llvm::SmallVectorImpl<CanQualType> &ArgTys) {
+ // 'this' is already in place
+ // TODO: 'for base' flag
+ }
+
+ void BuildDestructorSignature(const CXXDestructorDecl *Ctor,
+ CXXDtorType Type,
+ CanQualType &ResTy,
+ llvm::SmallVectorImpl<CanQualType> &ArgTys) {
+ // 'this' is already in place
+ // TODO: 'for base' flag
+ }
+
+ void BuildInstanceFunctionParams(CodeGenFunction &CGF,
+ QualType &ResTy,
+ FunctionArgList &Params) {
+ BuildThisParam(CGF, Params);
+ // TODO: 'for base' flag
+ }
+
+ void EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
+ EmitThisParam(CGF);
+ // TODO: 'for base' flag
+ }
};
}
@@ -893,6 +919,7 @@ void MicrosoftCXXNameMangler::mangleCallingConvention(const FunctionType *T) {
switch (T->getCallConv()) {
case CC_Default:
case CC_C: Out << 'A'; break;
+ case CC_X86Pascal: Out << 'C'; break;
case CC_X86ThisCall: Out << 'E'; break;
case CC_X86StdCall: Out << 'G'; break;
case CC_X86FastCall: Out << 'I'; break;
@@ -1185,7 +1212,7 @@ void MicrosoftMangleContext::mangleCXXDtor(const CXXDestructorDecl *D,
assert(false && "Can't yet mangle destructors!");
}
-CXXABI *clang::CodeGen::CreateMicrosoftCXXABI(CodeGenModule &CGM) {
+CGCXXABI *clang::CodeGen::CreateMicrosoftCXXABI(CodeGenModule &CGM) {
return new MicrosoftCXXABI(CGM);
}
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index c65f20371552..4d221d4e657e 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -36,14 +36,36 @@ static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
}
}
+static bool isAggregateTypeForABI(QualType T) {
+ return CodeGenFunction::hasAggregateLLVMType(T) ||
+ T->isMemberFunctionPointerType();
+}
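+// E.g. a struct passed by value and a pointer to member function (which
+// lowers to a two-element aggregate) both take the aggregate path here,
+// while 'int', plain pointers, and pointers to data members do not.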
+
ABIInfo::~ABIInfo() {}
+ASTContext &ABIInfo::getContext() const {
+ return CGT.getContext();
+}
+
+llvm::LLVMContext &ABIInfo::getVMContext() const {
+ return CGT.getLLVMContext();
+}
+
+const llvm::TargetData &ABIInfo::getTargetData() const {
+ return CGT.getTargetData();
+}
+
+
void ABIArgInfo::dump() const {
llvm::raw_ostream &OS = llvm::errs();
OS << "(ABIArgInfo Kind=";
switch (TheKind) {
case Direct:
- OS << "Direct";
+ OS << "Direct Type=";
+ if (const llvm::Type *Ty = getCoerceToType())
+ Ty->print(OS);
+ else
+ OS << "null";
break;
case Extend:
OS << "Extend";
@@ -51,10 +73,6 @@ void ABIArgInfo::dump() const {
case Ignore:
OS << "Ignore";
break;
- case Coerce:
- OS << "Coerce Type=";
- getCoerceToType()->print(OS);
- break;
case Indirect:
OS << "Indirect Align=" << getIndirectAlign()
        << " ByVal=" << getIndirectByVal();
@@ -129,7 +147,7 @@ static bool hasNonTrivialDestructorOrCopyConstructor(const RecordType *RT) {
const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
if (!RD)
return false;
-
+
return !RD->hasTrivialDestructor() || !RD->hasTrivialCopyConstructor();
}
@@ -162,7 +180,7 @@ static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
return 0;
const Type *Found = 0;
-
+
// If this is a C++ record, check the bases first.
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
@@ -205,7 +223,7 @@ static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
FT = AT->getElementType();
}
- if (!CodeGenFunction::hasAggregateLLVMType(FT)) {
+ if (!isAggregateTypeForABI(FT)) {
Found = FT.getTypePtr();
} else {
Found = isSingleElementStruct(FT, Context);
@@ -272,23 +290,17 @@ namespace {
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
- ABIArgInfo classifyReturnType(QualType RetTy,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) const;
-
- ABIArgInfo classifyArgumentType(QualType RetTy,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) const;
-
- virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
- llvm::LLVMContext &VMContext,
- const llvm::Type *const *PrefTypes,
- unsigned NumPrefTypes) const {
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
- VMContext);
+public:
+ DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
it != ie; ++it)
- it->info = classifyArgumentType(it->type, Context, VMContext);
+ it->info = classifyArgumentType(it->type);
}
virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
@@ -297,7 +309,8 @@ class DefaultABIInfo : public ABIInfo {
class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
- DefaultTargetCodeGenInfo():TargetCodeGenInfo(new DefaultABIInfo()) {}
+ DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};
llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
@@ -305,10 +318,8 @@ llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
return 0;
}
-ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
- if (CodeGenFunction::hasAggregateLLVMType(Ty))
+ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
+ if (isAggregateTypeForABI(Ty))
return ABIArgInfo::getIndirect(0);
// Treat an enum type as its underlying type.
@@ -322,10 +333,9 @@ ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty,
//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//
-
+
/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
- ASTContext &Context;
bool IsDarwinVectorABI;
bool IsSmallStructInRegABI;
@@ -337,41 +347,31 @@ class X86_32ABIInfo : public ABIInfo {
  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
/// such that the argument will be passed in memory.
- ABIArgInfo getIndirectResult(QualType Ty, ASTContext &Context,
- bool ByVal = true) const;
+ ABIArgInfo getIndirectResult(QualType Ty, bool ByVal = true) const;
public:
- ABIArgInfo classifyReturnType(QualType RetTy,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) const;
-
- ABIArgInfo classifyArgumentType(QualType RetTy,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) const;
-
- virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
- llvm::LLVMContext &VMContext,
- const llvm::Type *const *PrefTypes,
- unsigned NumPrefTypes) const {
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
- VMContext);
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
it != ie; ++it)
- it->info = classifyArgumentType(it->type, Context, VMContext);
+ it->info = classifyArgumentType(it->type);
}
virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const;
- X86_32ABIInfo(ASTContext &Context, bool d, bool p)
- : ABIInfo(), Context(Context), IsDarwinVectorABI(d),
- IsSmallStructInRegABI(p) {}
+ X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p)
+ : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p) {}
};
class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
- X86_32TargetCodeGenInfo(ASTContext &Context, bool d, bool p)
- :TargetCodeGenInfo(new X86_32ABIInfo(Context, d, p)) {}
+ X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p)
+ :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p)) {}
void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const;
@@ -443,153 +443,172 @@ bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
return true;
}
-ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
- if (RetTy->isVoidType()) {
+ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
return ABIArgInfo::getIgnore();
- } else if (const VectorType *VT = RetTy->getAs<VectorType>()) {
+
+ if (const VectorType *VT = RetTy->getAs<VectorType>()) {
// On Darwin, some vectors are returned in registers.
if (IsDarwinVectorABI) {
- uint64_t Size = Context.getTypeSize(RetTy);
+ uint64_t Size = getContext().getTypeSize(RetTy);
// 128-bit vectors are a special case; they are returned in
// registers and we need to make sure to pick a type the LLVM
// backend will like.
if (Size == 128)
- return ABIArgInfo::getCoerce(llvm::VectorType::get(
- llvm::Type::getInt64Ty(VMContext), 2));
+ return ABIArgInfo::getDirect(llvm::VectorType::get(
+ llvm::Type::getInt64Ty(getVMContext()), 2));
// Always return in register if it fits in a general purpose
// register, or if it is 64 bits and has a single element.
if ((Size == 8 || Size == 16 || Size == 32) ||
(Size == 64 && VT->getNumElements() == 1))
- return ABIArgInfo::getCoerce(llvm::IntegerType::get(VMContext, Size));
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
+ Size));
return ABIArgInfo::getIndirect(0);
}
return ABIArgInfo::getDirect();
- } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+ }
+
+ if (isAggregateTypeForABI(RetTy)) {
if (const RecordType *RT = RetTy->getAs<RecordType>()) {
// Structures with either a non-trivial destructor or a non-trivial
// copy constructor are always indirect.
if (hasNonTrivialDestructorOrCopyConstructor(RT))
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
-
+
// Structures with flexible arrays are always indirect.
if (RT->getDecl()->hasFlexibleArrayMember())
return ABIArgInfo::getIndirect(0);
}
-
+
// If specified, structs and unions are always indirect.
if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
return ABIArgInfo::getIndirect(0);
// Classify "single element" structs as their element type.
- if (const Type *SeltTy = isSingleElementStruct(RetTy, Context)) {
+ if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext())) {
if (const BuiltinType *BT = SeltTy->getAs<BuiltinType>()) {
if (BT->isIntegerType()) {
// We need to use the size of the structure, padding
// bit-fields can adjust that to be larger than the single
// element type.
- uint64_t Size = Context.getTypeSize(RetTy);
- return ABIArgInfo::getCoerce(
- llvm::IntegerType::get(VMContext, (unsigned) Size));
- } else if (BT->getKind() == BuiltinType::Float) {
- assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
+ uint64_t Size = getContext().getTypeSize(RetTy);
+ return ABIArgInfo::getDirect(
+ llvm::IntegerType::get(getVMContext(), (unsigned)Size));
+ }
+
+ if (BT->getKind() == BuiltinType::Float) {
+ assert(getContext().getTypeSize(RetTy) ==
+ getContext().getTypeSize(SeltTy) &&
"Unexpect single element structure size!");
- return ABIArgInfo::getCoerce(llvm::Type::getFloatTy(VMContext));
- } else if (BT->getKind() == BuiltinType::Double) {
- assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
+ return ABIArgInfo::getDirect(llvm::Type::getFloatTy(getVMContext()));
+ }
+
+ if (BT->getKind() == BuiltinType::Double) {
+ assert(getContext().getTypeSize(RetTy) ==
+ getContext().getTypeSize(SeltTy) &&
"Unexpect single element structure size!");
- return ABIArgInfo::getCoerce(llvm::Type::getDoubleTy(VMContext));
+ return ABIArgInfo::getDirect(llvm::Type::getDoubleTy(getVMContext()));
}
} else if (SeltTy->isPointerType()) {
// FIXME: It would be really nice if this could come out as the proper
// pointer type.
- const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
- return ABIArgInfo::getCoerce(PtrTy);
+ const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(getVMContext());
+ return ABIArgInfo::getDirect(PtrTy);
} else if (SeltTy->isVectorType()) {
// 64- and 128-bit vectors are never returned in a
// register when inside a structure.
- uint64_t Size = Context.getTypeSize(RetTy);
+ uint64_t Size = getContext().getTypeSize(RetTy);
if (Size == 64 || Size == 128)
return ABIArgInfo::getIndirect(0);
- return classifyReturnType(QualType(SeltTy, 0), Context, VMContext);
+ return classifyReturnType(QualType(SeltTy, 0));
}
}
// Small structures which are register sized are generally returned
// in a register.
- if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, Context)) {
- uint64_t Size = Context.getTypeSize(RetTy);
- return ABIArgInfo::getCoerce(llvm::IntegerType::get(VMContext, Size));
+ if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, getContext())) {
+ uint64_t Size = getContext().getTypeSize(RetTy);
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
}
return ABIArgInfo::getIndirect(0);
- } else {
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
-
- return (RetTy->isPromotableIntegerType() ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ return (RetTy->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
-ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty,
- ASTContext &Context,
- bool ByVal) const {
+ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal) const {
if (!ByVal)
return ABIArgInfo::getIndirect(0, false);
// Compute the byval alignment. We trust the back-end to honor the
// minimum ABI alignment for byval, to make cleaner IR.
const unsigned MinABIAlign = 4;
- unsigned Align = Context.getTypeAlign(Ty) / 8;
+ unsigned Align = getContext().getTypeAlign(Ty) / 8;
if (Align > MinABIAlign)
return ABIArgInfo::getIndirect(Align);
return ABIArgInfo::getIndirect(0);
}
-ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
+ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
// FIXME: Set alignment on indirect arguments.
- if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
+ if (isAggregateTypeForABI(Ty)) {
// Structures with flexible arrays are always indirect.
if (const RecordType *RT = Ty->getAs<RecordType>()) {
// Structures with either a non-trivial destructor or a non-trivial
// copy constructor are always indirect.
if (hasNonTrivialDestructorOrCopyConstructor(RT))
- return getIndirectResult(Ty, Context, /*ByVal=*/false);
+ return getIndirectResult(Ty, /*ByVal=*/false);
if (RT->getDecl()->hasFlexibleArrayMember())
- return getIndirectResult(Ty, Context);
+ return getIndirectResult(Ty);
}
// Ignore empty structs.
- if (Ty->isStructureType() && Context.getTypeSize(Ty) == 0)
+ if (Ty->isStructureType() && getContext().getTypeSize(Ty) == 0)
return ABIArgInfo::getIgnore();
// Expand small (<= 128-bit) record types when we know that the stack layout
// of those arguments will match the struct. This is important because the
// LLVM backend isn't smart enough to remove byval, which inhibits many
// optimizations.
- if (Context.getTypeSize(Ty) <= 4*32 &&
- canExpandIndirectArgument(Ty, Context))
+ if (getContext().getTypeSize(Ty) <= 4*32 &&
+ canExpandIndirectArgument(Ty, getContext()))
return ABIArgInfo::getExpand();
- return getIndirectResult(Ty, Context);
- } else {
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ return getIndirectResult(Ty);
+ }
- return (Ty->isPromotableIntegerType() ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ if (const VectorType *VT = Ty->getAs<VectorType>()) {
+    // On Darwin, some vectors are passed in memory; we handle this by
+    // passing them as an i8/i16/i32/i64.
+ if (IsDarwinVectorABI) {
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if ((Size == 8 || Size == 16 || Size == 32) ||
+ (Size == 64 && VT->getNumElements() == 1))
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
+ Size));
+ }
+
+ return ABIArgInfo::getDirect();
}
+
+
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
@@ -637,7 +656,7 @@ bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
-
+
// 0-7 are the eight integer registers; the order is different
// on Darwin (for EH), but the range is the same.
// 8 is %eip.
@@ -649,7 +668,7 @@ bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
// platforms with 8-byte alignment for that type.
llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
-
+
} else {
// 9 is %eflags, which doesn't get a size on Darwin for some
// reason.
@@ -673,9 +692,6 @@ bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
namespace {
/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
- ASTContext &Context;
- const llvm::TargetData &TD;
-
enum Class {
Integer = 0,
SSE,
@@ -721,17 +737,13 @@ class X86_64ABIInfo : public ABIInfo {
/// also be ComplexX87.
void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi) const;
- /// getCoerceResult - Given a source type \arg Ty and an LLVM type
- /// to coerce to, chose the best way to pass Ty in the same place
- /// that \arg CoerceTo would be passed, but while keeping the
- /// emitted code as simple as possible.
- ///
- /// FIXME: Note, this should be cleaned up to just take an enumeration of all
- /// the ways we might want to pass things, instead of constructing an LLVM
- /// type. This makes this code more explicit, and it makes it clearer that we
- /// are also doing this for correctness in the case of passing scalar types.
- ABIArgInfo getCoerceResult(QualType Ty,
- const llvm::Type *CoerceTo) const;
+ const llvm::Type *Get16ByteVectorType(QualType Ty) const;
+ const llvm::Type *GetSSETypeAtOffset(const llvm::Type *IRType,
+ unsigned IROffset, QualType SourceTy,
+ unsigned SourceOffset) const;
+ const llvm::Type *GetINTEGERTypeAtOffset(const llvm::Type *IRType,
+ unsigned IROffset, QualType SourceTy,
+ unsigned SourceOffset) const;
/// getIndirectResult - Given a source type \arg Ty, return a suitable result
/// such that the argument will be returned in memory.
@@ -741,23 +753,24 @@ class X86_64ABIInfo : public ABIInfo {
/// such that the argument will be passed in memory.
ABIArgInfo getIndirectResult(QualType Ty) const;
- ABIArgInfo classifyReturnType(QualType RetTy,
- llvm::LLVMContext &VMContext) const;
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
- ABIArgInfo classifyArgumentType(QualType Ty,
- llvm::LLVMContext &VMContext,
- unsigned &neededInt,
- unsigned &neededSSE,
- const llvm::Type *PrefType) const;
+ ABIArgInfo classifyArgumentType(QualType Ty, unsigned &neededInt,
+ unsigned &neededSSE) const;
public:
- X86_64ABIInfo(ASTContext &Ctx, const llvm::TargetData &td)
- : Context(Ctx), TD(td) {}
+ X86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+ virtual void computeInfo(CGFunctionInfo &FI) const;
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
- virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
- llvm::LLVMContext &VMContext,
- const llvm::Type *const *PrefTypes,
- unsigned NumPrefTypes) const;
+/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
+class WinX86_64ABIInfo : public X86_64ABIInfo {
+public:
+ WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : X86_64ABIInfo(CGT) {}
virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const;
@@ -765,8 +778,33 @@ public:
class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
- X86_64TargetCodeGenInfo(ASTContext &Ctx, const llvm::TargetData &TD)
- : TargetCodeGenInfo(new X86_64ABIInfo(Ctx, TD)) {}
+ X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new X86_64ABIInfo(CGT)) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
+ return 7;
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ CodeGen::CGBuilderTy &Builder = CGF.Builder;
+ llvm::LLVMContext &Context = CGF.getLLVMContext();
+
+ const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
+ llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
+
+ // 0-15 are the 16 integer registers.
+ // 16 is %rip.
+ AssignToArrayRange(Builder, Address, Eight8, 0, 16);
+
+ return false;
+ }
+};
+
+class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}
int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
return 7;
@@ -865,18 +903,18 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
// FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
return;
}
-
+
if (const EnumType *ET = Ty->getAs<EnumType>()) {
// Classify the underlying integer type.
classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi);
return;
}
-
+
if (Ty->hasPointerRepresentation()) {
Current = Integer;
return;
}
-
+
if (Ty->isMemberPointerType()) {
if (Ty->isMemberFunctionPointerType())
Lo = Hi = Integer;
@@ -884,9 +922,9 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
Current = Integer;
return;
}
-
+
if (const VectorType *VT = Ty->getAs<VectorType>()) {
- uint64_t Size = Context.getTypeSize(VT);
+ uint64_t Size = getContext().getTypeSize(VT);
if (Size == 32) {
// gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
// float> as integer.
@@ -904,7 +942,10 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
return;
// gcc passes <1 x long long> as INTEGER.
- if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong))
+ if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) ||
+ VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) ||
+ VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) ||
+ VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong))
Current = Integer;
else
Current = SSE;
@@ -919,37 +960,37 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
}
return;
}
-
+
if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
- QualType ET = Context.getCanonicalType(CT->getElementType());
+ QualType ET = getContext().getCanonicalType(CT->getElementType());
- uint64_t Size = Context.getTypeSize(Ty);
+ uint64_t Size = getContext().getTypeSize(Ty);
if (ET->isIntegralOrEnumerationType()) {
if (Size <= 64)
Current = Integer;
else if (Size <= 128)
Lo = Hi = Integer;
- } else if (ET == Context.FloatTy)
+ } else if (ET == getContext().FloatTy)
Current = SSE;
- else if (ET == Context.DoubleTy)
+ else if (ET == getContext().DoubleTy)
Lo = Hi = SSE;
- else if (ET == Context.LongDoubleTy)
+ else if (ET == getContext().LongDoubleTy)
Current = ComplexX87;
// If this complex type crosses an eightbyte boundary then it
// should be split.
uint64_t EB_Real = (OffsetBase) / 64;
- uint64_t EB_Imag = (OffsetBase + Context.getTypeSize(ET)) / 64;
+ uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
if (Hi == NoClass && EB_Real != EB_Imag)
Hi = Lo;
-
+
return;
}
-
- if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
+
+ if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
// Arrays are treated like structures.
- uint64_t Size = Context.getTypeSize(Ty);
+ uint64_t Size = getContext().getTypeSize(Ty);
// AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
// than two eightbytes, ..., it has class MEMORY.
@@ -960,13 +1001,13 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
// fields, it has class MEMORY.
//
// Only need to check alignment of array base.
- if (OffsetBase % Context.getTypeAlign(AT->getElementType()))
+ if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
return;
// Otherwise implement simplified merge. We could be smarter about
// this, but it isn't worth it and would be harder to verify.
Current = NoClass;
- uint64_t EltSize = Context.getTypeSize(AT->getElementType());
+ uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
uint64_t ArraySize = AT->getSize().getZExtValue();
for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
Class FieldLo, FieldHi;
@@ -983,9 +1024,9 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
return;
}
-
+
if (const RecordType *RT = Ty->getAs<RecordType>()) {
- uint64_t Size = Context.getTypeSize(Ty);
+ uint64_t Size = getContext().getTypeSize(Ty);
// AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
// than two eightbytes, ..., it has class MEMORY.
@@ -1004,7 +1045,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
if (RD->hasFlexibleArrayMember())
return;
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
// Reset Lo class, this will be recomputed.
Current = NoClass;
@@ -1031,10 +1072,6 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
if (Lo == Memory || Hi == Memory)
break;
}
-
- // If this record has no fields but isn't empty, classify as INTEGER.
- if (RD->field_empty() && Size)
- Current = Integer;
}
// Classify the fields one at a time, merging the results.
@@ -1048,7 +1085,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
// fields, it has class MEMORY.
//
// Note, skip this test for bit-fields, see below.
- if (!BitField && Offset % Context.getTypeAlign(i->getType())) {
+ if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
Lo = Memory;
return;
}
@@ -1070,7 +1107,8 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
continue;
uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
- uint64_t Size = i->getBitWidth()->EvaluateAsInt(Context).getZExtValue();
+ uint64_t Size =
+ i->getBitWidth()->EvaluateAsInt(getContext()).getZExtValue();
uint64_t EB_Lo = Offset / 64;
uint64_t EB_Hi = (Offset + Size - 1) / 64;
@@ -1110,52 +1148,10 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
}
}
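// The EB_Lo/EB_Hi computation above is plain integer arithmetic on a
// bit-field's offset and width; a tiny standalone sketch (the helper name is
// illustrative, not part of this patch):
#include <cstdio>
#include <utility>

static std::pair<unsigned, unsigned> eightbyteSpan(unsigned OffsetBits,
                                                   unsigned WidthBits) {
  return std::make_pair(OffsetBits / 64, (OffsetBits + WidthBits - 1) / 64);
}

int main() {
  std::pair<unsigned, unsigned> S = eightbyteSpan(60, 10);
  // Prints "0 1": a 10-bit field starting at bit 60 straddles both eightbytes,
  // so both halves get merged with the Integer class.
  std::printf("%u %u\n", S.first, S.second);
  return 0;
}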
-ABIArgInfo X86_64ABIInfo::getCoerceResult(QualType Ty,
- const llvm::Type *CoerceTo) const {
- if (CoerceTo->isIntegerTy(64) || isa<llvm::PointerType>(CoerceTo)) {
- // Integer and pointer types will end up in a general purpose
- // register.
-
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
-
- if (Ty->isIntegralOrEnumerationType() || Ty->hasPointerRepresentation())
- return (Ty->isPromotableIntegerType() ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
-
- // If this is a 8/16/32-bit structure that is passed as an int64, then it
- // will be passed in the low 8/16/32-bits of a 64-bit GPR, which is the same
- // as how an i8/i16/i32 is passed. Coerce to a i8/i16/i32 instead of a i64.
- switch (Context.getTypeSizeInChars(Ty).getQuantity()) {
- default: break;
- case 1: CoerceTo = llvm::Type::getInt8Ty(CoerceTo->getContext()); break;
- case 2: CoerceTo = llvm::Type::getInt16Ty(CoerceTo->getContext()); break;
- case 4: CoerceTo = llvm::Type::getInt32Ty(CoerceTo->getContext()); break;
- }
-
- } else if (CoerceTo->isDoubleTy()) {
- assert(Ty.isCanonical() && "should always have a canonical type here");
- assert(!Ty.hasQualifiers() && "should never have a qualified type here");
-
- // Float and double end up in a single SSE reg.
- if (Ty == Context.FloatTy || Ty == Context.DoubleTy)
- return ABIArgInfo::getDirect();
-
- // If this is a 32-bit structure that is passed as a double, then it will be
- // passed in the low 32-bits of the XMM register, which is the same as how a
- // float is passed. Coerce to a float instead of a double.
- if (Context.getTypeSizeInChars(Ty).getQuantity() == 4)
- CoerceTo = llvm::Type::getFloatTy(CoerceTo->getContext());
- }
-
- return ABIArgInfo::getCoerce(CoerceTo);
-}
-
ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
// If this is a scalar LLVM value then assume LLVM will pass it in the right
// place naturally.
- if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
+ if (!isAggregateTypeForABI(Ty)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
@@ -1170,7 +1166,7 @@ ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty) const {
// If this is a scalar LLVM value then assume LLVM will pass it in the right
// place naturally.
- if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
+ if (!isAggregateTypeForABI(Ty)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
@@ -1185,14 +1181,297 @@ ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty) const {
// Compute the byval alignment. We trust the back-end to honor the
// minimum ABI alignment for byval, to make cleaner IR.
const unsigned MinABIAlign = 8;
- unsigned Align = Context.getTypeAlign(Ty) / 8;
+ unsigned Align = getContext().getTypeAlign(Ty) / 8;
if (Align > MinABIAlign)
return ABIArgInfo::getIndirect(Align);
return ABIArgInfo::getIndirect(0);
}
+/// Get16ByteVectorType - The ABI specifies that a value should be passed in a
+/// full vector XMM register. Pick an LLVM IR type that will be passed as a
+/// vector register.
+const llvm::Type *X86_64ABIInfo::Get16ByteVectorType(QualType Ty) const {
+ const llvm::Type *IRType = CGT.ConvertTypeRecursive(Ty);
+
+ // Wrapper structs that just contain vectors are passed just like vectors;
+ // strip them off if present.
+ const llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType);
+ while (STy && STy->getNumElements() == 1) {
+ IRType = STy->getElementType(0);
+ STy = dyn_cast<llvm::StructType>(IRType);
+ }
+
+ // If the preferred type is a 16-byte vector, prefer to pass it.
+ if (const llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){
+ const llvm::Type *EltTy = VT->getElementType();
+ if (VT->getBitWidth() == 128 &&
+ (EltTy->isFloatTy() || EltTy->isDoubleTy() ||
+ EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) ||
+ EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) ||
+ EltTy->isIntegerTy(128)))
+ return VT;
+ }
+
+ return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2);
+}
+
+/// BitsContainNoUserData - Return true if the specified [start,end) bit range
+/// is known to either be off the end of the specified type or be in
+/// alignment padding. The user type specified is known to be at most 128 bits
+/// in size, and to have passed through X86_64ABIInfo::classify with a successful
+/// classification that put one of the two halves in the INTEGER class.
+///
+/// It is conservatively correct to return false.
+static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
+ unsigned EndBit, ASTContext &Context) {
+ // If the bits being queried are off the end of the type, there is no user
+ // data hiding here. This handles analysis of builtins, vectors and other
+ // types that don't contain interesting padding.
+ unsigned TySize = (unsigned)Context.getTypeSize(Ty);
+ if (TySize <= StartBit)
+ return true;
+
+ if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
+ unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
+ unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
+
+ // Check each element to see if the element overlaps with the queried range.
+ for (unsigned i = 0; i != NumElts; ++i) {
+ // If the element is after the span we care about, then we're done.
+ unsigned EltOffset = i*EltSize;
+ if (EltOffset >= EndBit) break;
+
+ unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
+ if (!BitsContainNoUserData(AT->getElementType(), EltStart,
+ EndBit-EltOffset, Context))
+ return false;
+ }
+ // If it overlaps no elements, then it is safe to process as padding.
+ return true;
+ }
+
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ const RecordDecl *RD = RT->getDecl();
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
+ e = CXXRD->bases_end(); i != e; ++i) {
+ assert(!i->isVirtual() && !i->getType()->isDependentType() &&
+ "Unexpected base class!");
+ const CXXRecordDecl *Base =
+ cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
+
+ // If the base is after the span we care about, ignore it.
+ unsigned BaseOffset = (unsigned)Layout.getBaseClassOffset(Base);
+ if (BaseOffset >= EndBit) continue;
+
+ unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
+ if (!BitsContainNoUserData(i->getType(), BaseStart,
+ EndBit-BaseOffset, Context))
+ return false;
+ }
+ }
+
+ // Verify that no field has data that overlaps the region of interest. Yes
+ // this could be sped up a lot by being smarter about queried fields,
+ // however we're only looking at structs up to 16 bytes, so we don't care
+ // much.
+ unsigned idx = 0;
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i, ++idx) {
+ unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);
+
+ // If we found a field after the region we care about, then we're done.
+ if (FieldOffset >= EndBit) break;
+
+ unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
+ if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
+ Context))
+ return false;
+ }
+
+ // If nothing in this record overlapped the area of interest, then we're
+ // clean.
+ return true;
+ }
+
+ return false;
+}
+
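// A self-contained illustration of the query implemented above, flattened to a
// struct described as (bit offset, bit width) fields; the real routine walks
// arrays, C++ bases and fields of the clang AST recursively (names here are
// illustrative only):
#include <utility>
#include <vector>

static bool bitsContainNoUserData(
    const std::vector<std::pair<unsigned, unsigned> > &Fields,
    unsigned TySizeInBits, unsigned StartBit, unsigned EndBit) {
  // Off the end of the type: only padding or nothing at all.
  if (TySizeInBits <= StartBit)
    return true;
  for (unsigned i = 0, e = Fields.size(); i != e; ++i) {
    unsigned FieldStart = Fields[i].first;
    unsigned FieldEnd = FieldStart + Fields[i].second;
    // A field overlapping [StartBit, EndBit) means real user data is there.
    if (FieldStart < EndBit && StartBit < FieldEnd)
      return false;
  }
  return true;
}

int main() {
  // struct { double d; int i; } is 128 bits with tail padding in [96, 128),
  // which is why its high eightbyte can be passed as i32 rather than i64.
  std::vector<std::pair<unsigned, unsigned> > S;
  S.push_back(std::make_pair(0u, 64u));   // d
  S.push_back(std::make_pair(64u, 32u));  // i
  return bitsContainNoUserData(S, 128, 96, 128) ? 0 : 1;  // 0: only padding
}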
+/// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
+/// float member at the specified offset. For example, {int,{float}} has a
+/// float at offset 4. It is conservatively correct for this routine to return
+/// false.
+static bool ContainsFloatAtOffset(const llvm::Type *IRType, unsigned IROffset,
+ const llvm::TargetData &TD) {
+ // Base case if we find a float.
+ if (IROffset == 0 && IRType->isFloatTy())
+ return true;
+
+ // If this is a struct, recurse into the field at the specified offset.
+ if (const llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
+ const llvm::StructLayout *SL = TD.getStructLayout(STy);
+ unsigned Elt = SL->getElementContainingOffset(IROffset);
+ IROffset -= SL->getElementOffset(Elt);
+ return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
+ }
+
+ // If this is an array, recurse into the field at the specified offset.
+ if (const llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
+ const llvm::Type *EltTy = ATy->getElementType();
+ unsigned EltSize = TD.getTypeAllocSize(EltTy);
+ IROffset -= IROffset/EltSize*EltSize;
+ return ContainsFloatAtOffset(EltTy, IROffset, TD);
+ }
+
+ return false;
+}
+
+
+/// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
+/// low 8 bytes of an XMM register, corresponding to the SSE class.
+const llvm::Type *X86_64ABIInfo::
+GetSSETypeAtOffset(const llvm::Type *IRType, unsigned IROffset,
+ QualType SourceTy, unsigned SourceOffset) const {
+ // The only three choices we have are double, <2 x float>, or float. We
+ // pass as float if the last 4 bytes are just padding. This happens for
+ // structs that contain 3 floats.
+ if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
+ SourceOffset*8+64, getContext()))
+ return llvm::Type::getFloatTy(getVMContext());
+
+ // We want to pass as <2 x float> if the LLVM IR type contains a float at
+ // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
+ // case.
+ if (ContainsFloatAtOffset(IRType, IROffset, getTargetData()) &&
+ ContainsFloatAtOffset(IRType, IROffset+4, getTargetData()))
+ return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);
+
+ return llvm::Type::getDoubleTy(getVMContext());
+}
+
+
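// The three-way choice above, reduced to a decision table; a sketch assuming
// the two queries (padding in the upper half, floats at byte 0 and byte 4)
// have already been answered:
#include <cstdio>

enum SSELowType { SSE_Float, SSE_FloatPair, SSE_Double };

static SSELowType pickSSELowType(bool Upper4BytesArePadding,
                                 bool FloatAtOffset0, bool FloatAtOffset4) {
  if (Upper4BytesArePadding)
    return SSE_Float;        // e.g. the high half of struct { float x, y, z; }
  if (FloatAtOffset0 && FloatAtOffset4)
    return SSE_FloatPair;    // lowered as <2 x float>
  return SSE_Double;         // everything else in the SSE class
}

int main() {
  std::printf("%d\n", pickSSELowType(false, true, true));   // 1: <2 x float>
  std::printf("%d\n", pickSSELowType(true, true, false));   // 0: float
  std::printf("%d\n", pickSSELowType(false, false, false)); // 2: double
  return 0;
}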
+/// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
+/// an 8-byte GPR. This means that we either have a scalar or we are talking
+/// about the high or low part of an up-to-16-byte struct. This routine picks
+/// the best LLVM IR type to represent this, which may be i64 or may be anything
+/// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
+/// etc).
+///
+/// IRType is an LLVM IR type that corresponds to (part of) the IR type for
+/// the source type. IROffset is an offset in bytes into the LLVM IR type that
+/// the 8-byte value references.
+///
+/// SourceTy is the source level type for the entire argument. SourceOffset is
+/// an offset into this that we're processing (which is always either 0 or 8).
+///
+const llvm::Type *X86_64ABIInfo::
+GetINTEGERTypeAtOffset(const llvm::Type *IRType, unsigned IROffset,
+ QualType SourceTy, unsigned SourceOffset) const {
+ // If we're dealing with an un-offset LLVM IR type, then it means that we're
+ // returning an 8-byte unit starting with it. See if we can safely use it.
+ if (IROffset == 0) {
+ // Pointers and int64's always fill the 8-byte unit.
+ if (isa<llvm::PointerType>(IRType) || IRType->isIntegerTy(64))
+ return IRType;
+
+ // If we have a 1/2/4-byte integer, we can use it only if the rest of the
+ // goodness in the source type is just tail padding. This is allowed to
+ // kick in for struct {double,int} on the int, but not on
+ // struct{double,int,int} because we wouldn't return the second int. We
+ // have to do this analysis on the source type because we can't depend on
+ // unions being lowered a specific way etc.
+ if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
+ IRType->isIntegerTy(32)) {
+ unsigned BitWidth = cast<llvm::IntegerType>(IRType)->getBitWidth();
+
+ if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
+ SourceOffset*8+64, getContext()))
+ return IRType;
+ }
+ }
+
+ if (const llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
+ // If this is a struct, recurse into the field at the specified offset.
+ const llvm::StructLayout *SL = getTargetData().getStructLayout(STy);
+ if (IROffset < SL->getSizeInBytes()) {
+ unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
+ IROffset -= SL->getElementOffset(FieldIdx);
+
+ return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
+ SourceTy, SourceOffset);
+ }
+ }
+
+ if (const llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
+ const llvm::Type *EltTy = ATy->getElementType();
+ unsigned EltSize = getTargetData().getTypeAllocSize(EltTy);
+ unsigned EltOffset = IROffset/EltSize*EltSize;
+ return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
+ SourceOffset);
+ }
+
+ // Okay, we don't have any better idea of what to pass, so we pass this in an
+ // integer no wider than the remaining bytes of the struct.
+ unsigned TySizeInBytes =
+ (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
+
+ assert(TySizeInBytes != SourceOffset && "Empty field?");
+
+ // It is always safe to classify this as an integer type up to i64 that
+ // isn't larger than the structure.
+ return llvm::IntegerType::get(getVMContext(),
+ std::min(TySizeInBytes-SourceOffset, 8U)*8);
+}
+
+
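// The fallback at the end of the routine above is just arithmetic on the
// source size; a standalone sketch (sizes in bytes, result in bits):
#include <algorithm>
#include <cassert>
#include <cstdio>

static unsigned fallbackIntegerBits(unsigned TySizeInBytes,
                                    unsigned SourceOffset) {
  assert(TySizeInBytes != SourceOffset && "Empty field?");
  // Wide enough for the remaining bytes, never wider than one eightbyte.
  return std::min(TySizeInBytes - SourceOffset, 8u) * 8;
}

int main() {
  std::printf("%u\n", fallbackIntegerBits(12, 8)); // 32: 12-byte struct, high part
  std::printf("%u\n", fallbackIntegerBits(16, 8)); // 64: a full high eightbyte
  std::printf("%u\n", fallbackIntegerBits(3, 0));  // 24: a 3-byte struct
  return 0;
}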
+/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
+/// be used as elements of a two register pair to pass or return, return a
+/// first class aggregate to represent them. For example, if the low part of
+/// a by-value argument should be passed as i32* and the high part as float,
+/// return {i32*, float}.
+static const llvm::Type *
+GetX86_64ByValArgumentPair(const llvm::Type *Lo, const llvm::Type *Hi,
+ const llvm::TargetData &TD) {
+ // In order to correctly satisfy the ABI, we need the high part to start
+ // at offset 8. If the high and low parts we inferred are both 4-byte types
+ // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
+ // the second element at offset 8. Check for this:
+ unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
+ unsigned HiAlign = TD.getABITypeAlignment(Hi);
+ unsigned HiStart = llvm::TargetData::RoundUpAlignment(LoSize, HiAlign);
+ assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
+
+ // To handle this, we have to increase the size of the low part so that the
+ // second element will start at an 8 byte offset. We can't increase the size
+ // of the second element because it might make us access off the end of the
+ // struct.
+ if (HiStart != 8) {
+ // There are only two sorts of types the ABI generation code can produce for
+ // the low part of a pair that aren't 8 bytes in size: float or i8/i16/i32.
+ // Promote these to a larger type.
+ if (Lo->isFloatTy())
+ Lo = llvm::Type::getDoubleTy(Lo->getContext());
+ else {
+ assert(Lo->isIntegerTy() && "Invalid/unknown lo type");
+ Lo = llvm::Type::getInt64Ty(Lo->getContext());
+ }
+ }
+
+ const llvm::StructType *Result =
+ llvm::StructType::get(Lo->getContext(), Lo, Hi, NULL);
+
+
+ // Verify that the second element is at an 8-byte offset.
+ assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
+ "Invalid x86-64 argument pair!");
+ return Result;
+}
+
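// A minimal standalone sketch of the pairing rule documented above, using raw
// byte sizes and alignments in place of llvm::TargetData (helper names are
// illustrative, not part of this patch):
#include <cassert>
#include <cstdio>

// Round Size up to the next multiple of Align (Align is a power of two).
static unsigned roundUpAlignment(unsigned Size, unsigned Align) {
  return (Size + Align - 1) & ~(Align - 1);
}

// Byte size the low element must grow to so that the high element lands at
// offset 8 in the {low, high} pair.
static unsigned widenedLoSize(unsigned LoSize, unsigned HiAlign) {
  unsigned HiStart = roundUpAlignment(LoSize, HiAlign);
  assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
  return HiStart == 8 ? LoSize : 8;  // promote float->double or iN->i64
}

int main() {
  std::printf("%u\n", widenedLoSize(8, 8)); // 8: high part already at offset 8
  std::printf("%u\n", widenedLoSize(4, 8)); // 4: the 8-byte alignment pads it out
  std::printf("%u\n", widenedLoSize(4, 4)); // 8: promoted, otherwise {i32,i32}
  return 0;
}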
ABIArgInfo X86_64ABIInfo::
-classifyReturnType(QualType RetTy, llvm::LLVMContext &VMContext) const {
+classifyReturnType(QualType RetTy) const {
// AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
// classification algorithm.
X86_64ABIInfo::Class Lo, Hi;
@@ -1200,13 +1479,18 @@ classifyReturnType(QualType RetTy, llvm::LLVMContext &VMContext) const {
// Check some invariants.
assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
- assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
const llvm::Type *ResType = 0;
switch (Lo) {
case NoClass:
- return ABIArgInfo::getIgnore();
+ if (Hi == NoClass)
+ return ABIArgInfo::getIgnore();
+ // If the low part is just padding, it takes no register; leave ResType
+ // null.
+ assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
+ "Unknown missing lo part");
+ break;
case SSEUp:
case X87Up:
@@ -1220,30 +1504,47 @@ classifyReturnType(QualType RetTy, llvm::LLVMContext &VMContext) const {
// AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
// available register of the sequence %rax, %rdx is used.
case Integer:
- ResType = llvm::Type::getInt64Ty(VMContext); break;
+ ResType = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(RetTy), 0,
+ RetTy, 0);
+
+ // If we have a sign or zero extended integer, make sure to return Extend
+ // so that the parameter gets the right LLVM IR attributes.
+ if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ if (RetTy->isIntegralOrEnumerationType() &&
+ RetTy->isPromotableIntegerType())
+ return ABIArgInfo::getExtend();
+ }
+ break;
// AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
// available SSE register of the sequence %xmm0, %xmm1 is used.
case SSE:
- ResType = llvm::Type::getDoubleTy(VMContext); break;
+ ResType = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(RetTy), 0, RetTy, 0);
+ break;
// AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
// returned on the X87 stack in %st0 as 80-bit x87 number.
case X87:
- ResType = llvm::Type::getX86_FP80Ty(VMContext); break;
+ ResType = llvm::Type::getX86_FP80Ty(getVMContext());
+ break;
// AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
// part of the value is returned in %st0 and the imaginary part in
// %st1.
case ComplexX87:
assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
- ResType = llvm::StructType::get(VMContext,
- llvm::Type::getX86_FP80Ty(VMContext),
- llvm::Type::getX86_FP80Ty(VMContext),
+ ResType = llvm::StructType::get(getVMContext(),
+ llvm::Type::getX86_FP80Ty(getVMContext()),
+ llvm::Type::getX86_FP80Ty(getVMContext()),
NULL);
break;
}
+ const llvm::Type *HighPart = 0;
switch (Hi) {
// Memory was handled previously and X87 should
// never occur as a hi class.
@@ -1252,15 +1553,19 @@ classifyReturnType(QualType RetTy, llvm::LLVMContext &VMContext) const {
assert(0 && "Invalid classification for hi word.");
case ComplexX87: // Previously handled.
- case NoClass: break;
+ case NoClass:
+ break;
case Integer:
- ResType = llvm::StructType::get(VMContext, ResType,
- llvm::Type::getInt64Ty(VMContext), NULL);
+ HighPart = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(RetTy),
+ 8, RetTy, 8);
+ if (Lo == NoClass) // Return HighPart at offset 8 in memory.
+ return ABIArgInfo::getDirect(HighPart, 8);
break;
case SSE:
- ResType = llvm::StructType::get(VMContext, ResType,
- llvm::Type::getDoubleTy(VMContext), NULL);
+ HighPart = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(RetTy), 8, RetTy, 8);
+ if (Lo == NoClass) // Return HighPart at offset 8 in memory.
+ return ABIArgInfo::getDirect(HighPart, 8);
break;
// AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
@@ -1269,7 +1574,7 @@ classifyReturnType(QualType RetTy, llvm::LLVMContext &VMContext) const {
// SSEUP should always be preceded by SSE, just widen.
case SSEUp:
assert(Lo == SSE && "Unexpected SSEUp classification.");
- ResType = llvm::VectorType::get(llvm::Type::getDoubleTy(VMContext), 2);
+ ResType = Get16ByteVectorType(RetTy);
break;
// AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
@@ -1279,51 +1584,32 @@ classifyReturnType(QualType RetTy, llvm::LLVMContext &VMContext) const {
// anything. However, in some cases with unions it may not be
// preceded by X87. In such situations we follow gcc and pass the
// extra bits in an SSE reg.
- if (Lo != X87)
- ResType = llvm::StructType::get(VMContext, ResType,
- llvm::Type::getDoubleTy(VMContext), NULL);
+ if (Lo != X87) {
+ HighPart = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(RetTy),
+ 8, RetTy, 8);
+ if (Lo == NoClass) // Return HighPart at offset 8 in memory.
+ return ABIArgInfo::getDirect(HighPart, 8);
+ }
break;
}
-
- return getCoerceResult(RetTy, ResType);
-}
-
-static const llvm::Type *Get8ByteTypeAtOffset(const llvm::Type *PrefType,
- unsigned Offset,
- const llvm::TargetData &TD) {
- if (PrefType == 0) return 0;
-
- // Pointers are always 8-bytes at offset 0.
- if (Offset == 0 && isa<llvm::PointerType>(PrefType))
- return PrefType;
-
- // TODO: 1/2/4/8 byte integers are also interesting, but we have to know that
- // the "hole" is not used in the containing struct (just undef padding).
- const llvm::StructType *STy = dyn_cast<llvm::StructType>(PrefType);
- if (STy == 0) return 0;
-
- // If this is a struct, recurse into the field at the specified offset.
- const llvm::StructLayout *SL = TD.getStructLayout(STy);
- if (Offset >= SL->getSizeInBytes()) return 0;
-
- unsigned FieldIdx = SL->getElementContainingOffset(Offset);
- Offset -= SL->getElementOffset(FieldIdx);
- return Get8ByteTypeAtOffset(STy->getElementType(FieldIdx), Offset, TD);
+ // If a high part was specified, merge it with the low part. It is known
+ // to be passed in the high eightbyte of the result. We do this by forming a
+ // first class struct aggregate with the high and low part: {low, high}
+ if (HighPart)
+ ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData());
+
+ return ABIArgInfo::getDirect(ResType);
}
-ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty,
- llvm::LLVMContext &VMContext,
- unsigned &neededInt,
- unsigned &neededSSE,
- const llvm::Type *PrefType)const{
+ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
+ unsigned &neededSSE) const {
X86_64ABIInfo::Class Lo, Hi;
classify(Ty, 0, Lo, Hi);
// Check some invariants.
// FIXME: Enforce these by construction.
assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
- assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
neededInt = 0;
@@ -1331,7 +1617,13 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty,
const llvm::Type *ResType = 0;
switch (Lo) {
case NoClass:
- return ABIArgInfo::getIgnore();
+ if (Hi == NoClass)
+ return ABIArgInfo::getIgnore();
+ // If the low part is just padding, it takes no register; leave ResType
+ // null.
+ assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
+ "Unknown missing lo part");
+ break;
// AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
// on the stack.
@@ -1351,16 +1643,23 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty,
// available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
// and %r9 is used.
case Integer:
- // It is always safe to classify this as an i64 argument.
- ResType = llvm::Type::getInt64Ty(VMContext);
++neededInt;
-
- // If we can choose a better 8-byte type based on the preferred type, and if
- // that type is still passed in a GPR, use it.
- if (const llvm::Type *PrefTypeLo = Get8ByteTypeAtOffset(PrefType, 0, TD))
- if (isa<llvm::IntegerType>(PrefTypeLo) ||
- isa<llvm::PointerType>(PrefTypeLo))
- ResType = PrefTypeLo;
+
+ // Pick an 8-byte type based on the preferred type.
+ ResType = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(Ty), 0, Ty, 0);
+
+ // If we have a sign or zero extended integer, make sure to return Extend
+ // so that the parameter gets the right LLVM IR attributes.
+ if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ if (Ty->isIntegralOrEnumerationType() &&
+ Ty->isPromotableIntegerType())
+ return ABIArgInfo::getExtend();
+ }
+
break;
// AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
@@ -1368,10 +1667,11 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty,
// order from %xmm0 to %xmm7.
case SSE:
++neededSSE;
- ResType = llvm::Type::getDoubleTy(VMContext);
+ ResType = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(Ty), 0, Ty, 0);
break;
}
+ const llvm::Type *HighPart = 0;
switch (Hi) {
// Memory was handled previously, ComplexX87 and X87 should
// never occur as hi classes, and X87Up must be preceded by X87,
@@ -1383,49 +1683,49 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty,
break;
case NoClass: break;
-
- case Integer: {
- // It is always safe to classify this as an i64 argument.
- const llvm::Type *HiType = llvm::Type::getInt64Ty(VMContext);
+
+ case Integer:
++neededInt;
+ // Pick an 8-byte type based on the preferred type.
+ HighPart = GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(Ty), 8, Ty, 8);
- // If we can choose a better 8-byte type based on the preferred type, and if
- // that type is still passed in a GPR, use it.
- if (const llvm::Type *PrefTypeHi = Get8ByteTypeAtOffset(PrefType, 8, TD))
- if (isa<llvm::IntegerType>(PrefTypeHi) ||
- isa<llvm::PointerType>(PrefTypeHi))
- HiType = PrefTypeHi;
-
- ResType = llvm::StructType::get(VMContext, ResType, HiType, NULL);
+ if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
+ return ABIArgInfo::getDirect(HighPart, 8);
break;
- }
// X87Up generally doesn't occur here (long double is passed in
// memory), except in situations involving unions.
case X87Up:
case SSE:
- ResType = llvm::StructType::get(VMContext, ResType,
- llvm::Type::getDoubleTy(VMContext), NULL);
+ HighPart = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(Ty), 8, Ty, 8);
+
+ if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
+ return ABIArgInfo::getDirect(HighPart, 8);
+
++neededSSE;
break;
// AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
// eightbyte is passed in the upper half of the last used SSE
- // register.
+ // register. This only happens when 128-bit vectors are passed.
case SSEUp:
- assert(Lo == SSE && "Unexpected SSEUp classification.");
- ResType = llvm::VectorType::get(llvm::Type::getDoubleTy(VMContext), 2);
+ assert(Lo == SSE && "Unexpected SSEUp classification");
+ ResType = Get16ByteVectorType(Ty);
break;
}
- return getCoerceResult(Ty, ResType);
+ // If a high part was specified, merge it with the low part. It is known
+ // to be passed in the high eightbyte of the result. We do this by forming a
+ // first class struct aggregate with the high and low part: {low, high}
+ if (HighPart)
+ ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData());
+
+ return ABIArgInfo::getDirect(ResType);
}
-void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
- llvm::LLVMContext &VMContext,
- const llvm::Type *const *PrefTypes,
- unsigned NumPrefTypes) const {
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), VMContext);
+void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
+
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
// Keep track of the number of assigned registers.
unsigned freeIntRegs = 6, freeSSERegs = 8;
@@ -1439,17 +1739,8 @@ void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
// get assigned (in left-to-right order) for passing as follows...
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
it != ie; ++it) {
- // If the client specified a preferred IR type to use, pass it down to
- // classifyArgumentType.
- const llvm::Type *PrefType = 0;
- if (NumPrefTypes) {
- PrefType = *PrefTypes++;
- --NumPrefTypes;
- }
-
unsigned neededInt, neededSSE;
- it->info = classifyArgumentType(it->type, VMContext,
- neededInt, neededSSE, PrefType);
+ it->info = classifyArgumentType(it->type, neededInt, neededSSE);
// AMD64-ABI 3.2.3p3: If there are no registers available for any
// eightbyte of an argument, the whole argument is passed on the
@@ -1527,9 +1818,9 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
// i8* reg_save_area;
// };
unsigned neededInt, neededSSE;
-
+
Ty = CGF.getContext().getCanonicalType(Ty);
- ABIArgInfo AI = classifyArgumentType(Ty, VMContext, neededInt, neededSSE, 0);
+ ABIArgInfo AI = classifyArgumentType(Ty, neededInt, neededSSE);
// AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
// in the registers. If not go to step 7.
@@ -1591,13 +1882,13 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
"reg_save_area");
if (neededInt && neededSSE) {
// FIXME: Cleanup.
- assert(AI.isCoerce() && "Unexpected ABI info for mixed regs");
+ assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
const llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
llvm::Value *Tmp = CGF.CreateTempAlloca(ST);
assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
const llvm::Type *TyLo = ST->getElementType(0);
const llvm::Type *TyHi = ST->getElementType(1);
- assert((TyLo->isFloatingPointTy() ^ TyHi->isFloatingPointTy()) &&
+ assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
"Unexpected ABI info for mixed regs");
const llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
const llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
@@ -1674,7 +1965,28 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
return ResAddr;
}
+llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
+ "ap");
+ llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+ llvm::Type *PTy =
+ llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+ llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
+
+ uint64_t Offset =
+ llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8);
+ llvm::Value *NextAddr =
+ Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
+ "ap.next");
+ Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+ return AddrTyped;
+}
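// What the IR emitted above does at runtime: the Win64 va_list is a raw byte
// pointer bumped by 8-byte slots. A hand-written sketch of that pointer
// arithmetic (illustrative only; large arguments are really passed by
// reference in the Win64 ABI, which this sketch ignores):
#include <cstdio>
#include <cstring>

template <typename T> static T win64VaArg(char *&ap) {
  T value;
  std::memcpy(&value, ap, sizeof(T));                   // load the current slot
  ap += (sizeof(T) + 7) & ~static_cast<std::size_t>(7); // round size up to 8
  return value;
}

int main() {
  // Simulate two arguments laid out in consecutive 8-byte slots.
  union { char bytes[16]; double align; } buffer = {};
  int a = 42;
  double b = 2.5;
  std::memcpy(buffer.bytes, &a, sizeof a);
  std::memcpy(buffer.bytes + 8, &b, sizeof b);
  char *ap = buffer.bytes;
  int first = win64VaArg<int>(ap);
  double second = win64VaArg<double>(ap);
  std::printf("%d %g\n", first, second);  // 42 2.5
  return 0;
}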
//===----------------------------------------------------------------------===//
// PIC16 ABI Implementation
@@ -1683,23 +1995,18 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
namespace {
class PIC16ABIInfo : public ABIInfo {
- ABIArgInfo classifyReturnType(QualType RetTy,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) const;
-
- ABIArgInfo classifyArgumentType(QualType RetTy,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) const;
-
- virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
- llvm::LLVMContext &VMContext,
- const llvm::Type *const *PrefTypes,
- unsigned NumPrefTypes) const {
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
- VMContext);
+public:
+ PIC16ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
it != ie; ++it)
- it->info = classifyArgumentType(it->type, Context, VMContext);
+ it->info = classifyArgumentType(it->type);
}
virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
@@ -1708,14 +2015,13 @@ class PIC16ABIInfo : public ABIInfo {
class PIC16TargetCodeGenInfo : public TargetCodeGenInfo {
public:
- PIC16TargetCodeGenInfo():TargetCodeGenInfo(new PIC16ABIInfo()) {}
+ PIC16TargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new PIC16ABIInfo(CGT)) {}
};
}
-ABIArgInfo PIC16ABIInfo::classifyReturnType(QualType RetTy,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
+ABIArgInfo PIC16ABIInfo::classifyReturnType(QualType RetTy) const {
if (RetTy->isVoidType()) {
return ABIArgInfo::getIgnore();
} else {
@@ -1723,9 +2029,7 @@ ABIArgInfo PIC16ABIInfo::classifyReturnType(QualType RetTy,
}
}
-ABIArgInfo PIC16ABIInfo::classifyArgumentType(QualType Ty,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
+ABIArgInfo PIC16ABIInfo::classifyArgumentType(QualType Ty) const {
return ABIArgInfo::getDirect();
}
@@ -1759,13 +2063,15 @@ llvm::Value *PIC16ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
namespace {
class PPC32TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
+ PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
+
int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
// This is recovered from gcc output.
return 1; // r1 is the dedicated stack pointer
}
bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const;
+ llvm::Value *Address) const;
};
}
@@ -1809,7 +2115,7 @@ PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
// 113: sfp
AssignToArrayRange(Builder, Address, Four8, 109, 113);
- return false;
+ return false;
}
@@ -1831,23 +2137,15 @@ private:
ABIKind Kind;
public:
- ARMABIInfo(ABIKind _Kind) : Kind(_Kind) {}
+ ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) {}
private:
ABIKind getABIKind() const { return Kind; }
- ABIArgInfo classifyReturnType(QualType RetTy,
- ASTContext &Context,
- llvm::LLVMContext &VMCOntext) const;
-
- ABIArgInfo classifyArgumentType(QualType RetTy,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) const;
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
- virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
- llvm::LLVMContext &VMContext,
- const llvm::Type *const *PrefTypes,
- unsigned NumPrefTypes) const;
+ virtual void computeInfo(CGFunctionInfo &FI) const;
virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const;
@@ -1855,8 +2153,8 @@ private:
class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
public:
- ARMTargetCodeGenInfo(ARMABIInfo::ABIKind K)
- :TargetCodeGenInfo(new ARMABIInfo(K)) {}
+ ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
+ :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}
int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
return 13;
@@ -1865,18 +2163,13 @@ public:
}
-void ARMABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
- llvm::LLVMContext &VMContext,
- const llvm::Type *const *PrefTypes,
- unsigned NumPrefTypes) const {
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
- VMContext);
+void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
- it != ie; ++it) {
- it->info = classifyArgumentType(it->type, Context, VMContext);
- }
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type);
- const llvm::Triple &Triple(Context.Target.getTriple());
+ const llvm::Triple &Triple(getContext().Target.getTriple());
llvm::CallingConv::ID DefaultCC;
if (Triple.getEnvironmentName() == "gnueabi" ||
Triple.getEnvironmentName() == "eabi")
@@ -1901,10 +2194,8 @@ void ARMABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
}
}
-ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
- if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
+ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const {
+ if (!isAggregateTypeForABI(Ty)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
@@ -1914,7 +2205,7 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
}
// Ignore empty records.
- if (isEmptyRecord(Context, Ty, true))
+ if (isEmptyRecord(getContext(), Ty, true))
return ABIArgInfo::getIgnore();
// Structures with either a non-trivial destructor or a non-trivial
@@ -1927,21 +2218,21 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
// FIXME: This doesn't handle alignment > 64 bits.
const llvm::Type* ElemTy;
unsigned SizeRegs;
- if (Context.getTypeAlign(Ty) > 32) {
- ElemTy = llvm::Type::getInt64Ty(VMContext);
- SizeRegs = (Context.getTypeSize(Ty) + 63) / 64;
+ if (getContext().getTypeAlign(Ty) > 32) {
+ ElemTy = llvm::Type::getInt64Ty(getVMContext());
+ SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
} else {
- ElemTy = llvm::Type::getInt32Ty(VMContext);
- SizeRegs = (Context.getTypeSize(Ty) + 31) / 32;
+ ElemTy = llvm::Type::getInt32Ty(getVMContext());
+ SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
}
std::vector<const llvm::Type*> LLVMFields;
LLVMFields.push_back(llvm::ArrayType::get(ElemTy, SizeRegs));
- const llvm::Type* STy = llvm::StructType::get(VMContext, LLVMFields, true);
- return ABIArgInfo::getCoerce(STy);
+ const llvm::Type* STy = llvm::StructType::get(getVMContext(), LLVMFields,
+ true);
+ return ABIArgInfo::getDirect(STy);
}
-static bool isIntegerLikeType(QualType Ty,
- ASTContext &Context,
+static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
llvm::LLVMContext &VMContext) {
// APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
// is called integer-like if its size is less than or equal to one word, and
@@ -2011,7 +2302,7 @@ static bool isIntegerLikeType(QualType Ty,
if (!isIntegerLikeType(FD->getType(), Context, VMContext))
return false;
-
+
// Only allow at most one field in a structure. This doesn't match the
// wording above, but follows gcc in situations with a field following an
// empty structure.
@@ -2026,13 +2317,11 @@ static bool isIntegerLikeType(QualType Ty,
return true;
}
-ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
+ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const {
if (RetTy->isVoidType())
return ABIArgInfo::getIgnore();
- if (!CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+ if (!isAggregateTypeForABI(RetTy)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
@@ -2048,7 +2337,7 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
// Are we following APCS?
if (getABIKind() == APCS) {
- if (isEmptyRecord(Context, RetTy, false))
+ if (isEmptyRecord(getContext(), RetTy, false))
return ABIArgInfo::getIgnore();
// Complex types are all returned as packed integers.
@@ -2056,18 +2345,18 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
// FIXME: Consider using 2 x vector types if the back end handles them
// correctly.
if (RetTy->isAnyComplexType())
- return ABIArgInfo::getCoerce(llvm::IntegerType::get(
- VMContext, Context.getTypeSize(RetTy)));
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
+ getContext().getTypeSize(RetTy)));
// Integer like structures are returned in r0.
- if (isIntegerLikeType(RetTy, Context, VMContext)) {
+ if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
// Return in the smallest viable integer type.
- uint64_t Size = Context.getTypeSize(RetTy);
+ uint64_t Size = getContext().getTypeSize(RetTy);
if (Size <= 8)
- return ABIArgInfo::getCoerce(llvm::Type::getInt8Ty(VMContext));
+ return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
if (Size <= 16)
- return ABIArgInfo::getCoerce(llvm::Type::getInt16Ty(VMContext));
- return ABIArgInfo::getCoerce(llvm::Type::getInt32Ty(VMContext));
+ return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
+ return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
}
// Otherwise return in memory.
@@ -2076,19 +2365,19 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
// Otherwise this is an AAPCS variant.
- if (isEmptyRecord(Context, RetTy, true))
+ if (isEmptyRecord(getContext(), RetTy, true))
return ABIArgInfo::getIgnore();
// Aggregates <= 4 bytes are returned in r0; other aggregates
// are returned indirectly.
- uint64_t Size = Context.getTypeSize(RetTy);
+ uint64_t Size = getContext().getTypeSize(RetTy);
if (Size <= 32) {
// Return in the smallest viable integer type.
if (Size <= 8)
- return ABIArgInfo::getCoerce(llvm::Type::getInt8Ty(VMContext));
+ return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
if (Size <= 16)
- return ABIArgInfo::getCoerce(llvm::Type::getInt16Ty(VMContext));
- return ABIArgInfo::getCoerce(llvm::Type::getInt32Ty(VMContext));
+ return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
+ return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
}
return ABIArgInfo::getIndirect(0);
@@ -2118,21 +2407,19 @@ llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
return AddrTyped;
}
-ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
- if (RetTy->isVoidType()) {
+ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
return ABIArgInfo::getIgnore();
- } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+
+ if (isAggregateTypeForABI(RetTy))
return ABIArgInfo::getIndirect(0);
- } else {
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
- return (RetTy->isPromotableIntegerType() ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
- }
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ return (RetTy->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
//===----------------------------------------------------------------------===//
@@ -2142,23 +2429,19 @@ ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy,
namespace {
class SystemZABIInfo : public ABIInfo {
- bool isPromotableIntegerType(QualType Ty) const;
+public:
+ SystemZABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
- ABIArgInfo classifyReturnType(QualType RetTy, ASTContext &Context,
- llvm::LLVMContext &VMContext) const;
+ bool isPromotableIntegerType(QualType Ty) const;
- ABIArgInfo classifyArgumentType(QualType RetTy, ASTContext &Context,
- llvm::LLVMContext &VMContext) const;
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
- virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
- llvm::LLVMContext &VMContext,
- const llvm::Type *const *PrefTypes,
- unsigned NumPrefTypes) const {
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
- Context, VMContext);
+ virtual void computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
it != ie; ++it)
- it->info = classifyArgumentType(it->type, Context, VMContext);
+ it->info = classifyArgumentType(it->type);
}
virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
@@ -2167,7 +2450,8 @@ class SystemZABIInfo : public ABIInfo {
class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
public:
- SystemZTargetCodeGenInfo():TargetCodeGenInfo(new SystemZABIInfo()) {}
+ SystemZTargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new SystemZABIInfo(CGT)) {}
};
}
@@ -2199,28 +2483,22 @@ llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
}
-ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
- if (RetTy->isVoidType()) {
+ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
return ABIArgInfo::getIgnore();
- } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+ if (isAggregateTypeForABI(RetTy))
return ABIArgInfo::getIndirect(0);
- } else {
- return (isPromotableIntegerType(RetTy) ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
- }
+
+ return (isPromotableIntegerType(RetTy) ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
-ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty,
- ASTContext &Context,
- llvm::LLVMContext &VMContext) const {
- if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
+ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
+ if (isAggregateTypeForABI(Ty))
return ABIArgInfo::getIndirect(0);
- } else {
- return (isPromotableIntegerType(Ty) ?
- ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
- }
+
+ return (isPromotableIntegerType(Ty) ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
//===----------------------------------------------------------------------===//
@@ -2231,7 +2509,8 @@ namespace {
class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
public:
- MSP430TargetCodeGenInfo():TargetCodeGenInfo(new DefaultABIInfo()) {}
+ MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &M) const;
};
@@ -2270,14 +2549,15 @@ void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
namespace {
class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
public:
- MIPSTargetCodeGenInfo(): TargetCodeGenInfo(new DefaultABIInfo()) {}
+ MIPSTargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
return 29;
}
bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const;
+ llvm::Value *Address) const;
};
}
@@ -2315,7 +2595,7 @@ MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
}
-const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() const {
+const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
if (TheTargetCodeGenInfo)
return *TheTargetCodeGenInfo;
@@ -2325,56 +2605,61 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() const {
const llvm::Triple &Triple = getContext().Target.getTriple();
switch (Triple.getArch()) {
default:
- return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo());
+ return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types));
case llvm::Triple::mips:
case llvm::Triple::mipsel:
- return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo());
+ return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types));
case llvm::Triple::arm:
case llvm::Triple::thumb:
// FIXME: We want to know the float calling convention as well.
if (strcmp(getContext().Target.getABI(), "apcs-gnu") == 0)
return *(TheTargetCodeGenInfo =
- new ARMTargetCodeGenInfo(ARMABIInfo::APCS));
+ new ARMTargetCodeGenInfo(Types, ARMABIInfo::APCS));
return *(TheTargetCodeGenInfo =
- new ARMTargetCodeGenInfo(ARMABIInfo::AAPCS));
+ new ARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS));
case llvm::Triple::pic16:
- return *(TheTargetCodeGenInfo = new PIC16TargetCodeGenInfo());
+ return *(TheTargetCodeGenInfo = new PIC16TargetCodeGenInfo(Types));
case llvm::Triple::ppc:
- return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo());
+ return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
case llvm::Triple::systemz:
- return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo());
+ return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types));
case llvm::Triple::msp430:
- return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo());
+ return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));
case llvm::Triple::x86:
switch (Triple.getOS()) {
case llvm::Triple::Darwin:
return *(TheTargetCodeGenInfo =
- new X86_32TargetCodeGenInfo(Context, true, true));
+ new X86_32TargetCodeGenInfo(Types, true, true));
case llvm::Triple::Cygwin:
case llvm::Triple::MinGW32:
- case llvm::Triple::MinGW64:
case llvm::Triple::AuroraUX:
case llvm::Triple::DragonFly:
case llvm::Triple::FreeBSD:
case llvm::Triple::OpenBSD:
return *(TheTargetCodeGenInfo =
- new X86_32TargetCodeGenInfo(Context, false, true));
+ new X86_32TargetCodeGenInfo(Types, false, true));
default:
return *(TheTargetCodeGenInfo =
- new X86_32TargetCodeGenInfo(Context, false, false));
+ new X86_32TargetCodeGenInfo(Types, false, false));
}
case llvm::Triple::x86_64:
- return *(TheTargetCodeGenInfo =
- new X86_64TargetCodeGenInfo(Context, TheTargetData));
+ switch (Triple.getOS()) {
+ case llvm::Triple::Win32:
+ case llvm::Triple::MinGW64:
+ case llvm::Triple::Cygwin:
+ return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types));
+ default:
+ return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types));
+ }
}
}
diff --git a/lib/CodeGen/TargetInfo.h b/lib/CodeGen/TargetInfo.h
index f0a78243ff79..9d4cf1610308 100644
--- a/lib/CodeGen/TargetInfo.h
+++ b/lib/CodeGen/TargetInfo.h
@@ -47,6 +47,16 @@ namespace clang {
virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &M) const { }
+ /// Determines the size of struct _Unwind_Exception on this platform,
+ /// in 8-bit units. The Itanium ABI defines this as:
+ /// struct _Unwind_Exception {
+ /// uint64 exception_class;
+ /// _Unwind_Exception_Cleanup_Fn exception_cleanup;
+ /// uint64 private_1;
+ /// uint64 private_2;
+ /// };
+ unsigned getSizeOfUnwindException() const { return 32; }
+
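// Where the default of 32 comes from, assuming an LP64 target where the
// cleanup function pointer is 8 bytes wide (a simplified mirror of the layout,
// not the real unwind header):
#include <stdint.h>

struct UnwindExceptionLayout {
  uint64_t exception_class;            // 8 bytes
  void (*exception_cleanup)(void *);   // 8 bytes on LP64 (signature simplified)
  uint64_t private_1;                  // 8 bytes
  uint64_t private_2;                  // 8 bytes
};                                     // 8 + 8 + 8 + 8 = 32, no padding needed

int main() { return sizeof(UnwindExceptionLayout) == 32 ? 0 : 1; }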
/// Controls whether __builtin_extend_pointer should sign-extend
/// pointers to uint64_t or zero-extend them (the default). Has
/// no effect for targets: