Diffstat (limited to 'contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp')
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp | 355
1 file changed, 173 insertions(+), 182 deletions(-)
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp
index 810b28f25fa1..d9f44f4be617 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp
@@ -15,6 +15,7 @@
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
+#include "EHScopeStack.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
@@ -24,6 +25,7 @@
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
using namespace clang;
@@ -33,6 +35,10 @@ using namespace CodeGen;
// Aggregate Expression Emitter
//===----------------------------------------------------------------------===//
+namespace llvm {
+extern cl::opt<bool> EnableSingleByteCoverage;
+} // namespace llvm
+
namespace {
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
CodeGenFunction &CGF;
@@ -72,15 +78,11 @@ public:
/// then loads the result into DestPtr.
void EmitAggLoadOfLValue(const Expr *E);
- enum ExprValueKind {
- EVK_RValue,
- EVK_NonRValue
- };
-
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
/// SrcIsRValue is true if source comes from an RValue.
void EmitFinalDestCopy(QualType type, const LValue &src,
- ExprValueKind SrcValueKind = EVK_NonRValue);
+ CodeGenFunction::ExprValueKind SrcValueKind =
+ CodeGenFunction::EVK_NonRValue);
void EmitFinalDestCopy(QualType type, RValue src);
void EmitCopy(QualType type, const AggValueSlot &dest,
const AggValueSlot &src);
@@ -129,15 +131,12 @@ public:
EnsureDest(E->getType());
if (llvm::Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
- Address StoreDest = Dest.getAddress();
- // The emitted value is guaranteed to have the same size as the
- // destination but can have a different type. Just do a bitcast in this
- // case to avoid incorrect GEPs.
- if (Result->getType() != StoreDest.getType())
- StoreDest = StoreDest.withElementType(Result->getType());
-
- CGF.EmitAggregateStore(Result, StoreDest,
- E->getType().isVolatileQualified());
+ CGF.CreateCoercedStore(
+ Result, Dest.getAddress(),
+ llvm::TypeSize::getFixed(
+ Dest.getPreferredSize(CGF.getContext(), E->getType())
+ .getQuantity()),
+ E->getType().isVolatileQualified());
return;
}
return Visit(E->getSubExpr());
@@ -235,6 +234,9 @@ public:
RValue Res = CGF.EmitAtomicExpr(E);
EmitFinalDestCopy(E->getType(), Res);
}
+ void VisitPackIndexingExpr(PackIndexingExpr *E) {
+ Visit(E->getSelectedExpr());
+ }
};
} // end anonymous namespace.
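[For context, a minimal sketch of the source-level ConstantExpr case the hunk above rewrites; the names makePair/Pair are illustrative, not from the patch:]

    // A ConstantExpr of aggregate type, e.g. from an immediate invocation.
    struct Pair { int x, y; };
    consteval Pair makePair() { return {1, 2}; }
    Pair p = makePair();  // tryEmitConstantExpr yields an llvm::Constant whose
                          // IR type may differ from the slot's type, hence the
                          // size-checked CreateCoercedStore in the new code.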
@@ -287,10 +289,10 @@ void AggExprEmitter::withReturnValueSlot(
// Otherwise, EmitCall will emit its own, notice that it's "unused", and end
// its lifetime before we have the chance to emit a proper destructor call.
bool UseTemp = Dest.isPotentiallyAliased() || Dest.requiresGCollection() ||
- (RequiresDestruction && !Dest.getAddress().isValid());
+ (RequiresDestruction && Dest.isIgnored());
Address RetAddr = Address::invalid();
- Address RetAllocaAddr = Address::invalid();
+ RawAddress RetAllocaAddr = RawAddress::invalid();
EHScopeStack::stable_iterator LifetimeEndBlock;
llvm::Value *LifetimeSizePtr = nullptr;
@@ -322,7 +324,8 @@ void AggExprEmitter::withReturnValueSlot(
if (!UseTemp)
return;
- assert(Dest.isIgnored() || Dest.getPointer() != Src.getAggregatePointer());
+ assert(Dest.isIgnored() || Dest.emitRawPointer(CGF) !=
+ Src.getAggregatePointer(E->getType(), CGF));
EmitFinalDestCopy(E->getType(), Src);
if (!RequiresDestruction && LifetimeStartInst) {
@@ -338,12 +341,13 @@ void AggExprEmitter::withReturnValueSlot(
void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) {
assert(src.isAggregate() && "value must be aggregate value!");
LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type);
- EmitFinalDestCopy(type, srcLV, EVK_RValue);
+ EmitFinalDestCopy(type, srcLV, CodeGenFunction::EVK_RValue);
}
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
-void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src,
- ExprValueKind SrcValueKind) {
+void AggExprEmitter::EmitFinalDestCopy(
+ QualType type, const LValue &src,
+ CodeGenFunction::ExprValueKind SrcValueKind) {
// If Dest is ignored, then we're evaluating an aggregate expression
// in a context that doesn't care about the result. Note that loads
// from volatile l-values force the existence of a non-ignored
@@ -355,7 +359,7 @@ void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src,
LValue DstLV = CGF.MakeAddrLValue(
Dest.getAddress(), Dest.isVolatile() ? type.withVolatile() : type);
- if (SrcValueKind == EVK_RValue) {
+ if (SrcValueKind == CodeGenFunction::EVK_RValue) {
if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) {
if (Dest.isPotentiallyAliased())
CGF.callCStructMoveAssignmentOperator(DstLV, src);
@@ -374,8 +378,8 @@ void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src,
}
AggValueSlot srcAgg = AggValueSlot::forLValue(
- src, CGF, AggValueSlot::IsDestructed, needsGC(type),
- AggValueSlot::IsAliased, AggValueSlot::MayOverlap);
+ src, AggValueSlot::IsDestructed, needsGC(type), AggValueSlot::IsAliased,
+ AggValueSlot::MayOverlap);
EmitCopy(type, Dest, srcAgg);
}
@@ -413,60 +417,51 @@ AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
ASTContext &Ctx = CGF.getContext();
LValue Array = CGF.EmitLValue(E->getSubExpr());
assert(Array.isSimple() && "initializer_list array not a simple lvalue");
- Address ArrayPtr = Array.getAddress(CGF);
+ Address ArrayPtr = Array.getAddress();
const ConstantArrayType *ArrayType =
Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
assert(ArrayType && "std::initializer_list constructed from non-array");
- // FIXME: Perform the checks on the field types in SemaInit.
RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
RecordDecl::field_iterator Field = Record->field_begin();
- if (Field == Record->field_end()) {
- CGF.ErrorUnsupported(E, "weird std::initializer_list");
- return;
- }
+ assert(Field != Record->field_end() &&
+ Ctx.hasSameType(Field->getType()->getPointeeType(),
+ ArrayType->getElementType()) &&
+ "Expected std::initializer_list first field to be const E *");
// Start pointer.
- if (!Field->getType()->isPointerType() ||
- !Ctx.hasSameType(Field->getType()->getPointeeType(),
- ArrayType->getElementType())) {
- CGF.ErrorUnsupported(E, "weird std::initializer_list");
- return;
- }
-
AggValueSlot Dest = EnsureSlot(E->getType());
LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
- llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
- llvm::Value *IdxStart[] = { Zero, Zero };
- llvm::Value *ArrayStart = Builder.CreateInBoundsGEP(
- ArrayPtr.getElementType(), ArrayPtr.getPointer(), IdxStart, "arraystart");
+ llvm::Value *ArrayStart = ArrayPtr.emitRawPointer(CGF);
CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
++Field;
-
- if (Field == Record->field_end()) {
- CGF.ErrorUnsupported(E, "weird std::initializer_list");
- return;
- }
+ assert(Field != Record->field_end() &&
+ "Expected std::initializer_list to have two fields");
llvm::Value *Size = Builder.getInt(ArrayType->getSize());
LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
- if (Field->getType()->isPointerType() &&
- Ctx.hasSameType(Field->getType()->getPointeeType(),
- ArrayType->getElementType())) {
+ if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
+ // Length.
+ CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);
+
+ } else {
// End pointer.
+ assert(Field->getType()->isPointerType() &&
+ Ctx.hasSameType(Field->getType()->getPointeeType(),
+ ArrayType->getElementType()) &&
+ "Expected std::initializer_list second field to be const E *");
+ llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
llvm::Value *IdxEnd[] = { Zero, Size };
llvm::Value *ArrayEnd = Builder.CreateInBoundsGEP(
- ArrayPtr.getElementType(), ArrayPtr.getPointer(), IdxEnd, "arrayend");
+ ArrayPtr.getElementType(), ArrayPtr.emitRawPointer(CGF), IdxEnd,
+ "arrayend");
CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
- } else if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
- // Length.
- CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);
- } else {
- CGF.ErrorUnsupported(E, "weird std::initializer_list");
- return;
}
+
+ assert(++Field == Record->field_end() &&
+ "Expected std::initializer_list to only have two fields");
}
/// Determine if E is a trivial array filler, that is, one that is
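[For reference, a sketch of the two std::initializer_list layouts that the asserts above now require, where the removed code emitted an ErrorUnsupported "weird std::initializer_list" diagnostic; real library implementations vary and the member names are illustrative:]

    template <class E> class initializer_list {
      const E *__begin_;  // first field: pointer into the backing array
      size_t __size_;     // second field: a length (the size_t branch above)...
    };
    // ...or, in the else branch, a second 'const E *__end_' end pointer.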
@@ -500,19 +495,20 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
uint64_t NumInitElements = Args.size();
uint64_t NumArrayElements = AType->getNumElements();
+ for (const auto *Init : Args) {
+ if (const auto *Embed = dyn_cast<EmbedExpr>(Init->IgnoreParenImpCasts())) {
+ NumInitElements += Embed->getDataElementCount() - 1;
+ if (NumInitElements > NumArrayElements) {
+ NumInitElements = NumArrayElements;
+ break;
+ }
+ }
+ }
+
assert(NumInitElements <= NumArrayElements);
QualType elementType =
CGF.getContext().getAsArrayType(ArrayQTy)->getElementType();
-
- // DestPtr is an array*. Construct an elementType* by drilling
- // down a level.
- llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
- llvm::Value *indices[] = { zero, zero };
- llvm::Value *begin = Builder.CreateInBoundsGEP(
- DestPtr.getElementType(), DestPtr.getPointer(), indices,
- "arrayinit.begin");
-
CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
CharUnits elementAlign =
DestPtr.getAlignment().alignmentOfArrayElement(elementSize);
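[The new counting loop above accounts for #embed: one EmbedExpr initializer stands for getDataElementCount() array elements, so each adds count - 1 to the explicit-initializer total, clamped to the array size. A minimal sketch of the source construct; the file name is illustrative:]

    // blob.bin is 4 bytes: Args holds one EmbedExpr, but four data elements.
    unsigned char data[8] = {
    #embed "blob.bin"
    };  // NumInitElements becomes 4; the remaining 4 elements get the filler.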
@@ -525,9 +521,12 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
elementType.isTriviallyCopyableType(CGF.getContext())) {
CodeGen::CodeGenModule &CGM = CGF.CGM;
ConstantEmitter Emitter(CGF);
- LangAS AS = ArrayQTy.getAddressSpace();
+ QualType GVArrayQTy = CGM.getContext().getAddrSpaceQualType(
+ CGM.getContext().removeAddrSpaceQualType(ArrayQTy),
+ CGM.GetGlobalConstantAddressSpace());
+ LangAS AS = GVArrayQTy.getAddressSpace();
if (llvm::Constant *C =
- Emitter.tryEmitForInitializer(ExprToVisit, AS, ArrayQTy)) {
+ Emitter.tryEmitForInitializer(ExprToVisit, AS, GVArrayQTy)) {
auto GV = new llvm::GlobalVariable(
CGM.getModule(), C->getType(),
/* isConstant= */ true, llvm::GlobalValue::PrivateLinkage, C,
@@ -535,10 +534,10 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
/* InsertBefore= */ nullptr, llvm::GlobalVariable::NotThreadLocal,
CGM.getContext().getTargetAddressSpace(AS));
Emitter.finalize(GV);
- CharUnits Align = CGM.getContext().getTypeAlignInChars(ArrayQTy);
+ CharUnits Align = CGM.getContext().getTypeAlignInChars(GVArrayQTy);
GV->setAlignment(Align.getAsAlign());
Address GVAddr(GV, GV->getValueType(), Align);
- EmitFinalDestCopy(ArrayQTy, CGF.MakeAddrLValue(GVAddr, ArrayQTy));
+ EmitFinalDestCopy(ArrayQTy, CGF.MakeAddrLValue(GVAddr, GVArrayQTy));
return;
}
}
@@ -548,51 +547,63 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
// For that, we'll need an EH cleanup.
QualType::DestructionKind dtorKind = elementType.isDestructedType();
Address endOfInit = Address::invalid();
- EHScopeStack::stable_iterator cleanup;
- llvm::Instruction *cleanupDominator = nullptr;
- if (CGF.needsEHCleanup(dtorKind)) {
+ CodeGenFunction::CleanupDeactivationScope deactivation(CGF);
+
+ llvm::Value *begin = DestPtr.emitRawPointer(CGF);
+ if (dtorKind) {
+ CodeGenFunction::AllocaTrackerRAII allocaTracker(CGF);
// In principle we could tell the cleanup where we are more
// directly, but the control flow can get so varied here that it
// would actually be quite complex. Therefore we go through an
// alloca.
+ llvm::Instruction *dominatingIP =
+ Builder.CreateFlagLoad(llvm::ConstantInt::getNullValue(CGF.Int8PtrTy));
endOfInit = CGF.CreateTempAlloca(begin->getType(), CGF.getPointerAlign(),
"arrayinit.endOfInit");
- cleanupDominator = Builder.CreateStore(begin, endOfInit);
+ Builder.CreateStore(begin, endOfInit);
CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
elementAlign,
CGF.getDestroyer(dtorKind));
- cleanup = CGF.EHStack.stable_begin();
+ cast<EHCleanupScope>(*CGF.EHStack.find(CGF.EHStack.stable_begin()))
+ .AddAuxAllocas(allocaTracker.Take());
- // Otherwise, remember that we didn't need a cleanup.
- } else {
- dtorKind = QualType::DK_none;
+ CGF.DeferredDeactivationCleanupStack.push_back(
+ {CGF.EHStack.stable_begin(), dominatingIP});
}
llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);
- // The 'current element to initialize'. The invariants on this
- // variable are complicated. Essentially, after each iteration of
- // the loop, it points to the last initialized element, except
- // that it points to the beginning of the array before any
- // elements have been initialized.
- llvm::Value *element = begin;
-
- // Emit the explicit initializers.
- for (uint64_t i = 0; i != NumInitElements; ++i) {
- // Advance to the next element.
- if (i > 0) {
+ auto Emit = [&](Expr *Init, uint64_t ArrayIndex) {
+ llvm::Value *element = begin;
+ if (ArrayIndex > 0) {
element = Builder.CreateInBoundsGEP(
- llvmElementType, element, one, "arrayinit.element");
+ llvmElementType, begin,
+ llvm::ConstantInt::get(CGF.SizeTy, ArrayIndex), "arrayinit.element");
// Tell the cleanup that it needs to destroy up to this
// element. TODO: some of these stores can be trivially
// observed to be unnecessary.
- if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
+ if (endOfInit.isValid())
+ Builder.CreateStore(element, endOfInit);
}
LValue elementLV = CGF.MakeAddrLValue(
Address(element, llvmElementType, elementAlign), elementType);
- EmitInitializationToLValue(Args[i], elementLV);
+ EmitInitializationToLValue(Init, elementLV);
+ return true;
+ };
+
+ unsigned ArrayIndex = 0;
+ // Emit the explicit initializers.
+ for (uint64_t i = 0; i != NumInitElements; ++i) {
+ if (ArrayIndex >= NumInitElements)
+ break;
+ if (auto *EmbedS = dyn_cast<EmbedExpr>(Args[i]->IgnoreParenImpCasts())) {
+ EmbedS->doForEachDataElement(Emit, ArrayIndex);
+ } else {
+ Emit(Args[i], ArrayIndex);
+ ArrayIndex++;
+ }
}
// Check whether there's a non-trivial array-fill expression.
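[Instead of tracking a cleanupDominator instruction and calling DeactivateCleanupBlock at the end, the hunk above uses the deferred-deactivation machinery: a placeholder flag load marks the dominating point, the cleanup is pushed along with its auxiliary allocas, and the RAII scope retires it. A condensed sketch of the pattern, assuming the CodeGenFunction API as used in this patch:]

    CodeGenFunction::CleanupDeactivationScope deactivation(CGF);
    llvm::Instruction *dominatingIP =
        Builder.CreateFlagLoad(llvm::ConstantInt::getNullValue(CGF.Int8PtrTy));
    CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
                                         elementAlign,
                                         CGF.getDestroyer(dtorKind));
    CGF.DeferredDeactivationCleanupStack.push_back(
        {CGF.EHStack.stable_begin(), dominatingIP});
    // ~CleanupDeactivationScope() later deactivates the deferred entry.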
@@ -609,9 +620,12 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
// do { *array++ = filler; } while (array != end);
// Advance to the start of the rest of the array.
+ llvm::Value *element = begin;
if (NumInitElements) {
element = Builder.CreateInBoundsGEP(
- llvmElementType, element, one, "arrayinit.start");
+ llvmElementType, element,
+ llvm::ConstantInt::get(CGF.SizeTy, NumInitElements),
+ "arrayinit.start");
if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
}
@@ -661,9 +675,6 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
CGF.EmitBlock(endBB);
}
-
- // Leave the partial-array cleanup if we entered one.
- if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);
}
//===----------------------------------------------------------------------===//
@@ -732,7 +743,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
CodeGenFunction::TCK_Load);
// FIXME: Do we also need to handle property references here?
if (LV.isSimple())
- CGF.EmitDynamicCast(LV.getAddress(CGF), cast<CXXDynamicCastExpr>(E));
+ CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
else
CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");
@@ -765,8 +776,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
}
LValue SourceLV = CGF.EmitLValue(E->getSubExpr());
- Address SourceAddress =
- SourceLV.getAddress(CGF).withElementType(CGF.Int8Ty);
+ Address SourceAddress = SourceLV.getAddress().withElementType(CGF.Int8Ty);
Address DestAddress = Dest.getAddress().withElementType(CGF.Int8Ty);
llvm::Value *SizeVal = llvm::ConstantInt::get(
CGF.SizeTy,
@@ -873,6 +883,9 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
[[fallthrough]];
+ case CK_HLSLArrayRValue:
+ Visit(E->getSubExpr());
+ break;
case CK_NoOp:
case CK_UserDefinedConversion:
@@ -930,6 +943,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
case CK_BuiltinFnToFnPtr:
case CK_ZeroToOCLOpaqueType:
case CK_MatrixCast:
+ case CK_HLSLVectorTruncation:
case CK_IntToOCLSampler:
case CK_FloatingToFixedPoint:
@@ -1051,7 +1065,7 @@ void AggExprEmitter::VisitBinCmp(const BinaryOperator *E) {
if (RV.isScalar())
return {RV.getScalarVal(), nullptr};
if (RV.isAggregate())
- return {RV.getAggregatePointer(), nullptr};
+ return {RV.getAggregatePointer(E->getType(), CGF), nullptr};
assert(RV.isComplex());
return RV.getComplexVal();
};
@@ -1212,7 +1226,7 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
}
EmitCopy(E->getLHS()->getType(),
- AggValueSlot::forLValue(LHS, CGF, AggValueSlot::IsDestructed,
+ AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
needsGC(E->getLHS()->getType()),
AggValueSlot::IsAliased,
AggValueSlot::MayOverlap),
@@ -1234,7 +1248,7 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
// Codegen the RHS so that it stores directly into the LHS.
AggValueSlot LHSSlot = AggValueSlot::forLValue(
- LHS, CGF, AggValueSlot::IsDestructed, needsGC(E->getLHS()->getType()),
+ LHS, AggValueSlot::IsDestructed, needsGC(E->getLHS()->getType()),
AggValueSlot::IsAliased, AggValueSlot::MayOverlap);
// A non-volatile aggregate destination might have volatile member.
if (!LHSSlot.isVolatile() &&
@@ -1275,7 +1289,10 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
eval.begin(CGF);
CGF.EmitBlock(LHSBlock);
- CGF.incrementProfileCounter(E);
+ if (llvm::EnableSingleByteCoverage)
+ CGF.incrementProfileCounter(E->getTrueExpr());
+ else
+ CGF.incrementProfileCounter(E);
Visit(E->getTrueExpr());
eval.end(CGF);
@@ -1290,6 +1307,8 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
eval.begin(CGF);
CGF.EmitBlock(RHSBlock);
+ if (llvm::EnableSingleByteCoverage)
+ CGF.incrementProfileCounter(E->getFalseExpr());
Visit(E->getFalseExpr());
eval.end(CGF);
@@ -1298,6 +1317,8 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
E->getType());
CGF.EmitBlock(ContBlock);
+ if (llvm::EnableSingleByteCoverage)
+ CGF.incrementProfileCounter(E);
}
void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
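[Under llvm::EnableSingleByteCoverage, the hunks above give each arm of the conditional its own counter keyed on the sub-expression, plus one at the join point, rather than one counter on the whole expression. Roughly, for source like this (illustrative):]

    S s = cond ? makeA()   // counter(E->getTrueExpr()) in LHSBlock
               : makeB();  // counter(E->getFalseExpr()) in RHSBlock
                           // counter(E) once more in ContBlock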
@@ -1306,15 +1327,13 @@ void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
Address ArgValue = Address::invalid();
- Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);
+ CGF.EmitVAArg(VE, ArgValue, Dest);
// If EmitVAArg fails, emit an error.
- if (!ArgPtr.isValid()) {
+ if (!ArgValue.isValid()) {
CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
return;
}
-
- EmitFinalDestCopy(VE->getType(), CGF.MakeAddrLValue(ArgPtr, VE->getType()));
}
void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
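[VisitVAArgExpr now hands the destination slot to EmitVAArg, which stores the aggregate directly instead of returning an address for a separate EmitFinalDestCopy. The source construct involved, for reference (illustrative):]

    struct Big { long v[4]; };
    struct Big first(int n, ...) {
      va_list ap;
      va_start(ap, n);
      struct Big b = va_arg(ap, struct Big);  // aggregate va_arg
      va_end(ap);
      return b;
    }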
@@ -1353,9 +1372,8 @@ AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
LValue SlotLV = CGF.MakeAddrLValue(Slot.getAddress(), E->getType());
// We'll need to enter cleanup scopes in case any of the element
- // initializers throws an exception.
- SmallVector<EHScopeStack::stable_iterator, 16> Cleanups;
- llvm::Instruction *CleanupDominator = nullptr;
+ // initializers throws an exception or contains branch out of the expressions.
+ CodeGenFunction::CleanupDeactivationScope scope(CGF);
CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(),
@@ -1374,28 +1392,12 @@ AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
if (QualType::DestructionKind DtorKind =
CurField->getType().isDestructedType()) {
assert(LV.isSimple());
- if (CGF.needsEHCleanup(DtorKind)) {
- if (!CleanupDominator)
- CleanupDominator = CGF.Builder.CreateAlignedLoad(
- CGF.Int8Ty,
- llvm::Constant::getNullValue(CGF.Int8PtrTy),
- CharUnits::One()); // placeholder
-
- CGF.pushDestroy(EHCleanup, LV.getAddress(CGF), CurField->getType(),
- CGF.getDestroyer(DtorKind), false);
- Cleanups.push_back(CGF.EHStack.stable_begin());
- }
+ if (DtorKind)
+ CGF.pushDestroyAndDeferDeactivation(NormalAndEHCleanup, LV.getAddress(),
+ CurField->getType(),
+ CGF.getDestroyer(DtorKind), false);
}
}
-
- // Deactivate all the partial cleanups in reverse order, which
- // generally means popping them.
- for (unsigned i = Cleanups.size(); i != 0; --i)
- CGF.DeactivateCleanupBlock(Cleanups[i-1], CleanupDominator);
-
- // Destroy the placeholder if we made one.
- if (CleanupDominator)
- CleanupDominator->eraseFromParent();
}
void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
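[Capture initializers can throw, so each already-built capture needs a destructor cleanup that is retired once the whole lambda object is constructed; the CleanupDeactivationScope above replaces the manual placeholder-load and DeactivateCleanupBlock loop. The situation being guarded, for reference; might_throw is hypothetical:]

    auto f = [a = std::string("x"),
              b = might_throw()] {};  // if b's init throws, a must be destroyed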
@@ -1454,6 +1456,7 @@ static bool castPreservesZero(const CastExpr *CE) {
case CK_MatrixCast:
case CK_NonAtomicToAtomic:
case CK_AtomicToNonAtomic:
+ case CK_HLSLVectorTruncation:
return true;
case CK_BaseToDerivedMemberPointer:
@@ -1505,6 +1508,7 @@ static bool castPreservesZero(const CastExpr *CE) {
case CK_LValueToRValue:
case CK_LValueToRValueBitCast:
case CK_UncheckedDerivedToBase:
+ case CK_HLSLArrayRValue:
return false;
}
llvm_unreachable("Unhandled clang::CastKind enum");
@@ -1569,7 +1573,7 @@ AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
return;
case TEK_Aggregate:
CGF.EmitAggExpr(
- E, AggValueSlot::forLValue(LV, CGF, AggValueSlot::IsDestructed,
+ E, AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased,
AggValueSlot::MayOverlap, Dest.isZeroed()));
@@ -1608,7 +1612,7 @@ void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
// There's a potential optimization opportunity in combining
// memsets; that would be easy for arrays, but relatively
// difficult for structures with the current code.
- CGF.EmitNullInitialization(lv.getAddress(CGF), lv.getType());
+ CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
}
}
@@ -1682,14 +1686,7 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr(
// We'll need to enter cleanup scopes in case any of the element
// initializers throws an exception.
SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
- llvm::Instruction *cleanupDominator = nullptr;
- auto addCleanup = [&](const EHScopeStack::stable_iterator &cleanup) {
- cleanups.push_back(cleanup);
- if (!cleanupDominator) // create placeholder once needed
- cleanupDominator = CGF.Builder.CreateAlignedLoad(
- CGF.Int8Ty, llvm::Constant::getNullValue(CGF.Int8PtrTy),
- CharUnits::One());
- };
+ CodeGenFunction::CleanupDeactivationScope DeactivateCleanups(CGF);
unsigned curInitIndex = 0;
@@ -1712,10 +1709,8 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr(
CGF.EmitAggExpr(InitExprs[curInitIndex++], AggSlot);
if (QualType::DestructionKind dtorKind =
- Base.getType().isDestructedType()) {
- CGF.pushDestroy(dtorKind, V, Base.getType());
- addCleanup(CGF.EHStack.stable_begin());
- }
+ Base.getType().isDestructedType())
+ CGF.pushDestroyAndDeferDeactivation(dtorKind, V, Base.getType());
}
}
@@ -1732,7 +1727,9 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr(
// Make sure that it's really an empty and not a failure of
// semantic analysis.
for (const auto *Field : record->fields())
- assert((Field->isUnnamedBitfield() || Field->isAnonymousStructOrUnion()) && "Only unnamed bitfields or ananymous class allowed");
+ assert(
+ (Field->isUnnamedBitField() || Field->isAnonymousStructOrUnion()) &&
+ "Only unnamed bitfields or anonymous class allowed");
#endif
return;
}
@@ -1760,7 +1757,7 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr(
break;
// Always skip anonymous bitfields.
- if (field->isUnnamedBitfield())
+ if (field->isUnnamedBitField())
continue;
// We're done if we reach the end of the explicit initializers, we
@@ -1786,37 +1783,16 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr(
// Push a destructor if necessary.
// FIXME: if we have an array of structures, all explicitly
// initialized, we can end up pushing a linear number of cleanups.
- bool pushedCleanup = false;
if (QualType::DestructionKind dtorKind
= field->getType().isDestructedType()) {
assert(LV.isSimple());
- if (CGF.needsEHCleanup(dtorKind)) {
- CGF.pushDestroy(EHCleanup, LV.getAddress(CGF), field->getType(),
- CGF.getDestroyer(dtorKind), false);
- addCleanup(CGF.EHStack.stable_begin());
- pushedCleanup = true;
+ if (dtorKind) {
+ CGF.pushDestroyAndDeferDeactivation(NormalAndEHCleanup, LV.getAddress(),
+ field->getType(),
+ CGF.getDestroyer(dtorKind), false);
}
}
-
- // If the GEP didn't get used because of a dead zero init or something
- // else, clean it up for -O0 builds and general tidiness.
- if (!pushedCleanup && LV.isSimple())
- if (llvm::GetElementPtrInst *GEP =
- dyn_cast<llvm::GetElementPtrInst>(LV.getPointer(CGF)))
- if (GEP->use_empty())
- GEP->eraseFromParent();
- }
-
- // Deactivate all the partial cleanups in reverse order, which
- // generally means popping them.
- assert((cleanupDominator || cleanups.empty()) &&
- "Missing cleanupDominator before deactivating cleanup blocks");
- for (unsigned i = cleanups.size(); i != 0; --i)
- CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator);
-
- // Destroy the placeholder if we made one.
- if (cleanupDominator)
- cleanupDominator->eraseFromParent();
+ }
}
void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
@@ -1833,9 +1809,9 @@ void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
// destPtr is an array*. Construct an elementType* by drilling down a level.
llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
llvm::Value *indices[] = {zero, zero};
- llvm::Value *begin = Builder.CreateInBoundsGEP(
- destPtr.getElementType(), destPtr.getPointer(), indices,
- "arrayinit.begin");
+ llvm::Value *begin = Builder.CreateInBoundsGEP(destPtr.getElementType(),
+ destPtr.emitRawPointer(CGF),
+ indices, "arrayinit.begin");
// Prepare to special-case multidimensional array initialization: we avoid
// emitting multiple destructor loops in that case.
@@ -1887,7 +1863,7 @@ void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
if (InnerLoop) {
// If the subexpression is an ArrayInitLoopExpr, share its cleanup.
auto elementSlot = AggValueSlot::forLValue(
- elementLV, CGF, AggValueSlot::IsDestructed,
+ elementLV, AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
AggValueSlot::DoesNotOverlap);
AggExprEmitter(CGF, elementSlot, false)
@@ -1965,7 +1941,7 @@ static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
if (Field->getType()->isIncompleteArrayType() ||
ILEElement == ILE->getNumInits())
break;
- if (Field->isUnnamedBitfield())
+ if (Field->isUnnamedBitField())
continue;
const Expr *E = ILE->getInit(ILEElement++);
@@ -2052,18 +2028,29 @@ LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
Address Temp = CreateMemTemp(E->getType());
LValue LV = MakeAddrLValue(Temp, E->getType());
- EmitAggExpr(E, AggValueSlot::forLValue(
- LV, *this, AggValueSlot::IsNotDestructed,
- AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap));
+ EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased,
+ AggValueSlot::DoesNotOverlap));
return LV;
}
+void CodeGenFunction::EmitAggFinalDestCopy(QualType Type, AggValueSlot Dest,
+ const LValue &Src,
+ ExprValueKind SrcKind) {
+ return AggExprEmitter(*this, Dest, Dest.isIgnored())
+ .EmitFinalDestCopy(Type, Src, SrcKind);
+}
+
AggValueSlot::Overlap_t
CodeGenFunction::getOverlapForFieldInit(const FieldDecl *FD) {
if (!FD->hasAttr<NoUniqueAddressAttr>() || !FD->getType()->isRecordType())
return AggValueSlot::DoesNotOverlap;
+ // Empty fields can overlap earlier fields.
+ if (FD->getType()->getAsCXXRecordDecl()->isEmpty())
+ return AggValueSlot::MayOverlap;
+
// If the field lies entirely within the enclosing class's nvsize, its tail
// padding cannot overlap any already-initialized object. (The only subobjects
// with greater addresses that might already be initialized are vbases.)
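[An empty field can share an address with earlier subobjects only via [[no_unique_address]], which this function already screens for, so it now conservatively reports MayOverlap. Illustrative source:]

    struct Empty {};
    struct S {
      int x;
      [[no_unique_address]] Empty e;  // occupies no distinct storage; may
                                      // share an offset with another subobject
    };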
@@ -2086,6 +2073,10 @@ AggValueSlot::Overlap_t CodeGenFunction::getOverlapForBaseInit(
if (IsVirtual)
return AggValueSlot::MayOverlap;
+ // Empty bases can overlap earlier bases.
+ if (BaseRD->isEmpty())
+ return AggValueSlot::MayOverlap;
+
// If the base class is laid out entirely within the nvsize of the derived
// class, its tail padding cannot yet be initialized, so we can issue
// stores at the full width of the base class.
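[Likewise for bases: an empty base class typically lands at offset zero of the derived object, overlapping whatever is stored there, so getOverlapForBaseInit now returns MayOverlap for it just as it already did for virtual bases. Illustrative source:]

    struct Empty {};
    struct D : Empty {  // empty base placed at offset 0
      int x;            // shares that offset under the empty-base optimization
    };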
@@ -2104,8 +2095,8 @@ void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty,
bool isVolatile) {
assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
- Address DestPtr = Dest.getAddress(*this);
- Address SrcPtr = Src.getAddress(*this);
+ Address DestPtr = Dest.getAddress();
+ Address SrcPtr = Src.getAddress();
if (getLangOpts().CPlusPlus) {
if (const RecordType *RT = Ty->getAs<RecordType>()) {