Diffstat (limited to 'lib/AST/ExprConstant.cpp')
-rw-r--r--  lib/AST/ExprConstant.cpp  337
1 file changed, 246 insertions, 91 deletions
diff --git a/lib/AST/ExprConstant.cpp b/lib/AST/ExprConstant.cpp
index 2c0fce91844c..2fafa4876758 100644
--- a/lib/AST/ExprConstant.cpp
+++ b/lib/AST/ExprConstant.cpp
@@ -350,36 +350,49 @@ namespace {
MostDerivedArraySize = 2;
MostDerivedPathLength = Entries.size();
}
- void diagnosePointerArithmetic(EvalInfo &Info, const Expr *E, uint64_t N);
+ void diagnosePointerArithmetic(EvalInfo &Info, const Expr *E,
+ const APSInt &N);
/// Add N to the address of this subobject.
- void adjustIndex(EvalInfo &Info, const Expr *E, uint64_t N) {
- if (Invalid) return;
+ void adjustIndex(EvalInfo &Info, const Expr *E, APSInt N) {
+ if (Invalid || !N) return;
+ uint64_t TruncatedN = N.extOrTrunc(64).getZExtValue();
if (isMostDerivedAnUnsizedArray()) {
// Can't verify -- trust that the user is doing the right thing (or if
// not, trust that the caller will catch the bad behavior).
- Entries.back().ArrayIndex += N;
- return;
- }
- if (MostDerivedPathLength == Entries.size() &&
- MostDerivedIsArrayElement) {
- Entries.back().ArrayIndex += N;
- if (Entries.back().ArrayIndex > getMostDerivedArraySize()) {
- diagnosePointerArithmetic(Info, E, Entries.back().ArrayIndex);
- setInvalid();
- }
+ // FIXME: Should we reject if this overflows, at least?
+ Entries.back().ArrayIndex += TruncatedN;
return;
}
+
// [expr.add]p4: For the purposes of these operators, a pointer to a
// nonarray object behaves the same as a pointer to the first element of
// an array of length one with the type of the object as its element type.
- if (IsOnePastTheEnd && N == (uint64_t)-1)
- IsOnePastTheEnd = false;
- else if (!IsOnePastTheEnd && N == 1)
- IsOnePastTheEnd = true;
- else if (N != 0) {
- diagnosePointerArithmetic(Info, E, uint64_t(IsOnePastTheEnd) + N);
+ bool IsArray = MostDerivedPathLength == Entries.size() &&
+ MostDerivedIsArrayElement;
+ uint64_t ArrayIndex =
+ IsArray ? Entries.back().ArrayIndex : (uint64_t)IsOnePastTheEnd;
+ uint64_t ArraySize =
+ IsArray ? getMostDerivedArraySize() : (uint64_t)1;
+
+ if (N < -(int64_t)ArrayIndex || N > ArraySize - ArrayIndex) {
+ // Calculate the actual index in a wide enough type, so we can include
+ // it in the note.
+ N = N.extend(std::max<unsigned>(N.getBitWidth() + 1, 65));
+ (llvm::APInt&)N += ArrayIndex;
+ assert(N.ugt(ArraySize) && "bounds check failed for in-bounds index");
+ diagnosePointerArithmetic(Info, E, N);
setInvalid();
+ return;
}
+
+ ArrayIndex += TruncatedN;
+ assert(ArrayIndex <= ArraySize &&
+ "bounds check succeeded for out-of-bounds index");
+
+ if (IsArray)
+ Entries.back().ArrayIndex = ArrayIndex;
+ else
+ IsOnePastTheEnd = (ArrayIndex != 0);
}
};
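For illustration only (not part of this patch), a minimal C++14 sketch of the constexpr pointer arithmetic that the widened APSInt bounds check above governs; the diagnostic can now report the untruncated index:

    constexpr int arr[2] = {1, 2};
    constexpr const int *ok  = arr + 2;   // one-past-the-end is still accepted
    constexpr const int *bad = arr + 3;   // rejected; the note prints the full out-of-bounds index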
@@ -413,6 +426,17 @@ namespace {
/// Index - The call index of this call.
unsigned Index;
+ // FIXME: Adding this to every 'CallStackFrame' may have a nontrivial impact
+ // on the overall stack usage of deeply-recursing constexpr evaluations.
+ // (We should cache this map rather than recomputing it repeatedly.)
+ // But let's try this and see how it goes; we can look into caching the map
+ // as a later change.
+
+ /// LambdaCaptureFields - Mapping from captured variables/this to
+ /// corresponding data members in the closure class.
+ llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
+ FieldDecl *LambdaThisCaptureField;
+
CallStackFrame(EvalInfo &Info, SourceLocation CallLoc,
const FunctionDecl *Callee, const LValue *This,
APValue *Arguments);
@@ -1048,16 +1072,17 @@ bool SubobjectDesignator::checkSubobject(EvalInfo &Info, const Expr *E,
}
void SubobjectDesignator::diagnosePointerArithmetic(EvalInfo &Info,
- const Expr *E, uint64_t N) {
+ const Expr *E,
+ const APSInt &N) {
// If we're complaining, we must be able to statically determine the size of
// the most derived array.
if (MostDerivedPathLength == Entries.size() && MostDerivedIsArrayElement)
Info.CCEDiag(E, diag::note_constexpr_array_index)
- << static_cast<int>(N) << /*array*/ 0
+ << N << /*array*/ 0
<< static_cast<unsigned>(getMostDerivedArraySize());
else
Info.CCEDiag(E, diag::note_constexpr_array_index)
- << static_cast<int>(N) << /*non-array*/ 1;
+ << N << /*non-array*/ 1;
setInvalid();
}
@@ -1273,14 +1298,24 @@ namespace {
void clearIsNullPointer() {
IsNullPtr = false;
}
- void adjustOffsetAndIndex(EvalInfo &Info, const Expr *E, uint64_t Index,
- CharUnits ElementSize) {
- // Compute the new offset in the appropriate width.
- Offset += Index * ElementSize;
- if (Index && checkNullPointer(Info, E, CSK_ArrayIndex))
+ void adjustOffsetAndIndex(EvalInfo &Info, const Expr *E,
+ const APSInt &Index, CharUnits ElementSize) {
+ // An index of 0 has no effect. (In C, adding 0 to a null pointer is UB,
+ // but we're not required to diagnose it and it's valid in C++.)
+ if (!Index)
+ return;
+
+ // Compute the new offset in the appropriate width, wrapping at 64 bits.
+ // FIXME: When compiling for a 32-bit target, we should use 32-bit
+ // offsets.
+ uint64_t Offset64 = Offset.getQuantity();
+ uint64_t ElemSize64 = ElementSize.getQuantity();
+ uint64_t Index64 = Index.extOrTrunc(64).getZExtValue();
+ Offset = CharUnits::fromQuantity(Offset64 + ElemSize64 * Index64);
+
+ if (checkNullPointer(Info, E, CSK_ArrayIndex))
Designator.adjustIndex(Info, E, Index);
- if (Index)
- clearIsNullPointer();
+ clearIsNullPointer();
}
void adjustOffset(CharUnits N) {
Offset += N;
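Not from the patch, just a sketch of the zero-index early return described in the comment above; adding 0 is valid in a C++ constant expression even for a null pointer:

    constexpr const int *zero_adjust(const int *p) {
      return p + 0;                        // index 0: no designator or null-pointer diagnostics
    }
    static_assert(zero_adjust(nullptr) == nullptr, "");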
@@ -1404,13 +1439,24 @@ static bool EvaluateIntegerOrLValue(const Expr *E, APValue &Result,
EvalInfo &Info);
static bool EvaluateFloat(const Expr *E, APFloat &Result, EvalInfo &Info);
static bool EvaluateComplex(const Expr *E, ComplexValue &Res, EvalInfo &Info);
-static bool EvaluateAtomic(const Expr *E, APValue &Result, EvalInfo &Info);
+static bool EvaluateAtomic(const Expr *E, const LValue *This, APValue &Result,
+ EvalInfo &Info);
static bool EvaluateAsRValue(EvalInfo &Info, const Expr *E, APValue &Result);
//===----------------------------------------------------------------------===//
// Misc utilities
//===----------------------------------------------------------------------===//
+/// Negate an APSInt in place, converting it to a signed form if necessary, and
+/// preserving its value (by extending by up to one bit as needed).
+static void negateAsSigned(APSInt &Int) {
+ if (Int.isUnsigned() || Int.isMinSignedValue()) {
+ Int = Int.extend(Int.getBitWidth() + 1);
+ Int.setIsSigned(true);
+ }
+ Int = -Int;
+}
+
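A usage sketch of the helper above (not part of the patch; APSInt comes from llvm/ADT/APSInt.h, already available in this file): the extra bit keeps negation of an unsigned or minimum-signed value from wrapping.

    llvm::APSInt I(llvm::APInt(64, 3), /*isUnsigned=*/true);
    negateAsSigned(I);
    // I is now a 65-bit signed APSInt holding -3; the magnitude is preserved
    // instead of wrapping to 2^64 - 3.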
/// Produce a string describing the given constexpr call.
static void describeCall(CallStackFrame *Frame, raw_ostream &Out) {
unsigned ArgIndex = 0;
@@ -1458,13 +1504,6 @@ static bool EvaluateIgnoredValue(EvalInfo &Info, const Expr *E) {
return true;
}
-/// Sign- or zero-extend a value to 64 bits. If it's already 64 bits, just
-/// return its existing value.
-static int64_t getExtValue(const APSInt &Value) {
- return Value.isSigned() ? Value.getSExtValue()
- : static_cast<int64_t>(Value.getZExtValue());
-}
-
/// Should this call expression be treated as a string literal?
static bool IsStringLiteralCall(const CallExpr *E) {
unsigned Builtin = E->getBuiltinCallee();
@@ -2220,7 +2259,7 @@ static bool HandleSizeof(EvalInfo &Info, SourceLocation Loc,
/// \param Adjustment - The adjustment, in objects of type EltTy, to add.
static bool HandleLValueArrayAdjustment(EvalInfo &Info, const Expr *E,
LValue &LVal, QualType EltTy,
- int64_t Adjustment) {
+ APSInt Adjustment) {
CharUnits SizeOfPointee;
if (!HandleSizeof(Info, E->getExprLoc(), EltTy, SizeOfPointee))
return false;
@@ -2229,6 +2268,13 @@ static bool HandleLValueArrayAdjustment(EvalInfo &Info, const Expr *E,
return true;
}
+static bool HandleLValueArrayAdjustment(EvalInfo &Info, const Expr *E,
+ LValue &LVal, QualType EltTy,
+ int64_t Adjustment) {
+ return HandleLValueArrayAdjustment(Info, E, LVal, EltTy,
+ APSInt::get(Adjustment));
+}
+
/// Update an lvalue to refer to a component of a complex number.
/// \param Info - Information about the ongoing evaluation.
/// \param LVal - The lvalue to be updated.
@@ -2247,6 +2293,10 @@ static bool HandleLValueComplexElement(EvalInfo &Info, const Expr *E,
return true;
}
+static bool handleLValueToRValueConversion(EvalInfo &Info, const Expr *Conv,
+ QualType Type, const LValue &LVal,
+ APValue &RVal);
+
/// Try to evaluate the initializer for a variable declaration.
///
/// \param Info Information about the ongoing evaluation.
@@ -2258,6 +2308,7 @@ static bool HandleLValueComplexElement(EvalInfo &Info, const Expr *E,
static bool evaluateVarDeclInit(EvalInfo &Info, const Expr *E,
const VarDecl *VD, CallStackFrame *Frame,
APValue *&Result) {
+
// If this is a parameter to an active constexpr function call, perform
// argument substitution.
if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD)) {
@@ -3191,9 +3242,9 @@ struct CompoundAssignSubobjectHandler {
return false;
}
- int64_t Offset = getExtValue(RHS.getInt());
+ APSInt Offset = RHS.getInt();
if (Opcode == BO_Sub)
- Offset = -Offset;
+ negateAsSigned(Offset);
LValue LVal;
LVal.setFrom(Info.Ctx, Subobj);
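For illustration (not from the patch): compound assignment on a pointer with an unsigned right-hand side now goes through negateAsSigned rather than a lossy extraction to int64_t. A minimal C++14 sketch:

    constexpr bool back_to_start() {
      int arr[4] = {};
      int *p = arr + 3;
      p -= 3u;                             // unsigned RHS is negated in a wider signed type
      return p == arr;
    }
    static_assert(back_to_start(), "");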
@@ -4148,6 +4199,10 @@ static bool HandleFunctionCall(SourceLocation CallLoc,
return false;
This->moveInto(Result);
return true;
+ } else if (MD && isLambdaCallOperator(MD)) {
+ // We're in a lambda; determine the lambda capture field maps.
+ MD->getParent()->getCaptureFields(Frame.LambdaCaptureFields,
+ Frame.LambdaThisCaptureField);
}
StmtResult Ret = {Result, ResultSlot};
@@ -4691,7 +4746,10 @@ public:
case CK_AtomicToNonAtomic: {
APValue AtomicVal;
- if (!EvaluateAtomic(E->getSubExpr(), AtomicVal, Info))
+ // This does not need to be done in place even for class/array types:
+ // atomic-to-non-atomic conversion implies copying the object
+ // representation.
+ if (!Evaluate(AtomicVal, Info, E->getSubExpr()))
return false;
return DerivedSuccess(AtomicVal, E);
}
@@ -5009,6 +5067,33 @@ bool LValueExprEvaluator::VisitDeclRefExpr(const DeclRefExpr *E) {
bool LValueExprEvaluator::VisitVarDecl(const Expr *E, const VarDecl *VD) {
+
+ // If we are within a lambda's call operator, check whether the 'VD' referred
+ // to within 'E' actually represents a lambda-capture that maps to a
+ // data-member/field within the closure object, and if so, evaluate to the
+ // field or what the field refers to.
+ if (Info.CurrentCall && isLambdaCallOperator(Info.CurrentCall->Callee)) {
+ if (auto *FD = Info.CurrentCall->LambdaCaptureFields.lookup(VD)) {
+ if (Info.checkingPotentialConstantExpression())
+ return false;
+ // Start with 'Result' referring to the complete closure object...
+ Result = *Info.CurrentCall->This;
+ // ... then update it to refer to the field of the closure object
+ // that represents the capture.
+ if (!HandleLValueMember(Info, E, Result, FD))
+ return false;
+ // And if the field is of reference type, update 'Result' to refer to what
+ // the field refers to.
+ if (FD->getType()->isReferenceType()) {
+ APValue RVal;
+ if (!handleLValueToRValueConversion(Info, E, FD->getType(), Result,
+ RVal))
+ return false;
+ Result.setFrom(Info.Ctx, RVal);
+ }
+ return true;
+ }
+ }
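A hypothetical C++17 example (not part of the patch) of the capture lookup above: a by-value capture named inside the call operator is routed to the corresponding closure field.

    constexpr int twice(int n) {
      auto f = [n] { return n * 2; };      // 'n' resolves through LambdaCaptureFields
      return f();
    }
    static_assert(twice(21) == 42, "");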
CallStackFrame *Frame = nullptr;
if (VD->hasLocalStorage() && Info.CurrentCall->Index > 1) {
// Only if a local variable was declared in the function currently being
@@ -5162,8 +5247,7 @@ bool LValueExprEvaluator::VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
if (!EvaluateInteger(E->getIdx(), Index, Info))
return false;
- return HandleLValueArrayAdjustment(Info, E, Result, E->getType(),
- getExtValue(Index));
+ return HandleLValueArrayAdjustment(Info, E, Result, E->getType(), Index);
}
bool LValueExprEvaluator::VisitUnaryDeref(const UnaryOperator *E) {
@@ -5409,6 +5493,27 @@ public:
return false;
}
Result = *Info.CurrentCall->This;
+ // If we are inside a lambda's call operator, the 'this' expression refers
+ // to the enclosing '*this' object (either by value or reference) which is
+ // either copied into the closure object's field that represents the '*this'
+ // or refers to '*this'.
+ if (isLambdaCallOperator(Info.CurrentCall->Callee)) {
+ // Update 'Result' to refer to the data member/field of the closure object
+ // that represents the '*this' capture.
+ if (!HandleLValueMember(Info, E, Result,
+ Info.CurrentCall->LambdaThisCaptureField))
+ return false;
+ // If we captured '*this' by reference, replace the field with its referent.
+ if (Info.CurrentCall->LambdaThisCaptureField->getType()
+ ->isPointerType()) {
+ APValue RVal;
+ if (!handleLValueToRValueConversion(Info, E, E->getType(), Result,
+ RVal))
+ return false;
+
+ Result.setFrom(Info.Ctx, RVal);
+ }
+ }
return true;
}
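Not from the patch, but a rough sketch of the '*this' capture path, assuming C++17 constexpr lambdas:

    struct Counter {
      int value = 5;
      constexpr int next() const {
        auto f = [this] { return value + 1; };   // 'this' is stored as a pointer-typed field
        return f();
      }
    };
    static_assert(Counter().next() == 6, "");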
@@ -5440,13 +5545,11 @@ bool PointerExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
if (!EvaluateInteger(IExp, Offset, Info) || !EvalPtrOK)
return false;
- int64_t AdditionalOffset = getExtValue(Offset);
if (E->getOpcode() == BO_Sub)
- AdditionalOffset = -AdditionalOffset;
+ negateAsSigned(Offset);
QualType Pointee = PExp->getType()->castAs<PointerType>()->getPointeeType();
- return HandleLValueArrayAdjustment(Info, E, Result, Pointee,
- AdditionalOffset);
+ return HandleLValueArrayAdjustment(Info, E, Result, Pointee, Offset);
}
bool PointerExprEvaluator::VisitUnaryAddrOf(const UnaryOperator *E) {
@@ -5576,6 +5679,8 @@ static CharUnits GetAlignOfType(EvalInfo &Info, QualType T) {
T = Ref->getPointeeType();
// __alignof is defined to return the preferred alignment.
+ if (T.getQualifiers().hasUnaligned())
+ return CharUnits::One();
return Info.Ctx.toCharUnitsFromBits(
Info.Ctx.getPreferredTypeAlign(T.getTypePtr()));
}
@@ -5640,14 +5745,14 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
APSInt Alignment;
if (!EvaluateInteger(E->getArg(1), Alignment, Info))
return false;
- CharUnits Align = CharUnits::fromQuantity(getExtValue(Alignment));
+ CharUnits Align = CharUnits::fromQuantity(Alignment.getZExtValue());
if (E->getNumArgs() > 2) {
APSInt Offset;
if (!EvaluateInteger(E->getArg(2), Offset, Info))
return false;
- int64_t AdditionalOffset = -getExtValue(Offset);
+ int64_t AdditionalOffset = -Offset.getZExtValue();
OffsetResult.Offset += CharUnits::fromQuantity(AdditionalOffset);
}
@@ -5664,12 +5769,11 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
if (BaseAlignment < Align) {
Result.Designator.setInvalid();
- // FIXME: Quantities here cast to integers because the plural modifier
- // does not work on APSInts yet.
+ // FIXME: Add support to Diagnostic for long / long long.
CCEDiag(E->getArg(0),
diag::note_constexpr_baa_insufficient_alignment) << 0
- << (int) BaseAlignment.getQuantity()
- << (unsigned) getExtValue(Alignment);
+ << (unsigned)BaseAlignment.getQuantity()
+ << (unsigned)Align.getQuantity();
return false;
}
}
@@ -5677,18 +5781,14 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
// The offset must also have the correct alignment.
if (OffsetResult.Offset.alignTo(Align) != OffsetResult.Offset) {
Result.Designator.setInvalid();
- APSInt Offset(64, false);
- Offset = OffsetResult.Offset.getQuantity();
-
- if (OffsetResult.Base)
- CCEDiag(E->getArg(0),
- diag::note_constexpr_baa_insufficient_alignment) << 1
- << (int) getExtValue(Offset) << (unsigned) getExtValue(Alignment);
- else
- CCEDiag(E->getArg(0),
- diag::note_constexpr_baa_value_insufficient_alignment)
- << Offset << (unsigned) getExtValue(Alignment);
+ (OffsetResult.Base
+ ? CCEDiag(E->getArg(0),
+ diag::note_constexpr_baa_insufficient_alignment) << 1
+ : CCEDiag(E->getArg(0),
+ diag::note_constexpr_baa_value_insufficient_alignment))
+ << (int)OffsetResult.Offset.getQuantity()
+ << (unsigned)Align.getQuantity();
return false;
}
@@ -6245,14 +6345,40 @@ bool RecordExprEvaluator::VisitLambdaExpr(const LambdaExpr *E) {
if (ClosureClass->isInvalidDecl()) return false;
if (Info.checkingPotentialConstantExpression()) return true;
- if (E->capture_size()) {
- Info.FFDiag(E, diag::note_unimplemented_constexpr_lambda_feature_ast)
- << "can not evaluate lambda expressions with captures";
- return false;
+
+ const size_t NumFields =
+ std::distance(ClosureClass->field_begin(), ClosureClass->field_end());
+
+ assert(NumFields == (size_t)std::distance(E->capture_init_begin(),
+ E->capture_init_end()) &&
+ "The number of lambda capture initializers should equal the number of "
+ "fields within the closure type");
+
+ Result = APValue(APValue::UninitStruct(), /*NumBases*/0, NumFields);
+ // Iterate through all the lambda's closure object's fields and initialize
+ // them.
+ auto *CaptureInitIt = E->capture_init_begin();
+ const LambdaCapture *CaptureIt = ClosureClass->captures_begin();
+ bool Success = true;
+ for (const auto *Field : ClosureClass->fields()) {
+ assert(CaptureInitIt != E->capture_init_end());
+ // Get the initializer for this field
+ Expr *const CurFieldInit = *CaptureInitIt++;
+
+ // If there is no initializer, either this is a VLA or an error has
+ // occurred.
+ if (!CurFieldInit)
+ return Error(E);
+
+ APValue &FieldVal = Result.getStructField(Field->getFieldIndex());
+ if (!EvaluateInPlace(FieldVal, Info, This, CurFieldInit)) {
+ if (!Info.keepEvaluatingAfterFailure())
+ return false;
+ Success = false;
+ }
+ ++CaptureIt;
}
- // FIXME: Implement captures.
- Result = APValue(APValue::UninitStruct(), /*NumBases*/0, /*NumFields*/0);
- return true;
+ return Success;
}
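A hypothetical C++17 example (not part of the patch) exercising the capture-initialization loop above, including an init-capture:

    constexpr auto make_adder(int n) {
      return [m = n + 1](int x) { return x + m; };   // closure field initialized from the init-capture
    }
    static_assert(make_adder(1)(40) == 42, "");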
static bool EvaluateRecord(const Expr *E, const LValue &This,
@@ -6971,7 +7097,6 @@ static int EvaluateBuiltinClassifyType(const CallExpr *E,
case BuiltinType::OCLEvent:
case BuiltinType::OCLClkEvent:
case BuiltinType::OCLQueue:
- case BuiltinType::OCLNDRange:
case BuiltinType::OCLReserveID:
case BuiltinType::Dependent:
llvm_unreachable("CallExpr::isBuiltinClassifyType(): unimplemented type");
@@ -7030,6 +7155,7 @@ static int EvaluateBuiltinClassifyType(const CallExpr *E,
case Type::Vector:
case Type::ExtVector:
case Type::Auto:
+ case Type::DeducedTemplateSpecialization:
case Type::ObjCObject:
case Type::ObjCInterface:
case Type::ObjCObjectPointer:
@@ -7948,6 +8074,19 @@ bool DataRecursiveIntBinOpEvaluator::
return true;
}
+static void addOrSubLValueAsInteger(APValue &LVal, const APSInt &Index,
+ bool IsSub) {
+ // Compute the new offset in the appropriate width, wrapping at 64 bits.
+ // FIXME: When compiling for a 32-bit target, we should use 32-bit
+ // offsets.
+ assert(!LVal.hasLValuePath() && "have designator for integer lvalue");
+ CharUnits &Offset = LVal.getLValueOffset();
+ uint64_t Offset64 = Offset.getQuantity();
+ uint64_t Index64 = Index.extOrTrunc(64).getZExtValue();
+ Offset = CharUnits::fromQuantity(IsSub ? Offset64 - Index64
+ : Offset64 + Index64);
+}
+
bool DataRecursiveIntBinOpEvaluator::
VisitBinOp(const EvalResult &LHSResult, const EvalResult &RHSResult,
const BinaryOperator *E, APValue &Result) {
@@ -7994,12 +8133,7 @@ bool DataRecursiveIntBinOpEvaluator::
// Handle cases like (unsigned long)&a + 4.
if (E->isAdditiveOp() && LHSVal.isLValue() && RHSVal.isInt()) {
Result = LHSVal;
- CharUnits AdditionalOffset =
- CharUnits::fromQuantity(RHSVal.getInt().getZExtValue());
- if (E->getOpcode() == BO_Add)
- Result.getLValueOffset() += AdditionalOffset;
- else
- Result.getLValueOffset() -= AdditionalOffset;
+ addOrSubLValueAsInteger(Result, RHSVal.getInt(), E->getOpcode() == BO_Sub);
return true;
}
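For illustration only: the "(unsigned long)&a + 4" comment above refers to folding integer arithmetic on a pointer cast to an integer; the offset bookkeeping now wraps at 64 bits in one helper for both operand orders. A sketch of the static initializers this fold supports (implementation-dependent constant folding, not strict constexpr):

    extern int a;
    unsigned long p1 = (unsigned long)&a + 8;   // folds to &a plus a byte offset
    unsigned long p2 = 8 + (unsigned long)&a;   // integer on the left folds the same way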
@@ -8007,8 +8141,7 @@ bool DataRecursiveIntBinOpEvaluator::
if (E->getOpcode() == BO_Add &&
RHSVal.isLValue() && LHSVal.isInt()) {
Result = RHSVal;
- Result.getLValueOffset() +=
- CharUnits::fromQuantity(LHSVal.getInt().getZExtValue());
+ addOrSubLValueAsInteger(Result, LHSVal.getInt(), /*IsSub*/false);
return true;
}
@@ -9565,10 +9698,11 @@ bool ComplexExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
namespace {
class AtomicExprEvaluator :
public ExprEvaluatorBase<AtomicExprEvaluator> {
+ const LValue *This;
APValue &Result;
public:
- AtomicExprEvaluator(EvalInfo &Info, APValue &Result)
- : ExprEvaluatorBaseTy(Info), Result(Result) {}
+ AtomicExprEvaluator(EvalInfo &Info, const LValue *This, APValue &Result)
+ : ExprEvaluatorBaseTy(Info), This(This), Result(Result) {}
bool Success(const APValue &V, const Expr *E) {
Result = V;
@@ -9578,7 +9712,10 @@ public:
bool ZeroInitialization(const Expr *E) {
ImplicitValueInitExpr VIE(
E->getType()->castAs<AtomicType>()->getValueType());
- return Evaluate(Result, Info, &VIE);
+ // For atomic-qualified class (and array) types in C++, initialize the
+ // _Atomic-wrapped subobject directly, in-place.
+ return This ? EvaluateInPlace(Result, Info, *This, &VIE)
+ : Evaluate(Result, Info, &VIE);
}
bool VisitCastExpr(const CastExpr *E) {
@@ -9586,15 +9723,17 @@ public:
default:
return ExprEvaluatorBaseTy::VisitCastExpr(E);
case CK_NonAtomicToAtomic:
- return Evaluate(Result, Info, E->getSubExpr());
+ return This ? EvaluateInPlace(Result, Info, *This, E->getSubExpr())
+ : Evaluate(Result, Info, E->getSubExpr());
}
}
};
} // end anonymous namespace
-static bool EvaluateAtomic(const Expr *E, APValue &Result, EvalInfo &Info) {
+static bool EvaluateAtomic(const Expr *E, const LValue *This, APValue &Result,
+ EvalInfo &Info) {
assert(E->isRValue() && E->getType()->isAtomicType());
- return AtomicExprEvaluator(Info, Result).Visit(E);
+ return AtomicExprEvaluator(Info, This, Result).Visit(E);
}
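Not part of the patch, a rough sketch of the kind of initialization the atomic changes target, assuming Clang accepts the C11 _Atomic qualifier in C++ as an extension:

    struct Point { int x, y; };
    // The wrapped Point is evaluated in place via the 'This' lvalue that is now
    // threaded through EvaluateAtomic.
    _Atomic Point origin = Point{1, 2};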
//===----------------------------------------------------------------------===//
@@ -9699,8 +9838,17 @@ static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E) {
if (!EvaluateVoid(E, Info))
return false;
} else if (T->isAtomicType()) {
- if (!EvaluateAtomic(E, Result, Info))
- return false;
+ QualType Unqual = T.getAtomicUnqualifiedType();
+ if (Unqual->isArrayType() || Unqual->isRecordType()) {
+ LValue LV;
+ LV.set(E, Info.CurrentCall->Index);
+ APValue &Value = Info.CurrentCall->createTemporary(E, false);
+ if (!EvaluateAtomic(E, &LV, Value, Info))
+ return false;
+ } else {
+ if (!EvaluateAtomic(E, nullptr, Result, Info))
+ return false;
+ }
} else if (Info.getLangOpts().CPlusPlus11) {
Info.FFDiag(E, diag::note_constexpr_nonliteral) << E->getType();
return false;
@@ -9725,10 +9873,16 @@ static bool EvaluateInPlace(APValue &Result, EvalInfo &Info, const LValue &This,
if (E->isRValue()) {
// Evaluate arrays and record types in-place, so that later initializers can
// refer to earlier-initialized members of the object.
- if (E->getType()->isArrayType())
+ QualType T = E->getType();
+ if (T->isArrayType())
return EvaluateArray(E, This, Result, Info);
- else if (E->getType()->isRecordType())
+ else if (T->isRecordType())
return EvaluateRecord(E, This, Result, Info);
+ else if (T->isAtomicType()) {
+ QualType Unqual = T.getAtomicUnqualifiedType();
+ if (Unqual->isArrayType() || Unqual->isRecordType())
+ return EvaluateAtomic(E, &This, Result, Info);
+ }
}
// For any other type, in-place evaluation is unimportant.
@@ -9945,7 +10099,7 @@ bool Expr::EvalResult::isGlobalLValue() const {
// Note that to reduce code duplication, this helper does no evaluation
// itself; the caller checks whether the expression is evaluatable, and
// in the rare cases where CheckICE actually cares about the evaluated
-// value, it calls into Evalute.
+// value, it calls into Evaluate.
namespace {
@@ -10067,6 +10221,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
case Expr::LambdaExprClass:
case Expr::CXXFoldExprClass:
case Expr::CoawaitExprClass:
+ case Expr::DependentCoawaitExprClass:
case Expr::CoyieldExprClass:
return ICEDiag(IK_NotICE, E->getLocStart());