Diffstat (limited to 'lib/CodeGen/CGExprScalar.cpp')
-rw-r--r--  lib/CodeGen/CGExprScalar.cpp  240
1 file changed, 164 insertions(+), 76 deletions(-)
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index 140e9aa3445f..a9cbf05da104 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -85,18 +85,54 @@ public:
return CGF.EmitCheckedLValue(E, TCK);
}
- void EmitBinOpCheck(Value *Check, const BinOpInfo &Info);
+ void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerKind>> Checks,
+ const BinOpInfo &Info);
Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
}
+ void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
+ const AlignValueAttr *AVAttr = nullptr;
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
+ const ValueDecl *VD = DRE->getDecl();
+
+ if (VD->getType()->isReferenceType()) {
+ if (const auto *TTy =
+ dyn_cast<TypedefType>(VD->getType().getNonReferenceType()))
+ AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
+ } else {
+ // Assumptions for function parameters are emitted at the start of the
+ // function, so there is no need to repeat that here.
+ if (isa<ParmVarDecl>(VD))
+ return;
+
+ AVAttr = VD->getAttr<AlignValueAttr>();
+ }
+ }
+
+ if (!AVAttr)
+ if (const auto *TTy =
+ dyn_cast<TypedefType>(E->getType()))
+ AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
+
+ if (!AVAttr)
+ return;
+
+ Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
+ llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
+ CGF.EmitAlignmentAssumption(V, AlignmentCI->getZExtValue());
+ }
+
/// EmitLoadOfLValue - Given an expression with complex type that represents a
/// value l-value, this method emits the address of the l-value, then loads
/// and returns the result.
Value *EmitLoadOfLValue(const Expr *E) {
- return EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
- E->getExprLoc());
+ Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
+ E->getExprLoc());
+
+ EmitLValueAlignmentAssumption(E, V);
+ return V;
}
/// EmitConversionToBool - Convert the specified expression value to a
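
As an aside, a minimal C sketch of the situation the new EmitLValueAlignmentAssumption path covers (the typedef and variable names are hypothetical): loading a value whose type carries an align_value attribute lets codegen attach an alignment assumption to the loaded pointer.

    typedef double *aligned_ptr __attribute__((align_value(64)));
    aligned_ptr g;

    double first(void) {
      /* The load of 'g' goes through EmitLoadOfLValue; the typedef's
         align_value(64) becomes an alignment assumption on the result. */
      return *g;
    }
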
@@ -160,6 +196,7 @@ public:
//===--------------------------------------------------------------------===//
Value *Visit(Expr *E) {
+ ApplyDebugLocation DL(CGF, E->getLocStart());
return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
}
@@ -274,6 +311,10 @@ public:
Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
if (E->getType()->isVariablyModifiedType())
CGF.EmitVariablyModifiedType(E->getType());
+
+ if (CGDebugInfo *DI = CGF.getDebugInfo())
+ DI->EmitExplicitCastType(E->getType());
+
return VisitCastExpr(E);
}
Value *VisitCastExpr(CastExpr *E);
@@ -282,7 +323,10 @@ public:
if (E->getCallReturnType()->isReferenceType())
return EmitLoadOfLValue(E);
- return CGF.EmitCallExpr(E).getScalarVal();
+ Value *V = CGF.EmitCallExpr(E).getScalarVal();
+
+ EmitLValueAlignmentAssumption(E, V);
+ return V;
}
Value *VisitStmtExpr(const StmtExpr *E);
@@ -410,7 +454,7 @@ public:
case LangOptions::SOB_Defined:
return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
case LangOptions::SOB_Undefined:
- if (!CGF.SanOpts->SignedIntegerOverflow)
+ if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
// Fall through.
case LangOptions::SOB_Trapping:
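
For orientation, the SOB_Undefined branch above only emits a bare nsw multiply when the signed-integer-overflow sanitizer is off; with it enabled the code falls through to the checked path. A small C illustration:

    /* clang -fsanitize=signed-integer-overflow */
    int square(int x) {
      /* Lowered via EmitOverflowCheckedBinOp: llvm.smul.with.overflow
         plus a branch to the ubsan runtime, rather than a plain mul nsw. */
      return x * x;
    }
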
@@ -418,7 +462,8 @@ public:
}
}
- if (Ops.Ty->isUnsignedIntegerType() && CGF.SanOpts->UnsignedIntegerOverflow)
+ if (Ops.Ty->isUnsignedIntegerType() &&
+ CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow))
return EmitOverflowCheckedBinOp(Ops);
if (Ops.LHS->getType()->isFPOrFPVectorTy())
@@ -682,8 +727,8 @@ void ScalarExprEmitter::EmitFloatConversionCheck(Value *OrigSrc,
CGF.EmitCheckTypeDescriptor(OrigSrcType),
CGF.EmitCheckTypeDescriptor(DstType)
};
- CGF.EmitCheck(Check, "float_cast_overflow", StaticArgs, OrigSrc,
- CodeGenFunction::CRK_Recoverable);
+ CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
+ "float_cast_overflow", StaticArgs, OrigSrc);
}
/// EmitScalarConversion - Emit a conversion from the specified type to the
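
The float_cast_overflow handler invoked here guards conversions such as the following (a hedged C sketch; the value is just one example of an out-of-range input):

    /* clang -fsanitize=float-cast-overflow */
    int to_int(double d) {
      /* Undefined if d is outside int's range (e.g. 1e30); the check
         emitted above diagnoses it instead of yielding an undefined value. */
      return (int)d;
    }
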
@@ -701,7 +746,8 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
llvm::Type *SrcTy = Src->getType();
// If casting to/from storage-only half FP, use special intrinsics.
- if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
+ if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType &&
+ !CGF.getContext().getLangOpts().HalfArgsAndReturns) {
Src = Builder.CreateCall(
CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
CGF.CGM.FloatTy),
@@ -767,13 +813,14 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
// An overflowing conversion has undefined behavior if either the source type
// or the destination type is a floating-point type.
- if (CGF.SanOpts->FloatCastOverflow &&
+ if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
(OrigSrcType->isFloatingType() || DstType->isFloatingType()))
EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType,
DstTy);
// Cast to half via float
- if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType)
+ if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType &&
+ !CGF.getContext().getLangOpts().HalfArgsAndReturns)
DstTy = CGF.FloatTy;
if (isa<llvm::IntegerType>(SrcTy)) {
@@ -839,8 +886,10 @@ Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
/// \brief Emit a sanitization check for the given "binary" operation (which
/// might actually be a unary increment which has been lowered to a binary
-/// operation). The check passes if \p Check, which is an \c i1, is \c true.
-void ScalarExprEmitter::EmitBinOpCheck(Value *Check, const BinOpInfo &Info) {
+/// operation). The check passes if all values in \p Checks (which are \c i1),
+/// are \c true.
+void ScalarExprEmitter::EmitBinOpCheck(
+ ArrayRef<std::pair<Value *, SanitizerKind>> Checks, const BinOpInfo &Info) {
assert(CGF.IsSanitizerScope);
StringRef CheckName;
SmallVector<llvm::Constant *, 4> StaticData;
@@ -870,7 +919,7 @@ void ScalarExprEmitter::EmitBinOpCheck(Value *Check, const BinOpInfo &Info) {
CheckName = "divrem_overflow";
StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
} else {
- // Signed arithmetic overflow (+, -, *).
+ // Arithmetic overflow (+, -, *).
switch (Opcode) {
case BO_Add: CheckName = "add_overflow"; break;
case BO_Sub: CheckName = "sub_overflow"; break;
@@ -883,8 +932,7 @@ void ScalarExprEmitter::EmitBinOpCheck(Value *Check, const BinOpInfo &Info) {
DynamicData.push_back(Info.RHS);
}
- CGF.EmitCheck(Check, CheckName, StaticData, DynamicData,
- CodeGenFunction::CRK_Recoverable);
+ CGF.EmitCheck(Checks, CheckName, StaticData, DynamicData);
}
//===----------------------------------------------------------------------===//
@@ -1076,7 +1124,7 @@ Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
Value *Idx = Visit(E->getIdx());
QualType IdxTy = E->getIdx()->getType();
- if (CGF.SanOpts->ArrayBounds)
+ if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);
return Builder.CreateExtractElement(Base, Idx, "vecext");
@@ -1304,8 +1352,8 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
llvm::Type *DstTy = ConvertType(DestTy);
if (SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() &&
SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) {
- llvm::Type *MidTy = CGF.CGM.getDataLayout().getIntPtrType(SrcTy);
- return Builder.CreateIntToPtr(Builder.CreatePtrToInt(Src, MidTy), DstTy);
+ llvm_unreachable("wrong cast for pointers in different address spaces"
+ "(must be an address space cast)!");
}
return Builder.CreateBitCast(Src, DstTy);
}
@@ -1344,9 +1392,9 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
E->getType()->getPointeeCXXRecordDecl();
assert(DerivedClassDecl && "DerivedToBase arg isn't a C++ object pointer!");
- return CGF.GetAddressOfBaseClass(Visit(E), DerivedClassDecl,
- CE->path_begin(), CE->path_end(),
- ShouldNullCheckClassCastValue(CE));
+ return CGF.GetAddressOfBaseClass(
+ Visit(E), DerivedClassDecl, CE->path_begin(), CE->path_end(),
+ ShouldNullCheckClassCastValue(CE), CE->getExprLoc());
}
case CK_Dynamic: {
Value *V = Visit(const_cast<Expr*>(E));
@@ -1364,8 +1412,11 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
// anything here.
if (!E->getType()->isVariableArrayType()) {
assert(isa<llvm::PointerType>(V->getType()) && "Expected pointer");
- assert(isa<llvm::ArrayType>(cast<llvm::PointerType>(V->getType())
- ->getElementType()) &&
+ V = CGF.Builder.CreatePointerCast(
+ V, ConvertType(E->getType())->getPointerTo(
+ V->getType()->getPointerAddressSpace()));
+
+ assert(isa<llvm::ArrayType>(V->getType()->getPointerElementType()) &&
"Expected pointer to array");
V = Builder.CreateStructGEP(V, 0, "arraydecay");
}
@@ -1528,7 +1579,7 @@ EmitAddConsiderOverflowBehavior(const UnaryOperator *E,
case LangOptions::SOB_Defined:
return Builder.CreateAdd(InVal, NextVal, IsInc ? "inc" : "dec");
case LangOptions::SOB_Undefined:
- if (!CGF.SanOpts->SignedIntegerOverflow)
+ if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
return Builder.CreateNSWAdd(InVal, NextVal, IsInc ? "inc" : "dec");
// Fall through.
case LangOptions::SOB_Trapping:
@@ -1576,9 +1627,9 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
// checking, and fall into the slow path with the atomic cmpxchg loop.
if (!type->isBooleanType() && type->isIntegerType() &&
!(type->isUnsignedIntegerType() &&
- CGF.SanOpts->UnsignedIntegerOverflow) &&
+ CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
CGF.getLangOpts().getSignedOverflowBehavior() !=
- LangOptions::SOB_Trapping) {
+ LangOptions::SOB_Trapping) {
llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
llvm::AtomicRMWInst::Sub;
llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
@@ -1627,7 +1678,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
if (CanOverflow && type->isSignedIntegerOrEnumerationType()) {
value = EmitAddConsiderOverflowBehavior(E, value, amt, isInc);
} else if (CanOverflow && type->isUnsignedIntegerType() &&
- CGF.SanOpts->UnsignedIntegerOverflow) {
+ CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
BinOpInfo BinOp;
BinOp.LHS = value;
BinOp.RHS = llvm::ConstantInt::get(value->getType(), 1, false);
@@ -1691,7 +1742,8 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
// Add the inc/dec to the real part.
llvm::Value *amt;
- if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
+ if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType &&
+ !CGF.getContext().getLangOpts().HalfArgsAndReturns) {
// Another special case: half FP increment should be done via float
value = Builder.CreateCall(
CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
@@ -1714,7 +1766,8 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
}
value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");
- if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType)
+ if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType &&
+ !CGF.getContext().getLangOpts().HalfArgsAndReturns)
value = Builder.CreateCall(
CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
CGF.CGM.FloatTy),
@@ -1740,11 +1793,11 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
if (atomicPHI) {
llvm::BasicBlock *opBB = Builder.GetInsertBlock();
llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
- llvm::Value *pair = Builder.CreateAtomicCmpXchg(
- LV.getAddress(), atomicPHI, CGF.EmitToMemory(value, type),
- llvm::SequentiallyConsistent, llvm::SequentiallyConsistent);
- llvm::Value *old = Builder.CreateExtractValue(pair, 0);
- llvm::Value *success = Builder.CreateExtractValue(pair, 1);
+ auto Pair = CGF.EmitAtomicCompareExchange(
+ LV, RValue::get(atomicPHI), RValue::get(CGF.EmitToMemory(value, type)),
+ E->getExprLoc());
+ llvm::Value *old = Pair.first.getScalarVal();
+ llvm::Value *success = Pair.second.getScalarVal();
atomicPHI->addIncoming(old, opBB);
Builder.CreateCondBr(success, contBB, opBB);
Builder.SetInsertPoint(contBB);
@@ -2019,10 +2072,10 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
QualType type = atomicTy->getValueType();
if (!type->isBooleanType() && type->isIntegerType() &&
- !(type->isUnsignedIntegerType() &&
- CGF.SanOpts->UnsignedIntegerOverflow) &&
- CGF.getLangOpts().getSignedOverflowBehavior() !=
- LangOptions::SOB_Trapping) {
+ !(type->isUnsignedIntegerType() &&
+ CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
+ CGF.getLangOpts().getSignedOverflowBehavior() !=
+ LangOptions::SOB_Trapping) {
llvm::AtomicRMWInst::BinOp aop = llvm::AtomicRMWInst::BAD_BINOP;
switch (OpInfo.Opcode) {
// We don't have atomicrmw operands for *, %, /, <<, >>
@@ -2084,11 +2137,11 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
if (atomicPHI) {
llvm::BasicBlock *opBB = Builder.GetInsertBlock();
llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
- llvm::Value *pair = Builder.CreateAtomicCmpXchg(
- LHSLV.getAddress(), atomicPHI, CGF.EmitToMemory(Result, LHSTy),
- llvm::SequentiallyConsistent, llvm::SequentiallyConsistent);
- llvm::Value *old = Builder.CreateExtractValue(pair, 0);
- llvm::Value *success = Builder.CreateExtractValue(pair, 1);
+ auto Pair = CGF.EmitAtomicCompareExchange(
+ LHSLV, RValue::get(atomicPHI),
+ RValue::get(CGF.EmitToMemory(Result, LHSTy)), E->getExprLoc());
+ llvm::Value *old = Pair.first.getScalarVal();
+ llvm::Value *success = Pair.second.getScalarVal();
atomicPHI->addIncoming(old, opBB);
Builder.CreateCondBr(success, contBB, opBB);
Builder.SetInsertPoint(contBB);
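
For context, this cmpxchg loop is the path taken by compound assignments on atomic types with no matching atomicrmw operation; a small C sketch (there is no atomic floating-point RMW instruction at this point, so the loop is what gets emitted):

    #include <stdatomic.h>

    void bump(_Atomic float *f) {
      /* load, add, then compare-exchange in a loop until it succeeds */
      *f += 1.0f;
    }
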
@@ -2131,12 +2184,14 @@ Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
- llvm::Value *Cond = nullptr;
+ SmallVector<std::pair<llvm::Value *, SanitizerKind>, 2> Checks;
- if (CGF.SanOpts->IntegerDivideByZero)
- Cond = Builder.CreateICmpNE(Ops.RHS, Zero);
+ if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
+ Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
+ SanitizerKind::IntegerDivideByZero));
+ }
- if (CGF.SanOpts->SignedIntegerOverflow &&
+ if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
Ops.Ty->hasSignedIntegerRepresentation()) {
llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
@@ -2146,26 +2201,29 @@ void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
- llvm::Value *Overflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
- Cond = Cond ? Builder.CreateAnd(Cond, Overflow, "and") : Overflow;
+ llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
+ Checks.push_back(
+ std::make_pair(NotOverflow, SanitizerKind::SignedIntegerOverflow));
}
- if (Cond)
- EmitBinOpCheck(Cond, Ops);
+ if (Checks.size() > 0)
+ EmitBinOpCheck(Checks, Ops);
}
Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
{
CodeGenFunction::SanitizerScope SanScope(&CGF);
- if ((CGF.SanOpts->IntegerDivideByZero ||
- CGF.SanOpts->SignedIntegerOverflow) &&
+ if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
+ CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
Ops.Ty->isIntegerType()) {
llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
- } else if (CGF.SanOpts->FloatDivideByZero &&
+ } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
Ops.Ty->isRealFloatingType()) {
llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
- EmitBinOpCheck(Builder.CreateFCmpUNE(Ops.RHS, Zero), Ops);
+ llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
+ EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero),
+ Ops);
}
}
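
The two conditions collected above cover the classic cases; as a plain C reminder of the inputs they fire on:

    /* clang -fsanitize=integer-divide-by-zero,signed-integer-overflow */
    int divide(int a, int b) {
      /* b == 0 trips the divide-by-zero check; a == INT_MIN && b == -1
         trips the signed-overflow check, the one signed division that
         overflows. */
      return a / b;
    }
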
@@ -2189,7 +2247,7 @@ Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
// Rem in C can't be a floating point type: C99 6.5.5p2.
- if (CGF.SanOpts->IntegerDivideByZero) {
+ if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
CodeGenFunction::SanitizerScope SanScope(&CGF);
llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
@@ -2248,9 +2306,12 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
if (handlerName->empty()) {
// If the signed-integer-overflow sanitizer is enabled, emit a call to its
// runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
- if (!isSigned || CGF.SanOpts->SignedIntegerOverflow) {
+ if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
CodeGenFunction::SanitizerScope SanScope(&CGF);
- EmitBinOpCheck(Builder.CreateNot(overflow), Ops);
+ llvm::Value *NotOverflow = Builder.CreateNot(overflow);
+ SanitizerKind Kind = isSigned ? SanitizerKind::SignedIntegerOverflow
+ : SanitizerKind::UnsignedIntegerOverflow;
+ EmitBinOpCheck(std::make_pair(NotOverflow, Kind), Ops);
} else
CGF.EmitTrapCheck(Builder.CreateNot(overflow));
return result;
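
The only change above is which SanitizerKind gets attached to the overflow bit; the kind follows the signedness of the operation, e.g. (C sketch):

    /* clang -fsanitize=unsigned-integer-overflow */
    unsigned wrap(unsigned a, unsigned b) {
      /* Emitted through llvm.uadd.with.overflow; the failure branch now
         reports under the unsigned-integer-overflow kind chosen above. */
      return a + b;
    }
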
@@ -2336,7 +2397,7 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
if (isSubtraction)
index = CGF.Builder.CreateNeg(index, "idx.neg");
- if (CGF.SanOpts->ArrayBounds)
+ if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(),
/*Accessed*/ false);
@@ -2476,7 +2537,7 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
case LangOptions::SOB_Defined:
return Builder.CreateAdd(op.LHS, op.RHS, "add");
case LangOptions::SOB_Undefined:
- if (!CGF.SanOpts->SignedIntegerOverflow)
+ if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
// Fall through.
case LangOptions::SOB_Trapping:
@@ -2484,7 +2545,8 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
}
}
- if (op.Ty->isUnsignedIntegerType() && CGF.SanOpts->UnsignedIntegerOverflow)
+ if (op.Ty->isUnsignedIntegerType() &&
+ CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow))
return EmitOverflowCheckedBinOp(op);
if (op.LHS->getType()->isFPOrFPVectorTy()) {
@@ -2506,7 +2568,7 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
case LangOptions::SOB_Defined:
return Builder.CreateSub(op.LHS, op.RHS, "sub");
case LangOptions::SOB_Undefined:
- if (!CGF.SanOpts->SignedIntegerOverflow)
+ if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
// Fall through.
case LangOptions::SOB_Trapping:
@@ -2514,7 +2576,8 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
}
}
- if (op.Ty->isUnsignedIntegerType() && CGF.SanOpts->UnsignedIntegerOverflow)
+ if (op.Ty->isUnsignedIntegerType() &&
+ CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow))
return EmitOverflowCheckedBinOp(op);
if (op.LHS->getType()->isFPOrFPVectorTy()) {
@@ -2601,7 +2664,7 @@ Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
if (Ops.LHS->getType() != RHS->getType())
RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
- if (CGF.SanOpts->Shift && !CGF.getLangOpts().OpenCL &&
+ if (CGF.SanOpts.has(SanitizerKind::Shift) && !CGF.getLangOpts().OpenCL &&
isa<llvm::IntegerType>(Ops.LHS->getType())) {
CodeGenFunction::SanitizerScope SanScope(&CGF);
llvm::Value *WidthMinusOne = GetWidthMinusOneValue(Ops.LHS, RHS);
@@ -2638,7 +2701,7 @@ Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
Valid = P;
}
- EmitBinOpCheck(Valid, Ops);
+ EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::Shift), Ops);
}
// OpenCL 6.3j: shift values are effectively % word size of LHS.
if (CGF.getLangOpts().OpenCL)
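
Outside OpenCL, the shift check assembled above rejects the usual undefined cases; a brief C example:

    /* clang -fsanitize=shift */
    int sh(int x, int s) {
      /* Flagged when s is >= the bit width of int (and, for signed left
         shifts, when the result would not be representable). */
      return x << s;
    }
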
@@ -2654,10 +2717,12 @@ Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
if (Ops.LHS->getType() != RHS->getType())
RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
- if (CGF.SanOpts->Shift && !CGF.getLangOpts().OpenCL &&
+ if (CGF.SanOpts.has(SanitizerKind::Shift) && !CGF.getLangOpts().OpenCL &&
isa<llvm::IntegerType>(Ops.LHS->getType())) {
CodeGenFunction::SanitizerScope SanScope(&CGF);
- EmitBinOpCheck(Builder.CreateICmpULE(RHS, GetWidthMinusOneValue(Ops.LHS, RHS)), Ops);
+ llvm::Value *Valid =
+ Builder.CreateICmpULE(RHS, GetWidthMinusOneValue(Ops.LHS, RHS));
+ EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::Shift), Ops);
}
// OpenCL 6.3j: shift values are effectively % word size of LHS.
@@ -2708,6 +2773,7 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
TestAndClearIgnoreResultAssign();
Value *Result;
QualType LHSTy = E->getLHS()->getType();
+ QualType RHSTy = E->getRHS()->getType();
if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
assert(E->getOpcode() == BO_EQ ||
E->getOpcode() == BO_NE);
@@ -2715,7 +2781,7 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
Value *RHS = CGF.EmitScalarExpr(E->getRHS());
Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
- } else if (!LHSTy->isAnyComplexType()) {
+ } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
Value *LHS = Visit(E->getLHS());
Value *RHS = Visit(E->getRHS());
@@ -2803,10 +2869,28 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
} else {
// Complex Comparison: can only be an equality comparison.
- CodeGenFunction::ComplexPairTy LHS = CGF.EmitComplexExpr(E->getLHS());
- CodeGenFunction::ComplexPairTy RHS = CGF.EmitComplexExpr(E->getRHS());
-
- QualType CETy = LHSTy->getAs<ComplexType>()->getElementType();
+ CodeGenFunction::ComplexPairTy LHS, RHS;
+ QualType CETy;
+ if (auto *CTy = LHSTy->getAs<ComplexType>()) {
+ LHS = CGF.EmitComplexExpr(E->getLHS());
+ CETy = CTy->getElementType();
+ } else {
+ LHS.first = Visit(E->getLHS());
+ LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
+ CETy = LHSTy;
+ }
+ if (auto *CTy = RHSTy->getAs<ComplexType>()) {
+ RHS = CGF.EmitComplexExpr(E->getRHS());
+ assert(CGF.getContext().hasSameUnqualifiedType(CETy,
+ CTy->getElementType()) &&
+ "The element types must always match.");
+ (void)CTy;
+ } else {
+ RHS.first = Visit(E->getRHS());
+ RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
+ assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
+ "The element types must always match.");
+ }
Value *ResultR, *ResultI;
if (CETy->isRealFloatingType()) {
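
A minimal C example of the mixed comparison the scalar path now handles directly (previously both operands were assumed complex): when only one side is _Complex, the real side is materialised as (x, 0) and both parts are compared.

    int same(double _Complex z, double x) {
      /* RHS stays real in the AST; codegen widens it with a zero
         imaginary part before the equality comparison. */
      return z == x;
    }
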
@@ -2959,7 +3043,7 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
// Emit an unconditional branch from this block to ContBlock.
{
// There is no need to emit line number for unconditional branch.
- SuppressDebugLocation S(Builder);
+ ApplyDebugLocation DL(CGF);
CGF.EmitBlock(ContBlock);
}
// Insert an entry into the phi node for the edge with the value of RHSCond.
@@ -3232,8 +3316,12 @@ Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
llvm::Value *Val = Builder.CreateLoad(ArgPtr);
// If EmitVAArg promoted the type, we must truncate it.
- if (ArgTy != Val->getType())
- Val = Builder.CreateTrunc(Val, ArgTy);
+ if (ArgTy != Val->getType()) {
+ if (ArgTy->isPointerTy() && !Val->getType()->isPointerTy())
+ Val = Builder.CreateIntToPtr(Val, ArgTy);
+ else
+ Val = Builder.CreateTrunc(Val, ArgTy);
+ }
return Val;
}