Diffstat (limited to 'clang/lib/CodeGen/CGExprScalar.cpp')
-rw-r--r-- | clang/lib/CodeGen/CGExprScalar.cpp | 209
1 file changed, 154 insertions, 55 deletions
diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp
index 55a413a2a717..3f23fe11e4f5 100644
--- a/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/clang/lib/CodeGen/CGExprScalar.cpp
@@ -14,11 +14,13 @@
 #include "CGCleanup.h"
 #include "CGDebugInfo.h"
 #include "CGObjCRuntime.h"
+#include "CGOpenMPRuntime.h"
 #include "CodeGenFunction.h"
 #include "CodeGenModule.h"
 #include "ConstantEmitter.h"
 #include "TargetInfo.h"
 #include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
 #include "clang/AST/DeclObjC.h"
 #include "clang/AST/Expr.h"
 #include "clang/AST/RecordLayout.h"
@@ -34,6 +36,7 @@
 #include "llvm/IR/GetElementPtrTypeIterator.h"
 #include "llvm/IR/GlobalVariable.h"
 #include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/IntrinsicsPowerPC.h"
 #include "llvm/IR/Module.h"
 #include <cstdarg>
@@ -615,7 +618,7 @@ public:
     if (isa<MemberPointerType>(E->getType())) // never sugared
       return CGF.CGM.getMemberPointerConstant(E);
 
-    return EmitLValue(E->getSubExpr()).getPointer();
+    return EmitLValue(E->getSubExpr()).getPointer(CGF);
   }
   Value *VisitUnaryDeref(const UnaryOperator *E) {
     if (E->getType()->isVoidType())
@@ -644,8 +647,8 @@ public:
     auto &Ctx = CGF.getContext();
     APValue Evaluated =
         SLE->EvaluateInContext(Ctx, CGF.CurSourceLocExprScope.getDefaultExpr());
-    return ConstantEmitter(CGF.CGM, &CGF)
-        .emitAbstract(SLE->getLocation(), Evaluated, SLE->getType());
+    return ConstantEmitter(CGF).emitAbstract(SLE->getLocation(), Evaluated,
+                                             SLE->getType());
   }
 
   Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
@@ -795,17 +798,17 @@ public:
   // Comparisons.
   Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
                      llvm::CmpInst::Predicate SICmpOpc,
-                     llvm::CmpInst::Predicate FCmpOpc);
-#define VISITCOMP(CODE, UI, SI, FP) \
+                     llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
+#define VISITCOMP(CODE, UI, SI, FP, SIG) \
     Value *VisitBin##CODE(const BinaryOperator *E) { \
       return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
-                         llvm::FCmpInst::FP); }
-  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT)
-  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT)
-  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE)
-  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE)
-  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ)
-  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE)
+                         llvm::FCmpInst::FP, SIG); }
+  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
+  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
+  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
+  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
+  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
+  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
 #undef VISITCOMP
 
   Value *VisitBinAssign     (const BinaryOperator *E);
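A note on the IsSignaling flag added here (an aside, not part of the diff): IEEE 754 splits comparisons into quiet and signaling predicates. The ordered relational operators (<, <=, >, >=) are signaling and raise an invalid-operation exception even for quiet NaN operands, while the equality operators stay quiet, which is why LT/GT/LE/GE pass true and EQ/NE pass false above. A minimal C++ sketch, assuming a strict FP-exception environment (e.g. -ffp-exception-behavior=strict) so the exception flags are actually observable:

#include <cfenv>
#include <cmath>
#include <cstdio>

int main() {
  double qnan = std::nan("");  // quiet NaN

  std::feclearexcept(FE_ALL_EXCEPT);
  volatile bool eq = (qnan == 1.0);   // quiet predicate: no FE_INVALID
  int quiet_raised = std::fetestexcept(FE_INVALID);

  std::feclearexcept(FE_ALL_EXCEPT);
  volatile bool lt = (qnan < 1.0);    // signaling predicate: raises FE_INVALID
  int signaling_raised = std::fetestexcept(FE_INVALID);

  std::printf("== raised FE_INVALID: %d, < raised FE_INVALID: %d\n",
              quiet_raised != 0, signaling_raised != 0);
  return eq || lt;  // both comparisons are false for NaN operands
}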
@@ -976,6 +979,11 @@ EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
   return std::make_pair(Kind, std::make_pair(Check, Mask));
 }
 
+static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
+    QualType SrcType, QualType DstType) {
+  return SrcType->isIntegerType() && DstType->isIntegerType();
+}
+
 void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
                                                    Value *Dst, QualType DstType,
                                                    SourceLocation Loc) {
@@ -984,7 +992,8 @@ void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
 
   // We only care about int->int conversions here.
   // We ignore conversions to/from pointer and/or bool.
-  if (!(SrcType->isIntegerType() && DstType->isIntegerType()))
+  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
+                                                                       DstType))
     return;
 
   unsigned SrcBits = Src->getType()->getScalarSizeInBits();
@@ -1095,7 +1104,8 @@ void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
 
   // We only care about int->int conversions here.
   // We ignore conversions to/from pointer and/or bool.
-  if (!(SrcType->isIntegerType() && DstType->isIntegerType()))
+  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
+                                                                       DstType))
     return;
 
   bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
@@ -1972,7 +1982,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
 
   case CK_LValueBitCast:
   case CK_ObjCObjectLValueCast: {
-    Address Addr = EmitLValue(E).getAddress();
+    Address Addr = EmitLValue(E).getAddress(CGF);
     Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy));
     LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
     return EmitLoadOfLValue(LV, CE->getExprLoc());
@@ -1980,7 +1990,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
 
   case CK_LValueToRValueBitCast: {
     LValue SourceLVal = CGF.EmitLValue(E);
-    Address Addr = Builder.CreateElementBitCast(SourceLVal.getAddress(),
+    Address Addr = Builder.CreateElementBitCast(SourceLVal.getAddress(CGF),
                                                 CGF.ConvertTypeForMem(DestTy));
     LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
     DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
@@ -2098,7 +2108,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
   case CK_ArrayToPointerDecay:
     return CGF.EmitArrayToPointerDecay(E).getPointer();
   case CK_FunctionToPointerDecay:
-    return EmitLValue(E).getPointer();
+    return EmitLValue(E).getPointer(CGF);
 
   case CK_NullToPointer:
     if (MustVisitNullValue(E))
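As an aside (not part of the diff), the factored-out predicate simply states that the implicit-conversion sanitizers instrument only integer-to-integer conversions; conversions involving pointers or bool are skipped, per the comment in both callers. An illustrative sketch of what -fsanitize=implicit-conversion then checks at run time (names are hypothetical):

#include <cstdint>

void conversions(int64_t wide, int negative) {
  int16_t narrow = wide;  // int->int truncation: instrumented, reported
                          // when 'wide' does not fit in 16 bits
  uint32_t u = negative;  // int->int sign change: instrumented, reported
                          // when 'negative' is < 0
}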
@@ -2346,10 +2356,29 @@ llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
   llvm_unreachable("Unknown SignedOverflowBehaviorTy");
 }
 
+namespace {
+/// Handles check and update for lastprivate conditional variables.
+class OMPLastprivateConditionalUpdateRAII {
+private:
+  CodeGenFunction &CGF;
+  const UnaryOperator *E;
+
+public:
+  OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF,
+                                      const UnaryOperator *E)
+      : CGF(CGF), E(E) {}
+  ~OMPLastprivateConditionalUpdateRAII() {
+    if (CGF.getLangOpts().OpenMP)
+      CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(
+          CGF, E->getSubExpr());
+  }
+};
+} // namespace
+
 llvm::Value *
 ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                            bool isInc, bool isPre) {
-
+  OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E);
   QualType type = E->getSubExpr()->getType();
   llvm::PHINode *atomicPHI = nullptr;
   llvm::Value *value;
@@ -2363,14 +2392,14 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
   if (isInc && type->isBooleanType()) {
     llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
     if (isPre) {
-      Builder.CreateStore(True, LV.getAddress(), LV.isVolatileQualified())
-          ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
+      Builder.CreateStore(True, LV.getAddress(CGF), LV.isVolatileQualified())
+          ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
       return Builder.getTrue();
     }
     // For atomic bool increment, we just store true and return it for
     // preincrement, do an atomic swap with true for postincrement
     return Builder.CreateAtomicRMW(
-        llvm::AtomicRMWInst::Xchg, LV.getPointer(), True,
+        llvm::AtomicRMWInst::Xchg, LV.getPointer(CGF), True,
         llvm::AtomicOrdering::SequentiallyConsistent);
   }
   // Special case for atomic increment / decrement on integers, emit
@@ -2387,8 +2416,9 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
           llvm::Instruction::Sub;
       llvm::Value *amt = CGF.EmitToMemory(
           llvm::ConstantInt::get(ConvertType(type), 1, true), type);
-      llvm::Value *old = Builder.CreateAtomicRMW(aop,
-          LV.getPointer(), amt, llvm::AtomicOrdering::SequentiallyConsistent);
+      llvm::Value *old =
+          Builder.CreateAtomicRMW(aop, LV.getPointer(CGF), amt,
+                                  llvm::AtomicOrdering::SequentiallyConsistent);
       return isPre ? Builder.CreateBinOp(op, old, amt) : old;
     }
     value = EmitLoadOfLValue(LV, E->getExprLoc());
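For readers unfamiliar with the feature the new RAII helper serves (an aside, not part of the diff): OpenMP 5.0's lastprivate(conditional:) clause gives the variable the value from the last iteration that actually assigned it, so codegen must record every store to it, including the ++/-- forms handled by EmitScalarPrePostIncDec. A sketch of the source-level construct, assuming a compiler with OpenMP 5.0 support and -fopenmp:

#include <cstdio>

int main() {
  int last = -1;
  // Every write to 'last' -- plain assignments and ++/-- alike -- must be
  // tracked so the runtime can decide which iteration wrote it last.
  #pragma omp parallel for lastprivate(conditional: last)
  for (int i = 0; i < 100; ++i)
    if (i % 7 == 0)
      last = i;
  std::printf("last = %d\n", last);  // expected: 98
}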
@@ -2419,9 +2449,51 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
 
   // Most common case by far: integer increment.
   } else if (type->isIntegerType()) {
-    // Note that signed integer inc/dec with width less than int can't
-    // overflow because of promotion rules; we're just eliding a few steps here.
-    if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
+    QualType promotedType;
+    bool canPerformLossyDemotionCheck = false;
+    if (type->isPromotableIntegerType()) {
+      promotedType = CGF.getContext().getPromotedIntegerType(type);
+      assert(promotedType != type && "Shouldn't promote to the same type.");
+      canPerformLossyDemotionCheck = true;
+      canPerformLossyDemotionCheck &=
+          CGF.getContext().getCanonicalType(type) !=
+          CGF.getContext().getCanonicalType(promotedType);
+      canPerformLossyDemotionCheck &=
+          PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
+              type, promotedType);
+      assert((!canPerformLossyDemotionCheck ||
+              type->isSignedIntegerOrEnumerationType() ||
+              promotedType->isSignedIntegerOrEnumerationType() ||
+              ConvertType(type)->getScalarSizeInBits() ==
+                  ConvertType(promotedType)->getScalarSizeInBits()) &&
+             "The following check expects that if we do promotion to different "
+             "underlying canonical type, at least one of the types (either "
+             "base or promoted) will be signed, or the bitwidths will match.");
+    }
+    if (CGF.SanOpts.hasOneOf(
+            SanitizerKind::ImplicitIntegerArithmeticValueChange) &&
+        canPerformLossyDemotionCheck) {
+      // While `x += 1` (for `x` with width less than int) is modeled as
+      // promotion+arithmetics+demotion, and we can catch lossy demotion with
+      // ease; inc/dec with width less than int can't overflow because of
+      // promotion rules, so we omit promotion+demotion, which means that we can
+      // not catch lossy "demotion". Because we still want to catch these cases
+      // when the sanitizer is enabled, we perform the promotion, then perform
+      // the increment/decrement in the wider type, and finally
+      // perform the demotion. This will catch lossy demotions.
+
+      value = EmitScalarConversion(value, type, promotedType, E->getExprLoc());
+      Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
+      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
+      // Do pass non-default ScalarConversionOpts so that sanitizer check is
+      // emitted.
+      value = EmitScalarConversion(value, promotedType, type, E->getExprLoc(),
+                                   ScalarConversionOpts(CGF.SanOpts));
+
+      // Note that signed integer inc/dec with width less than int can't
+      // overflow because of promotion rules; we're just eliding a few steps
+      // here.
+    } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
       value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
     } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
                CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
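An illustration of what the new instrumentation catches (not from the diff): inc/dec on a type narrower than int cannot overflow in the C sense because the arithmetic notionally happens after promotion to int, but demoting the result back can still change the value. A sketch, assuming -fsanitize=implicit-conversion and the usual 8-bit char / 16-bit short:

void bump(unsigned char c, short s) {
  c++;  // when c == 255: 255 + 1 == 256 in int; demotion back to unsigned
        // char truncates to 0 -- now diagnosed
  s--;  // when s == -32768: -32769 in int; demotion back to short changes
        // the value -- now diagnosed
}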
@@ -2849,7 +2921,8 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
          CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
         CGF.getLangOpts().getSignedOverflowBehavior() !=
             LangOptions::SOB_Trapping) {
-      llvm::AtomicRMWInst::BinOp aop = llvm::AtomicRMWInst::BAD_BINOP;
+      llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP;
+      llvm::Instruction::BinaryOps Op;
       switch (OpInfo.Opcode) {
       // We don't have atomicrmw operands for *, %, /, <<, >>
       case BO_MulAssign: case BO_DivAssign:
@@ -2858,30 +2931,40 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
       case BO_ShrAssign:
         break;
       case BO_AddAssign:
-        aop = llvm::AtomicRMWInst::Add;
+        AtomicOp = llvm::AtomicRMWInst::Add;
+        Op = llvm::Instruction::Add;
         break;
       case BO_SubAssign:
-        aop = llvm::AtomicRMWInst::Sub;
+        AtomicOp = llvm::AtomicRMWInst::Sub;
+        Op = llvm::Instruction::Sub;
         break;
       case BO_AndAssign:
-        aop = llvm::AtomicRMWInst::And;
+        AtomicOp = llvm::AtomicRMWInst::And;
+        Op = llvm::Instruction::And;
        break;
       case BO_XorAssign:
-        aop = llvm::AtomicRMWInst::Xor;
+        AtomicOp = llvm::AtomicRMWInst::Xor;
+        Op = llvm::Instruction::Xor;
         break;
       case BO_OrAssign:
-        aop = llvm::AtomicRMWInst::Or;
+        AtomicOp = llvm::AtomicRMWInst::Or;
+        Op = llvm::Instruction::Or;
         break;
       default:
         llvm_unreachable("Invalid compound assignment type");
       }
-      if (aop != llvm::AtomicRMWInst::BAD_BINOP) {
-        llvm::Value *amt = CGF.EmitToMemory(
+      if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) {
+        llvm::Value *Amt = CGF.EmitToMemory(
             EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,
                                  E->getExprLoc()),
             LHSTy);
-        Builder.CreateAtomicRMW(aop, LHSLV.getPointer(), amt,
+        Value *OldVal = Builder.CreateAtomicRMW(
+            AtomicOp, LHSLV.getPointer(CGF), Amt,
             llvm::AtomicOrdering::SequentiallyConsistent);
+
+        // Since operation is atomic, the result type is guaranteed to be the
+        // same as the input in LLVM terms.
+        Result = Builder.CreateBinOp(Op, OldVal, Amt);
         return LHSLV;
       }
     }
@@ -2934,6 +3017,9 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
   else
     CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV);
 
+  if (CGF.getLangOpts().OpenMP)
+    CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF,
+                                                                  E->getLHS());
   return LHSLV;
 }
 
@@ -3201,10 +3287,10 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
                                               expr->getRHS()))
       return CGF.Builder.CreateIntToPtr(index, pointer->getType());
 
-  if (width != DL.getTypeSizeInBits(PtrTy)) {
+  if (width != DL.getIndexTypeSizeInBits(PtrTy)) {
     // Zero-extend or sign-extend the pointer value according to
    // whether the index is signed or not.
-    index = CGF.Builder.CreateIntCast(index, DL.getIntPtrType(PtrTy), isSigned,
+    index = CGF.Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
                                       "idx.ext");
   }
 
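The EmitCompoundAssignLValue hunk above does more than rename 'aop': the atomicrmw path previously discarded the operation's result, which matters whenever the compound assignment is itself used as a value. Since atomicrmw returns the old value, codegen rebuilds the expression result as old <op> amount. A sketch using C's _Atomic (which Clang also accepts in C++ as an extension), illustrative only:

_Atomic int counter;

int add_and_observe(void) {
  // Lowered to: old = atomicrmw add seq_cst; result = old + 5.
  return (counter += 5);
}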
@@ -3258,7 +3344,7 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
   // GNU void* casts amount to no-ops since our void* type is i8*, but this is
   // future proof.
   if (elementType->isVoidType() || elementType->isFunctionType()) {
-    Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
+    Value *result = CGF.EmitCastToVoidPtr(pointer);
     result = CGF.Builder.CreateGEP(result, index, "add.ptr");
     return CGF.Builder.CreateBitCast(result, pointer->getType());
   }
@@ -3282,17 +3368,10 @@ static Value* buildFMulAdd(llvm::BinaryOperator *MulOp, Value *Addend,
   Value *MulOp0 = MulOp->getOperand(0);
   Value *MulOp1 = MulOp->getOperand(1);
 
-  if (negMul) {
-    MulOp0 =
-      Builder.CreateFSub(
-        llvm::ConstantFP::getZeroValueForNegation(MulOp0->getType()), MulOp0,
-        "neg");
-  } else if (negAdd) {
-    Addend =
-      Builder.CreateFSub(
-        llvm::ConstantFP::getZeroValueForNegation(Addend->getType()), Addend,
-        "neg");
-  }
+  if (negMul)
+    MulOp0 = Builder.CreateFNeg(MulOp0, "neg");
+  if (negAdd)
+    Addend = Builder.CreateFNeg(Addend, "neg");
 
   Value *FMulAdd = Builder.CreateCall(
       CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
@@ -3725,7 +3804,8 @@ static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
 Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
                                       llvm::CmpInst::Predicate UICmpOpc,
                                       llvm::CmpInst::Predicate SICmpOpc,
-                                      llvm::CmpInst::Predicate FCmpOpc) {
+                                      llvm::CmpInst::Predicate FCmpOpc,
+                                      bool IsSignaling) {
   TestAndClearIgnoreResultAssign();
   Value *Result;
   QualType LHSTy = E->getLHS()->getType();
@@ -3755,8 +3835,7 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
           *SecondVecArg = RHS;
 
     QualType ElTy = LHSTy->castAs<VectorType>()->getElementType();
-    const BuiltinType *BTy = ElTy->getAs<BuiltinType>();
-    BuiltinType::Kind ElementKind = BTy->getKind();
+    BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind();
 
     switch(E->getOpcode()) {
     default: llvm_unreachable("is not a comparison operation");
@@ -3821,7 +3900,10 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
     if (BOInfo.isFixedPointBinOp()) {
       Result = EmitFixedPointBinOp(BOInfo);
     } else if (LHS->getType()->isFPOrFPVectorTy()) {
-      Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
+      if (!IsSignaling)
+        Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
+      else
+        Result = Builder.CreateFCmpS(FCmpOpc, LHS, RHS, "cmp");
     } else if (LHSTy->hasSignedIntegerRepresentation()) {
       Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
     } else {
@@ -3878,6 +3960,8 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
 
     Value *ResultR, *ResultI;
     if (CETy->isRealFloatingType()) {
+      // As complex comparisons can only be equality comparisons, they
+      // are never signaling comparisons.
       ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
       ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
     } else {
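A side note on the buildFMulAdd hunk (not part of the diff): 'fsub -0.0, x' was the historical idiom for floating-point negation; LLVM 8 introduced a dedicated fneg instruction that expresses the sign flip directly instead of pretending to be a subtraction. Source like the sketch below can have its negated product folded into llvm.fmuladd when FP contraction is permitted (e.g. the default -ffp-contract=on), exercising the negMul path:

double negated_madd(double a, double b, double c) {
  // The a*b product is negated while forming the fused multiply-add; that
  // negation is now emitted as 'fneg' rather than 'fsub -0.0, x'.
  return c - a * b;
}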
@@ -3922,7 +4006,7 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
     case Qualifiers::OCL_Weak:
       RHS = Visit(E->getRHS());
       LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
-      RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore);
+      RHS = CGF.EmitARCStoreWeak(LHS.getAddress(CGF), RHS, Ignore);
       break;
 
     case Qualifiers::OCL_None:
@@ -4227,6 +4311,21 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
     return tmp5;
   }
 
+  if (condExpr->getType()->isVectorType()) {
+    CGF.incrementProfileCounter(E);
+
+    llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
+    llvm::Value *LHS = Visit(lhsExpr);
+    llvm::Value *RHS = Visit(rhsExpr);
+
+    llvm::Type *CondType = ConvertType(condExpr->getType());
+    auto *VecTy = cast<llvm::VectorType>(CondType);
+    llvm::Value *ZeroVec = llvm::Constant::getNullValue(VecTy);
+
+    CondV = Builder.CreateICmpNE(CondV, ZeroVec, "vector_cond");
+    return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
+  }
+
   // If this is a really simple expression (like x ? 4 : 5), emit this as a
   // select instead of as control flow.  We can only do this if it is cheap and
   // safe to evaluate the LHS and RHS unconditionally.
@@ -4483,7 +4582,7 @@ LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
   if (BaseExpr->isRValue()) {
     Addr = Address(EmitScalarExpr(BaseExpr), getPointerAlign());
   } else {
-    Addr = EmitLValue(BaseExpr).getAddress();
+    Addr = EmitLValue(BaseExpr).getAddress(*this);
   }
 
   // Cast the address to Class*.
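Last, an illustration of the new vector path in VisitAbstractConditionalOperator (not from the diff): with the GCC/Clang vector extension, a vector condition selects lanewise, and it is now emitted as an icmp ne against zero plus a select rather than as control flow. A sketch:

typedef int v4i __attribute__((vector_size(16)));  // four 32-bit int lanes

// Each lane i yields a[i] if cond[i] != 0, else b[i]; lowered to
// 'icmp ne <4 x i32> %cond, zeroinitializer' followed by 'select'.
v4i blend(v4i cond, v4i a, v4i b) {
  return cond ? a : b;
}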