author | Dimitry Andric <dim@FreeBSD.org> | 2017-06-16 21:03:44 +0000
---|---|---
committer | Dimitry Andric <dim@FreeBSD.org> | 2017-06-16 21:03:44 +0000
commit | 325377b57338e700317f5e423e5b0f1c08d99a39 (patch)
tree | acd401a9713562cf3e93d13fa6a70ad67eb5cd99 /lib/CodeGen
parent | 1b08b196ac845675036ac78f3ac927d0a37f707c (diff)
Diffstat (limited to 'lib/CodeGen')
-rw-r--r-- | lib/CodeGen/BackendUtil.cpp | 8
-rw-r--r-- | lib/CodeGen/CGBuiltin.cpp | 21
-rw-r--r-- | lib/CodeGen/CGCall.cpp | 2
-rw-r--r-- | lib/CodeGen/CGCoroutine.cpp | 66
-rw-r--r-- | lib/CodeGen/CGDebugInfo.cpp | 18
-rw-r--r-- | lib/CodeGen/CGExpr.cpp | 47
-rw-r--r-- | lib/CodeGen/CGExprScalar.cpp | 51
-rw-r--r-- | lib/CodeGen/CGOpenMPRuntime.cpp | 2
-rw-r--r-- | lib/CodeGen/CodeGenFunction.h | 4
-rw-r--r-- | lib/CodeGen/CodeGenModule.cpp | 10
-rw-r--r-- | lib/CodeGen/CodeGenModule.h | 7 |
11 files changed, 162 insertions, 74 deletions
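Before the per-file hunks, a hedged illustration of one small change in this import (the ColdAttr handling added in the lib/CodeGen/CGCall.cpp hunk below): declarations carrying the GNU-style `cold` attribute should now also get the `cold` attribute on the emitted LLVM function. The function names in this sketch are hypothetical, not taken from the commit.

```cpp
// Hypothetical sketch: with the CGCall.cpp change below, the LLVM function
// emitted for this declaration is expected to carry the `cold` attribute,
// so optimizers treat it (and calls to it) as unlikely to execute.
__attribute__((cold)) void log_fatal_error(const char *msg);

void check(bool ok, const char *msg) {
  if (!ok)
    log_fatal_error(msg);  // call in the presumed-cold error path
}
```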
diff --git a/lib/CodeGen/BackendUtil.cpp b/lib/CodeGen/BackendUtil.cpp index 9c4316fb1cd5a..bd01902a032b7 100644 --- a/lib/CodeGen/BackendUtil.cpp +++ b/lib/CodeGen/BackendUtil.cpp @@ -964,11 +964,11 @@ Expected<BitcodeModule> clang::FindThinLTOModule(MemoryBufferRef MBRef) { if (!BMsOrErr) return BMsOrErr.takeError(); - // The bitcode file may contain multiple modules, we want the one with a - // summary. + // The bitcode file may contain multiple modules, we want the one that is + // marked as being the ThinLTO module. for (BitcodeModule &BM : *BMsOrErr) { - Expected<bool> HasSummary = BM.hasSummary(); - if (HasSummary && *HasSummary) + Expected<BitcodeLTOInfo> LTOInfo = BM.getLTOInfo(); + if (LTOInfo && LTOInfo->IsThinLTO) return BM; } diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp index 3b4f8854a9ca6..8f0c22d1f7ef5 100644 --- a/lib/CodeGen/CGBuiltin.cpp +++ b/lib/CodeGen/CGBuiltin.cpp @@ -7923,6 +7923,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, } // We can't handle 8-31 immediates with native IR, use the intrinsic. + // Except for predicates that create constants. Intrinsic::ID ID; switch (BuiltinID) { default: llvm_unreachable("Unsupported intrinsic!"); @@ -7930,12 +7931,32 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, ID = Intrinsic::x86_sse_cmp_ps; break; case X86::BI__builtin_ia32_cmpps256: + // _CMP_TRUE_UQ, _CMP_TRUE_US produce -1,-1... vector + // on any input and _CMP_FALSE_OQ, _CMP_FALSE_OS produce 0, 0... + if (CC == 0xf || CC == 0xb || CC == 0x1b || CC == 0x1f) { + Value *Constant = (CC == 0xf || CC == 0x1f) ? + llvm::Constant::getAllOnesValue(Builder.getInt32Ty()) : + llvm::Constant::getNullValue(Builder.getInt32Ty()); + Value *Vec = Builder.CreateVectorSplat( + Ops[0]->getType()->getVectorNumElements(), Constant); + return Builder.CreateBitCast(Vec, Ops[0]->getType()); + } ID = Intrinsic::x86_avx_cmp_ps_256; break; case X86::BI__builtin_ia32_cmppd: ID = Intrinsic::x86_sse2_cmp_pd; break; case X86::BI__builtin_ia32_cmppd256: + // _CMP_TRUE_UQ, _CMP_TRUE_US produce -1,-1... vector + // on any input and _CMP_FALSE_OQ, _CMP_FALSE_OS produce 0, 0... + if (CC == 0xf || CC == 0xb || CC == 0x1b || CC == 0x1f) { + Value *Constant = (CC == 0xf || CC == 0x1f) ? + llvm::Constant::getAllOnesValue(Builder.getInt64Ty()) : + llvm::Constant::getNullValue(Builder.getInt64Ty()); + Value *Vec = Builder.CreateVectorSplat( + Ops[0]->getType()->getVectorNumElements(), Constant); + return Builder.CreateBitCast(Vec, Ops[0]->getType()); + } ID = Intrinsic::x86_avx_cmp_pd_256; break; } diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp index 079064733585b..c65dc18be3068 100644 --- a/lib/CodeGen/CGCall.cpp +++ b/lib/CodeGen/CGCall.cpp @@ -1795,6 +1795,8 @@ void CodeGenModule::ConstructAttributeList( FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); if (TargetDecl->hasAttr<NoReturnAttr>()) FuncAttrs.addAttribute(llvm::Attribute::NoReturn); + if (TargetDecl->hasAttr<ColdAttr>()) + FuncAttrs.addAttribute(llvm::Attribute::Cold); if (TargetDecl->hasAttr<NoDuplicateAttr>()) FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate); if (TargetDecl->hasAttr<ConvergentAttr>()) diff --git a/lib/CodeGen/CGCoroutine.cpp b/lib/CodeGen/CGCoroutine.cpp index bc5f6327c9a05..a65faa602b331 100644 --- a/lib/CodeGen/CGCoroutine.cpp +++ b/lib/CodeGen/CGCoroutine.cpp @@ -148,25 +148,18 @@ static SmallString<32> buildSuspendPrefixStr(CGCoroData &Coro, AwaitKind Kind) { // // See llvm's docs/Coroutines.rst for more details. 
// -static RValue emitSuspendExpression(CodeGenFunction &CGF, CGCoroData &Coro, +namespace { + struct LValueOrRValue { + LValue LV; + RValue RV; + }; +} +static LValueOrRValue emitSuspendExpression(CodeGenFunction &CGF, CGCoroData &Coro, CoroutineSuspendExpr const &S, AwaitKind Kind, AggValueSlot aggSlot, - bool ignoreResult) { + bool ignoreResult, bool forLValue) { auto *E = S.getCommonExpr(); - // FIXME: rsmith 5/22/2017. Does it still make sense for us to have a - // UO_Coawait at all? As I recall, the only purpose it ever had was to - // represent a dependent co_await expression that couldn't yet be resolved to - // a CoawaitExpr. But now we have (and need!) a separate DependentCoawaitExpr - // node to store unqualified lookup results, it seems that the UnaryOperator - // portion of the representation serves no purpose (and as seen in this patch, - // it's getting in the way). Can we remove it? - - // Skip passthrough operator co_await (present when awaiting on an LValue). - if (auto *UO = dyn_cast<UnaryOperator>(E)) - if (UO->getOpcode() == UO_Coawait) - E = UO->getSubExpr(); - auto Binder = CodeGenFunction::OpaqueValueMappingData::bind(CGF, S.getOpaqueValue(), E); auto UnbindOnExit = llvm::make_scope_exit([&] { Binder.unbind(CGF); }); @@ -217,7 +210,12 @@ static RValue emitSuspendExpression(CodeGenFunction &CGF, CGCoroData &Coro, // Emit await_resume expression. CGF.EmitBlock(ReadyBlock); - return CGF.EmitAnyExpr(S.getResumeExpr(), aggSlot, ignoreResult); + LValueOrRValue Res; + if (forLValue) + Res.LV = CGF.EmitLValue(S.getResumeExpr()); + else + Res.RV = CGF.EmitAnyExpr(S.getResumeExpr(), aggSlot, ignoreResult); + return Res; } RValue CodeGenFunction::EmitCoawaitExpr(const CoawaitExpr &E, @@ -225,13 +223,13 @@ RValue CodeGenFunction::EmitCoawaitExpr(const CoawaitExpr &E, bool ignoreResult) { return emitSuspendExpression(*this, *CurCoro.Data, E, CurCoro.Data->CurrentAwaitKind, aggSlot, - ignoreResult); + ignoreResult, /*forLValue*/false).RV; } RValue CodeGenFunction::EmitCoyieldExpr(const CoyieldExpr &E, AggValueSlot aggSlot, bool ignoreResult) { return emitSuspendExpression(*this, *CurCoro.Data, E, AwaitKind::Yield, - aggSlot, ignoreResult); + aggSlot, ignoreResult, /*forLValue*/false).RV; } void CodeGenFunction::EmitCoreturnStmt(CoreturnStmt const &S) { @@ -240,6 +238,38 @@ void CodeGenFunction::EmitCoreturnStmt(CoreturnStmt const &S) { EmitBranchThroughCleanup(CurCoro.Data->FinalJD); } + +#ifndef NDEBUG +static QualType getCoroutineSuspendExprReturnType(const ASTContext &Ctx, + const CoroutineSuspendExpr *E) { + const auto *RE = E->getResumeExpr(); + // Is it possible for RE to be a CXXBindTemporaryExpr wrapping + // a MemberCallExpr? 
+ assert(isa<CallExpr>(RE) && "unexpected suspend expression type"); + return cast<CallExpr>(RE)->getCallReturnType(Ctx); +} +#endif + +LValue +CodeGenFunction::EmitCoawaitLValue(const CoawaitExpr *E) { + assert(getCoroutineSuspendExprReturnType(getContext(), E)->isReferenceType() && + "Can't have a scalar return unless the return type is a " + "reference type!"); + return emitSuspendExpression(*this, *CurCoro.Data, *E, + CurCoro.Data->CurrentAwaitKind, AggValueSlot::ignored(), + /*ignoreResult*/false, /*forLValue*/true).LV; +} + +LValue +CodeGenFunction::EmitCoyieldLValue(const CoyieldExpr *E) { + assert(getCoroutineSuspendExprReturnType(getContext(), E)->isReferenceType() && + "Can't have a scalar return unless the return type is a " + "reference type!"); + return emitSuspendExpression(*this, *CurCoro.Data, *E, + AwaitKind::Yield, AggValueSlot::ignored(), + /*ignoreResult*/false, /*forLValue*/true).LV; +} + // Hunts for the parameter reference in the parameter copy/move declaration. namespace { struct GetParamRef : public StmtVisitor<GetParamRef> { diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp index ebb264eb61c9f..b00d296fe34a4 100644 --- a/lib/CodeGen/CGDebugInfo.cpp +++ b/lib/CodeGen/CGDebugInfo.cpp @@ -1041,7 +1041,13 @@ llvm::DIType *CGDebugInfo::createBitFieldType(const FieldDecl *BitFieldDecl, assert(SizeInBits > 0 && "found named 0-width bitfield"); uint64_t StorageOffsetInBits = CGM.getContext().toBits(BitFieldInfo.StorageOffset); - uint64_t OffsetInBits = StorageOffsetInBits + BitFieldInfo.Offset; + uint64_t Offset = BitFieldInfo.Offset; + // The bit offsets for big endian machines are reversed for big + // endian target, compensate for that as the DIDerivedType requires + // un-reversed offsets. + if (CGM.getDataLayout().isBigEndian()) + Offset = BitFieldInfo.StorageSize - BitFieldInfo.Size - Offset; + uint64_t OffsetInBits = StorageOffsetInBits + Offset; llvm::DINode::DIFlags Flags = getAccessFlag(BitFieldDecl->getAccess(), RD); return DBuilder.createBitFieldMemberType( RecordTy, Name, File, Line, SizeInBits, OffsetInBits, StorageOffsetInBits, @@ -3484,13 +3490,13 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::Value *Storage, if (VD->hasAttr<BlocksAttr>()) { // Here, we need an offset *into* the alloca. 
CharUnits offset = CharUnits::fromQuantity(32); - Expr.push_back(llvm::dwarf::DW_OP_plus); + Expr.push_back(llvm::dwarf::DW_OP_plus_uconst); // offset of __forwarding field offset = CGM.getContext().toCharUnitsFromBits( CGM.getTarget().getPointerWidth(0)); Expr.push_back(offset.getQuantity()); Expr.push_back(llvm::dwarf::DW_OP_deref); - Expr.push_back(llvm::dwarf::DW_OP_plus); + Expr.push_back(llvm::dwarf::DW_OP_plus_uconst); // offset of x field offset = CGM.getContext().toCharUnitsFromBits(XOffset); Expr.push_back(offset.getQuantity()); @@ -3599,17 +3605,17 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable( SmallVector<int64_t, 9> addr; addr.push_back(llvm::dwarf::DW_OP_deref); - addr.push_back(llvm::dwarf::DW_OP_plus); + addr.push_back(llvm::dwarf::DW_OP_plus_uconst); addr.push_back(offset.getQuantity()); if (isByRef) { addr.push_back(llvm::dwarf::DW_OP_deref); - addr.push_back(llvm::dwarf::DW_OP_plus); + addr.push_back(llvm::dwarf::DW_OP_plus_uconst); // offset of __forwarding field offset = CGM.getContext().toCharUnitsFromBits(target.getPointerSizeInBits(0)); addr.push_back(offset.getQuantity()); addr.push_back(llvm::dwarf::DW_OP_deref); - addr.push_back(llvm::dwarf::DW_OP_plus); + addr.push_back(llvm::dwarf::DW_OP_plus_uconst); // offset of x field offset = CGM.getContext().toCharUnitsFromBits(XOffset); addr.push_back(offset.getQuantity()); diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp index 9f800a75b5bc8..7359006677f42 100644 --- a/lib/CodeGen/CGExpr.cpp +++ b/lib/CodeGen/CGExpr.cpp @@ -549,6 +549,11 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, if (Ptr->getType()->getPointerAddressSpace()) return; + // Don't check pointers to volatile data. The behavior here is implementation- + // defined. + if (Ty.isVolatileQualified()) + return; + SanitizerScope SanScope(this); SmallVector<std::pair<llvm::Value *, SanitizerMask>, 3> Checks; @@ -1158,6 +1163,11 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) { case Expr::MaterializeTemporaryExprClass: return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E)); + + case Expr::CoawaitExprClass: + return EmitCoawaitLValue(cast<CoawaitExpr>(E)); + case Expr::CoyieldExprClass: + return EmitCoyieldLValue(cast<CoyieldExpr>(E)); } } @@ -3002,10 +3012,11 @@ static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF, llvm::Value *ptr, ArrayRef<llvm::Value*> indices, bool inbounds, + bool signedIndices, SourceLocation loc, const llvm::Twine &name = "arrayidx") { if (inbounds) { - return CGF.EmitCheckedInBoundsGEP(ptr, indices, loc, name); + return CGF.EmitCheckedInBoundsGEP(ptr, indices, signedIndices, loc, name); } else { return CGF.Builder.CreateGEP(ptr, indices, name); } @@ -3038,7 +3049,7 @@ static QualType getFixedSizeElementType(const ASTContext &ctx, static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr, ArrayRef<llvm::Value *> indices, QualType eltType, bool inbounds, - SourceLocation loc, + bool signedIndices, SourceLocation loc, const llvm::Twine &name = "arrayidx") { // All the indices except that last must be zero. 
#ifndef NDEBUG @@ -3058,8 +3069,8 @@ static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr, CharUnits eltAlign = getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize); - llvm::Value *eltPtr = - emitArraySubscriptGEP(CGF, addr.getPointer(), indices, inbounds, loc, name); + llvm::Value *eltPtr = emitArraySubscriptGEP( + CGF, addr.getPointer(), indices, inbounds, signedIndices, loc, name); return Address(eltPtr, eltAlign); } @@ -3069,6 +3080,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E, // in lexical order (this complexity is, sadly, required by C++17). llvm::Value *IdxPre = (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E->getIdx()) : nullptr; + bool SignedIndices = false; auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * { auto *Idx = IdxPre; if (E->getLHS() != E->getIdx()) { @@ -3078,6 +3090,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E, QualType IdxTy = E->getIdx()->getType(); bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType(); + SignedIndices |= IdxSigned; if (SanOpts.has(SanitizerKind::ArrayBounds)) EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed); @@ -3113,7 +3126,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E, QualType EltType = LV.getType()->castAs<VectorType>()->getElementType(); Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true, - E->getExprLoc()); + SignedIndices, E->getExprLoc()); return MakeAddrLValue(Addr, EltType, LV.getBaseInfo()); } @@ -3142,7 +3155,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E, Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(), !getLangOpts().isSignedOverflowDefined(), - E->getExprLoc()); + SignedIndices, E->getExprLoc()); } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){ // Indexing over an interface, as in "NSString *P; P[4];" @@ -3167,8 +3180,9 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E, // Do the GEP. CharUnits EltAlign = getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize); - llvm::Value *EltPtr = emitArraySubscriptGEP( - *this, Addr.getPointer(), ScaledIdx, false, E->getExprLoc()); + llvm::Value *EltPtr = + emitArraySubscriptGEP(*this, Addr.getPointer(), ScaledIdx, false, + SignedIndices, E->getExprLoc()); Addr = Address(EltPtr, EltAlign); // Cast back. @@ -3190,11 +3204,10 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E, auto *Idx = EmitIdxAfterBase(/*Promote*/true); // Propagate the alignment from the array itself to the result. - Addr = emitArraySubscriptGEP(*this, ArrayLV.getAddress(), - {CGM.getSize(CharUnits::Zero()), Idx}, - E->getType(), - !getLangOpts().isSignedOverflowDefined(), - E->getExprLoc()); + Addr = emitArraySubscriptGEP( + *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx}, + E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices, + E->getExprLoc()); BaseInfo = ArrayLV.getBaseInfo(); } else { // The base must be a pointer; emit it with an estimate of its alignment. 
@@ -3202,7 +3215,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E, auto *Idx = EmitIdxAfterBase(/*Promote*/true); Addr = emitArraySubscriptGEP(*this, Addr, Idx, E->getType(), !getLangOpts().isSignedOverflowDefined(), - E->getExprLoc()); + SignedIndices, E->getExprLoc()); } LValue LV = MakeAddrLValue(Addr, E->getType(), BaseInfo); @@ -3375,7 +3388,7 @@ LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E, Idx = Builder.CreateNSWMul(Idx, NumElements); EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(), !getLangOpts().isSignedOverflowDefined(), - E->getExprLoc()); + /*SignedIndices=*/false, E->getExprLoc()); } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) { // If this is A[i] where A is an array, the frontend will have decayed the // base to be a ArrayToPointerDecay implicit cast. While correct, it is @@ -3395,14 +3408,14 @@ LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E, EltPtr = emitArraySubscriptGEP( *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx}, ResultExprTy, !getLangOpts().isSignedOverflowDefined(), - E->getExprLoc()); + /*SignedIndices=*/false, E->getExprLoc()); BaseInfo = ArrayLV.getBaseInfo(); } else { Address Base = emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, BaseTy, ResultExprTy, IsLowerBound); EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy, !getLangOpts().isSignedOverflowDefined(), - E->getExprLoc()); + /*SignedIndices=*/false, E->getExprLoc()); } return MakeAddrLValue(EltPtr, ResultExprTy, BaseInfo); diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp index f9d1fe468748f..43c86495f3d3c 100644 --- a/lib/CodeGen/CGExprScalar.cpp +++ b/lib/CodeGen/CGExprScalar.cpp @@ -1851,6 +1851,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, llvm::Value *input; int amount = (isInc ? 1 : -1); + bool signedIndex = !isInc; if (const AtomicType *atomicTy = type->getAs<AtomicType>()) { type = atomicTy->getValueType(); @@ -1940,8 +1941,8 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, if (CGF.getLangOpts().isSignedOverflowDefined()) value = Builder.CreateGEP(value, numElts, "vla.inc"); else - value = CGF.EmitCheckedInBoundsGEP(value, numElts, E->getExprLoc(), - "vla.inc"); + value = CGF.EmitCheckedInBoundsGEP(value, numElts, signedIndex, + E->getExprLoc(), "vla.inc"); // Arithmetic on function pointers (!) is just +-1. } else if (type->isFunctionType()) { @@ -1951,8 +1952,8 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, if (CGF.getLangOpts().isSignedOverflowDefined()) value = Builder.CreateGEP(value, amt, "incdec.funcptr"); else - value = CGF.EmitCheckedInBoundsGEP(value, amt, E->getExprLoc(), - "incdec.funcptr"); + value = CGF.EmitCheckedInBoundsGEP(value, amt, signedIndex, + E->getExprLoc(), "incdec.funcptr"); value = Builder.CreateBitCast(value, input->getType()); // For everything else, we can just do a simple increment. @@ -1961,8 +1962,8 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, if (CGF.getLangOpts().isSignedOverflowDefined()) value = Builder.CreateGEP(value, amt, "incdec.ptr"); else - value = CGF.EmitCheckedInBoundsGEP(value, amt, E->getExprLoc(), - "incdec.ptr"); + value = CGF.EmitCheckedInBoundsGEP(value, amt, signedIndex, + E->getExprLoc(), "incdec.ptr"); } // Vector increment/decrement. 
@@ -2043,8 +2044,8 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, if (CGF.getLangOpts().isSignedOverflowDefined()) value = Builder.CreateGEP(value, sizeValue, "incdec.objptr"); else - value = CGF.EmitCheckedInBoundsGEP(value, sizeValue, E->getExprLoc(), - "incdec.objptr"); + value = CGF.EmitCheckedInBoundsGEP(value, sizeValue, signedIndex, + E->getExprLoc(), "incdec.objptr"); value = Builder.CreateBitCast(value, input->getType()); } @@ -2661,13 +2662,15 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF, std::swap(pointerOperand, indexOperand); } + bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType(); + bool mayHaveNegativeGEPIndex = isSigned || isSubtraction; + unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth(); auto &DL = CGF.CGM.getDataLayout(); auto PtrTy = cast<llvm::PointerType>(pointer->getType()); if (width != DL.getTypeSizeInBits(PtrTy)) { // Zero-extend or sign-extend the pointer value according to // whether the index is signed or not. - bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType(); index = CGF.Builder.CreateIntCast(index, DL.getIntPtrType(PtrTy), isSigned, "idx.ext"); } @@ -2711,8 +2714,9 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF, pointer = CGF.Builder.CreateGEP(pointer, index, "add.ptr"); } else { index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index"); - pointer = CGF.EmitCheckedInBoundsGEP(pointer, index, op.E->getExprLoc(), - "add.ptr"); + pointer = + CGF.EmitCheckedInBoundsGEP(pointer, index, mayHaveNegativeGEPIndex, + op.E->getExprLoc(), "add.ptr"); } return pointer; } @@ -2729,8 +2733,8 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF, if (CGF.getLangOpts().isSignedOverflowDefined()) return CGF.Builder.CreateGEP(pointer, index, "add.ptr"); - return CGF.EmitCheckedInBoundsGEP(pointer, index, op.E->getExprLoc(), - "add.ptr"); + return CGF.EmitCheckedInBoundsGEP(pointer, index, mayHaveNegativeGEPIndex, + op.E->getExprLoc(), "add.ptr"); } // Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and @@ -3848,6 +3852,7 @@ LValue CodeGenFunction::EmitCompoundAssignmentLValue( Value *CodeGenFunction::EmitCheckedInBoundsGEP(Value *Ptr, ArrayRef<Value *> IdxList, + bool SignedIndices, SourceLocation Loc, const Twine &Name) { Value *GEPVal = Builder.CreateInBoundsGEP(Ptr, IdxList, Name); @@ -3905,7 +3910,7 @@ Value *CodeGenFunction::EmitCheckedInBoundsGEP(Value *Ptr, auto *ResultAndOverflow = Builder.CreateCall( (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS}); OffsetOverflows = Builder.CreateOr( - OffsetOverflows, Builder.CreateExtractValue(ResultAndOverflow, 1)); + Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows); return Builder.CreateExtractValue(ResultAndOverflow, 0); }; @@ -3951,12 +3956,18 @@ Value *CodeGenFunction::EmitCheckedInBoundsGEP(Value *Ptr, // 1) The total offset doesn't overflow, and // 2) The sign of the difference between the computed address and the base // pointer matches the sign of the total offset. 
- llvm::Value *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr); - llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr); - auto *PosOrZeroOffset = Builder.CreateICmpSGE(TotalOffset, Zero); - llvm::Value *ValidGEP = Builder.CreateAnd( - Builder.CreateNot(OffsetOverflows), - Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid)); + llvm::Value *ValidGEP; + auto *NoOffsetOverflow = Builder.CreateNot(OffsetOverflows); + auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr); + if (SignedIndices) { + auto *PosOrZeroOffset = Builder.CreateICmpSGE(TotalOffset, Zero); + llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr); + ValidGEP = Builder.CreateAnd( + Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid), + NoOffsetOverflow); + } else { + ValidGEP = Builder.CreateAnd(PosOrZeroValid, NoOffsetOverflow); + } llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)}; // Pass the computed GEP to the runtime to avoid emitting poisoned arguments. diff --git a/lib/CodeGen/CGOpenMPRuntime.cpp b/lib/CodeGen/CGOpenMPRuntime.cpp index 468838e56e383..8d83255ac139c 100644 --- a/lib/CodeGen/CGOpenMPRuntime.cpp +++ b/lib/CodeGen/CGOpenMPRuntime.cpp @@ -6327,7 +6327,7 @@ bool CGOpenMPRuntime::emitTargetGlobalVariable(GlobalDecl GD) { } } - // If we are in target mode we do not emit any global (declare target is not + // If we are in target mode, we do not emit any global (declare target is not // implemented yet). Therefore we signal that GD was processed in this case. return true; } diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h index 42ffd0d3efcca..831eedf9e4780 100644 --- a/lib/CodeGen/CodeGenFunction.h +++ b/lib/CodeGen/CodeGenFunction.h @@ -2550,9 +2550,11 @@ public: RValue EmitCoawaitExpr(const CoawaitExpr &E, AggValueSlot aggSlot = AggValueSlot::ignored(), bool ignoreResult = false); + LValue EmitCoawaitLValue(const CoawaitExpr *E); RValue EmitCoyieldExpr(const CoyieldExpr &E, AggValueSlot aggSlot = AggValueSlot::ignored(), bool ignoreResult = false); + LValue EmitCoyieldLValue(const CoyieldExpr *E); RValue EmitCoroutineIntrinsic(const CallExpr *E, unsigned int IID); void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false); @@ -3554,8 +3556,10 @@ public: /// Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to /// detect undefined behavior when the pointer overflow sanitizer is enabled. + /// \p SignedIndices indicates whether any of the GEP indices are signed. llvm::Value *EmitCheckedInBoundsGEP(llvm::Value *Ptr, ArrayRef<llvm::Value *> IdxList, + bool SignedIndices, SourceLocation Loc, const Twine &Name = ""); diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp index 77adf7b441a2f..19a055075604f 100644 --- a/lib/CodeGen/CodeGenModule.cpp +++ b/lib/CodeGen/CodeGenModule.cpp @@ -1243,7 +1243,7 @@ void CodeGenModule::AddDependentLib(StringRef Lib) { /// \brief Add link options implied by the given module, including modules /// it depends on, using a postorder walk. static void addLinkOptionsPostorder(CodeGenModule &CGM, Module *Mod, - SmallVectorImpl<llvm::Metadata *> &Metadata, + SmallVectorImpl<llvm::MDNode *> &Metadata, llvm::SmallPtrSet<Module *, 16> &Visited) { // Import this module's parent. if (Mod->Parent && Visited.insert(Mod->Parent).second) { @@ -1331,7 +1331,7 @@ void CodeGenModule::EmitModuleLinkOptions() { // Add link options for all of the imported modules in reverse topological // order. 
We don't do anything to try to order import link flags with respect // to linker options inserted by things like #pragma comment(). - SmallVector<llvm::Metadata *, 16> MetadataArgs; + SmallVector<llvm::MDNode *, 16> MetadataArgs; Visited.clear(); for (Module *M : LinkModules) if (Visited.insert(M).second) @@ -1340,9 +1340,9 @@ void CodeGenModule::EmitModuleLinkOptions() { LinkerOptionsMetadata.append(MetadataArgs.begin(), MetadataArgs.end()); // Add the linker options metadata flag. - getModule().addModuleFlag(llvm::Module::AppendUnique, "Linker Options", - llvm::MDNode::get(getLLVMContext(), - LinkerOptionsMetadata)); + auto *NMD = getModule().getOrInsertNamedMetadata("llvm.linker.options"); + for (auto *MD : LinkerOptionsMetadata) + NMD->addOperand(MD); } void CodeGenModule::EmitDeferred() { diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h index 0a71c635e8f0e..59e56a6ba1941 100644 --- a/lib/CodeGen/CodeGenModule.h +++ b/lib/CodeGen/CodeGenModule.h @@ -429,7 +429,7 @@ private: llvm::SmallPtrSet<clang::Module *, 16> EmittedModuleInitializers; /// \brief A vector of metadata strings. - SmallVector<llvm::Metadata *, 16> LinkerOptionsMetadata; + SmallVector<llvm::MDNode *, 16> LinkerOptionsMetadata; /// @name Cache for Objective-C runtime types /// @{ @@ -1058,13 +1058,14 @@ public: void RefreshTypeCacheForClass(const CXXRecordDecl *Class); - /// \brief Appends Opts to the "Linker Options" metadata value. + /// \brief Appends Opts to the "llvm.linker.options" metadata value. void AppendLinkerOptions(StringRef Opts); /// \brief Appends a detect mismatch command to the linker options. void AddDetectMismatch(StringRef Name, StringRef Value); - /// \brief Appends a dependent lib to the "Linker Options" metadata value. + /// \brief Appends a dependent lib to the "llvm.linker.options" metadata + /// value. void AddDependentLib(StringRef Lib); llvm::GlobalVariable::LinkageTypes getFunctionLinkage(GlobalDecl GD); |
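As a hedged sketch of the source-level pattern affected by the lib/CodeGen/CGBuiltin.cpp hunk above: the predicates _CMP_TRUE_UQ/_CMP_TRUE_US produce an all-ones vector and _CMP_FALSE_OQ/_CMP_FALSE_OS produce an all-zeros vector regardless of the operands, so the builtin can be folded to a constant splat instead of emitting the AVX cmp intrinsic with an 8-31 immediate. The function names below are illustrative only.

```cpp
// Illustrative only (compile with -mavx): after the CGBuiltin.cpp change,
// these comparisons can be lowered to constant vectors instead of calls to
// the llvm.x86.avx.cmp.ps.256 intrinsic with immediates 0x0f and 0x0b.
#include <immintrin.h>

__m256 all_ones_mask(__m256 a, __m256 b) {
  return _mm256_cmp_ps(a, b, _CMP_TRUE_UQ);   // every lane: all bits set
}

__m256 all_zeros_mask(__m256 a, __m256 b) {
  return _mm256_cmp_ps(a, b, _CMP_FALSE_OQ);  // every lane: zero
}
```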