Diffstat (limited to 'contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp')
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp | 199
1 file changed, 142 insertions, 57 deletions
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp
index cf8024550eee..e4803fde230f 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp
@@ -19,6 +19,7 @@
 #include "CodeGenFunction.h"
 #include "CodeGenModule.h"
 #include "TargetInfo.h"
+#include "clang/AST/Attr.h"
 #include "clang/AST/Decl.h"
 #include "clang/AST/DeclCXX.h"
 #include "clang/AST/DeclObjC.h"
@@ -28,7 +29,6 @@
 #include "clang/CodeGen/CGFunctionInfo.h"
 #include "clang/CodeGen/SwiftCallingConv.h"
 #include "llvm/ADT/StringExtras.h"
-#include "llvm/Transforms/Utils/Local.h"
 #include "llvm/Analysis/ValueTracking.h"
 #include "llvm/IR/Attributes.h"
 #include "llvm/IR/CallingConv.h"
@@ -36,6 +36,7 @@
 #include "llvm/IR/InlineAsm.h"
 #include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/Intrinsics.h"
+#include "llvm/Transforms/Utils/Local.h"
 
 using namespace clang;
 using namespace CodeGen;
@@ -903,7 +904,7 @@ struct NoExpansion : TypeExpansion {
 static std::unique_ptr<TypeExpansion>
 getTypeExpansion(QualType Ty, const ASTContext &Context) {
   if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
-    return llvm::make_unique<ConstantArrayExpansion>(
+    return std::make_unique<ConstantArrayExpansion>(
         AT->getElementType(), AT->getSize().getZExtValue());
   }
   if (const RecordType *RT = Ty->getAs<RecordType>()) {
@@ -947,13 +948,13 @@ getTypeExpansion(QualType Ty, const ASTContext &Context) {
         Fields.push_back(FD);
       }
     }
-    return llvm::make_unique<RecordExpansion>(std::move(Bases),
+    return std::make_unique<RecordExpansion>(std::move(Bases),
                                               std::move(Fields));
   }
   if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
-    return llvm::make_unique<ComplexExpansion>(CT->getElementType());
+    return std::make_unique<ComplexExpansion>(CT->getElementType());
   }
-  return llvm::make_unique<NoExpansion>();
+  return std::make_unique<NoExpansion>();
 }
 
 static int getExpansionSize(QualType Ty, const ASTContext &Context) {
@@ -1020,13 +1021,13 @@ void CodeGenFunction::ExpandTypeFromArgs(
 
   auto Exp = getTypeExpansion(Ty, getContext());
   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
-    forConstantArrayExpansion(*this, CAExp, LV.getAddress(),
-                              [&](Address EltAddr) {
-      LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
-      ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
-    });
+    forConstantArrayExpansion(
+        *this, CAExp, LV.getAddress(*this), [&](Address EltAddr) {
+          LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
+          ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
+        });
   } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
-    Address This = LV.getAddress();
+    Address This = LV.getAddress(*this);
     for (const CXXBaseSpecifier *BS : RExp->Bases) {
       // Perform a single step derived-to-base conversion.
       Address Base =
@@ -1047,8 +1048,13 @@ void CodeGenFunction::ExpandTypeFromArgs(
     auto imagValue = *AI++;
     EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
   } else {
+    // Call EmitStoreOfScalar except when the lvalue is a bitfield to emit a
+    // primitive store.
     assert(isa<NoExpansion>(Exp.get()));
-    EmitStoreThroughLValue(RValue::get(*AI++), LV);
+    if (LV.isBitField())
+      EmitStoreThroughLValue(RValue::get(*AI++), LV);
+    else
+      EmitStoreOfScalar(*AI++, LV);
   }
 }
 
@@ -1057,7 +1063,7 @@ void CodeGenFunction::ExpandTypeToArgs(
     SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
   auto Exp = getTypeExpansion(Ty, getContext());
   if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
-    Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
+    Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
                                    : Arg.getKnownRValue().getAggregateAddress();
     forConstantArrayExpansion(
         *this, CAExp, Addr, [&](Address EltAddr) {
@@ -1068,7 +1074,7 @@ void CodeGenFunction::ExpandTypeToArgs(
                            IRCallArgPos);
         });
   } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
-    Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
+    Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
                                    : Arg.getKnownRValue().getAggregateAddress();
     for (const CXXBaseSpecifier *BS : RExp->Bases) {
       // Perform a single step derived-to-base conversion.
@@ -1305,6 +1311,15 @@ static void CreateCoercedStore(llvm::Value *Src,
     DstTy = Dst.getType()->getElementType();
   }
 
+  llvm::PointerType *SrcPtrTy = llvm::dyn_cast<llvm::PointerType>(SrcTy);
+  llvm::PointerType *DstPtrTy = llvm::dyn_cast<llvm::PointerType>(DstTy);
+  if (SrcPtrTy && DstPtrTy &&
+      SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) {
+    Src = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy);
+    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
+    return;
+  }
+
   // If the source and destination are integer or pointer types, just do an
   // extension or truncation to the desired type.
   if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
@@ -1713,24 +1728,28 @@ void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
     if (!CodeGenOpts.TrapFuncName.empty())
       FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
   } else {
-    // Attributes that should go on the function, but not the call site.
-    if (!CodeGenOpts.DisableFPElim) {
-      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
-    } else if (CodeGenOpts.OmitLeafFramePointer) {
-      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
-      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
-    } else {
-      FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
-      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
+    StringRef FpKind;
+    switch (CodeGenOpts.getFramePointer()) {
+    case CodeGenOptions::FramePointerKind::None:
+      FpKind = "none";
+      break;
+    case CodeGenOptions::FramePointerKind::NonLeaf:
+      FpKind = "non-leaf";
+      break;
+    case CodeGenOptions::FramePointerKind::All:
+      FpKind = "all";
+      break;
     }
+    FuncAttrs.addAttribute("frame-pointer", FpKind);
 
     FuncAttrs.addAttribute("less-precise-fpmad",
                            llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
 
     if (CodeGenOpts.NullPointerIsValid)
       FuncAttrs.addAttribute("null-pointer-is-valid", "true");
-    if (!CodeGenOpts.FPDenormalMode.empty())
-      FuncAttrs.addAttribute("denormal-fp-math", CodeGenOpts.FPDenormalMode);
+    if (CodeGenOpts.FPDenormalMode != llvm::DenormalMode::Invalid)
+      FuncAttrs.addAttribute("denormal-fp-math",
+                             llvm::denormalModeName(CodeGenOpts.FPDenormalMode));
 
     FuncAttrs.addAttribute("no-trapping-math",
                            llvm::toStringRef(CodeGenOpts.NoTrappingMath));
@@ -1850,11 +1869,30 @@ void CodeGenModule::ConstructAttributeList(
     if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
       AddAttributesFromFunctionProtoType(
           getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
-      // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
-      // These attributes are not inherited by overloads.
       const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
-      if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
-        FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
+      const bool IsVirtualCall = MD && MD->isVirtual();
+      // Don't use [[noreturn]], _Noreturn or [[no_builtin]] for a call to a
+      // virtual function. These attributes are not inherited by overloads.
+      if (!(AttrOnCallSite && IsVirtualCall)) {
+        if (Fn->isNoReturn())
+          FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
+
+        const auto *NBA = Fn->getAttr<NoBuiltinAttr>();
+        bool HasWildcard = NBA && llvm::is_contained(NBA->builtinNames(), "*");
+        if (getLangOpts().NoBuiltin || HasWildcard)
+          FuncAttrs.addAttribute("no-builtins");
+        else {
+          auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {
+            SmallString<32> AttributeName;
+            AttributeName += "no-builtin-";
+            AttributeName += BuiltinName;
+            FuncAttrs.addAttribute(AttributeName);
+          };
+          llvm::for_each(getLangOpts().NoBuiltinFuncs, AddNoBuiltinAttr);
+          if (NBA)
+            llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
+        }
+      }
     }
 
     // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
@@ -2123,8 +2161,8 @@ void CodeGenModule::ConstructAttributeList(
       if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
         auto info = getContext().getTypeInfoInChars(PTy);
         Attrs.addDereferenceableAttr(info.first.getQuantity());
-        Attrs.addAttribute(llvm::Attribute::getWithAlignment(getLLVMContext(),
-            info.second.getQuantity()));
+        Attrs.addAttribute(llvm::Attribute::getWithAlignment(
+            getLLVMContext(), info.second.getAsAlign()));
       }
       break;
     }
@@ -3089,8 +3127,8 @@ void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
 
   // Deactivate the cleanup for the callee-destructed param that was pushed.
   if (hasAggregateEvaluationKind(type) && !CurFuncIsThunk &&
-      type->getAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
-      type.isDestructedType()) {
+      type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
+      param->needsDestruction(getContext())) {
     EHScopeStack::stable_iterator cleanup =
         CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
     assert(cleanup.isValid() &&
@@ -3109,7 +3147,7 @@ static bool isProvablyNull(llvm::Value *addr) {
 static void emitWriteback(CodeGenFunction &CGF,
                           const CallArgList::Writeback &writeback) {
   const LValue &srcLV = writeback.Source;
-  Address srcAddr = srcLV.getAddress();
+  Address srcAddr = srcLV.getAddress(CGF);
   assert(!isProvablyNull(srcAddr.getPointer()) &&
          "shouldn't have writeback for provably null argument");
 
@@ -3217,7 +3255,7 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
         CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
     srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
   }
-  Address srcAddr = srcLV.getAddress();
+  Address srcAddr = srcLV.getAddress(CGF);
 
   // The dest and src types don't necessarily match in LLVM terms
   // because of the crazy ObjC compatibility rules.
@@ -3531,7 +3569,7 @@ RValue CallArg::getRValue(CodeGenFunction &CGF) const {
   CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap,
                         LV.isVolatile());
   IsUsed = true;
-  return RValue::getAggregate(Copy.getAddress());
+  return RValue::getAggregate(Copy.getAddress(CGF));
 }
 
 void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
@@ -3541,7 +3579,7 @@ void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
   else if (!HasLV && RV.isComplex())
     CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
   else {
-    auto Addr = HasLV ? LV.getAddress() : RV.getAggregateAddress();
+    auto Addr = HasLV ? LV.getAddress(CGF) : RV.getAggregateAddress();
     LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
     // We assume that call args are never copied into subobjects.
     CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
@@ -3574,7 +3612,7 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
   // However, we still have to push an EH-only cleanup in case we unwind before
   // we make it to the call.
   if (HasAggregateEvalKind &&
-      type->getAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
+      type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
     // If we're using inalloca, use the argument memory. Otherwise, use a
     // temporary.
     AggValueSlot Slot;
@@ -3838,7 +3876,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
       AI = CreateTempAlloca(ArgStruct, "argmem");
     }
     auto Align = CallInfo.getArgStructAlignment();
-    AI->setAlignment(Align.getQuantity());
+    AI->setAlignment(Align.getAsAlign());
     AI->setUsedWithInAlloca(true);
     assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
     ArgMemory = Address(AI, Align);
@@ -3875,6 +3913,11 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
   Address swiftErrorTemp = Address::invalid();
   Address swiftErrorArg = Address::invalid();
 
+  // When passing arguments using temporary allocas, we need to add the
+  // appropriate lifetime markers. This vector keeps track of all the lifetime
+  // markers that need to be ended right after the call.
+  SmallVector<CallLifetimeEnd, 2> CallLifetimeEndAfterCall;
+
   // Translate all of the arguments as necessary to match the IR lowering.
   assert(CallInfo.arg_size() == CallArgs.size() &&
          "Mismatch between function signature & arguments.");
@@ -3899,7 +3942,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
     if (I->isAggregate()) {
       // Replace the placeholder with the appropriate argument slot GEP.
       Address Addr = I->hasLValue()
-                         ? I->getKnownLValue().getAddress()
+                         ? I->getKnownLValue().getAddress(*this)
                          : I->getKnownRValue().getAggregateAddress();
       llvm::Instruction *Placeholder =
          cast<llvm::Instruction>(Addr.getPointer());
@@ -3944,7 +3987,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
       // 3. If the argument is byval, but RV is not located in default
       //    or alloca address space.
       Address Addr = I->hasLValue()
-                         ? I->getKnownLValue().getAddress()
+                         ? I->getKnownLValue().getAddress(*this)
                          : I->getKnownRValue().getAggregateAddress();
       llvm::Value *V = Addr.getPointer();
      CharUnits Align = ArgInfo.getIndirectAlign();
@@ -3965,9 +4008,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
         auto LV = I->getKnownLValue();
         auto AS = LV.getAddressSpace();
 
-        if ((!ArgInfo.getIndirectByVal() &&
-             (LV.getAlignment() >=
-              getContext().getTypeAlignInChars(I->Ty)))) {
+        if (!ArgInfo.getIndirectByVal() ||
+            (LV.getAlignment() < getContext().getTypeAlignInChars(I->Ty))) {
           NeedCopy = true;
         }
         if (!getLangOpts().OpenCL) {
@@ -3991,6 +4033,18 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
         Address AI = CreateMemTempWithoutCast(
             I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
         IRCallArgs[FirstIRArg] = AI.getPointer();
+
+        // Emit lifetime markers for the temporary alloca.
+        uint64_t ByvalTempElementSize =
+            CGM.getDataLayout().getTypeAllocSize(AI.getElementType());
+        llvm::Value *LifetimeSize =
+            EmitLifetimeStart(ByvalTempElementSize, AI.getPointer());
+
+        // Add cleanup code to emit the end lifetime marker after the call.
+        if (LifetimeSize) // In case we disabled lifetime markers.
+          CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);
+
+        // Generate the copy.
         I->copyInto(*this, AI);
       } else {
         // Skip the extra memcpy call.
@@ -4019,7 +4073,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
         V = I->getKnownRValue().getScalarVal();
       else
         V = Builder.CreateLoad(
-            I->hasLValue() ? I->getKnownLValue().getAddress()
+            I->hasLValue() ? I->getKnownLValue().getAddress(*this)
                            : I->getKnownRValue().getAggregateAddress());
 
       // Implement swifterror by copying into a new swifterror argument.
@@ -4062,7 +4116,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
        Src = CreateMemTemp(I->Ty, "coerce");
        I->copyInto(*this, Src);
      } else {
-        Src = I->hasLValue() ? I->getKnownLValue().getAddress()
+        Src = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
                              : I->getKnownRValue().getAggregateAddress();
      }
 
@@ -4117,7 +4171,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
       Address addr = Address::invalid();
       Address AllocaAddr = Address::invalid();
       if (I->isAggregate()) {
-        addr = I->hasLValue() ? I->getKnownLValue().getAddress()
+        addr = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
                               : I->getKnownRValue().getAggregateAddress();
       } else {
@@ -4129,11 +4183,12 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
        auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);
 
        // Materialize to a temporary.
-        addr = CreateTempAlloca(RV.getScalarVal()->getType(),
-                                CharUnits::fromQuantity(std::max(
-                                    layout->getAlignment(), scalarAlign)),
-                                "tmp",
-                                /*ArraySize=*/nullptr, &AllocaAddr);
+        addr = CreateTempAlloca(
+            RV.getScalarVal()->getType(),
+            CharUnits::fromQuantity(std::max(
+                (unsigned)layout->getAlignment().value(), scalarAlign)),
+            "tmp",
+            /*ArraySize=*/nullptr, &AllocaAddr);
        tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());
 
        Builder.CreateStore(RV.getScalarVal(), addr);
@@ -4273,8 +4328,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
   // Update the largest vector width if any arguments have vector types.
   for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
     if (auto *VT = dyn_cast<llvm::VectorType>(IRCallArgs[i]->getType()))
-      LargestVectorWidth = std::max(LargestVectorWidth,
-                                    VT->getPrimitiveSizeInBits());
+      LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
+                                    VT->getPrimitiveSizeInBits().getFixedSize());
   }
 
   // Compute the calling convention and attributes.
@@ -4284,6 +4339,13 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                              Callee.getAbstractInfo(), Attrs, CallingConv,
                              /*AttrOnCallSite=*/true);
 
+  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
+    if (FD->usesFPIntrin())
+      // All calls within a strictfp function are marked strictfp
+      Attrs =
+        Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
+                           llvm::Attribute::StrictFP);
+
   // Apply some call-site-specific attributes.
   // TODO: work this into building the attribute set.
@@ -4333,6 +4395,13 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
   SmallVector<llvm::OperandBundleDef, 1> BundleList =
       getBundlesForFunclet(CalleePtr);
 
+  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
+    if (FD->usesFPIntrin())
+      // All calls within a strictfp function are marked strictfp
+      Attrs =
+        Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
+                           llvm::Attribute::StrictFP);
+
   // Emit the actual call/invoke instruction.
   llvm::CallBase *CI;
   if (!InvokeDest) {
@@ -4346,6 +4415,17 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
   if (callOrInvoke)
     *callOrInvoke = CI;
 
+  // If this is within a function that has the guard(nocf) attribute and is an
+  // indirect call, add the "guard_nocf" attribute to this call to indicate that
+  // Control Flow Guard checks should not be added, even if the call is inlined.
+  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
+    if (const auto *A = FD->getAttr<CFGuardAttr>()) {
+      if (A->getGuard() == CFGuardAttr::GuardArg::nocf && !CI->getCalledFunction())
+        Attrs = Attrs.addAttribute(
+            getLLVMContext(), llvm::AttributeList::FunctionIndex, "guard_nocf");
+    }
+  }
+
   // Apply the attributes and calling convention.
   CI->setAttributes(Attrs);
   CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
@@ -4357,8 +4437,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
 
   // Update largest vector width from the return type.
   if (auto *VT = dyn_cast<llvm::VectorType>(CI->getType()))
-    LargestVectorWidth = std::max(LargestVectorWidth,
-                                  VT->getPrimitiveSizeInBits());
+    LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
+                                  VT->getPrimitiveSizeInBits().getFixedSize());
 
   // Insert instrumentation or attach profile metadata at indirect call sites.
   // For more details, see the comment before the definition of
@@ -4548,7 +4628,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
       llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
       llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
       EmitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, AA->getLocation(),
-                              AlignmentCI->getZExtValue(), OffsetValue);
+                              AlignmentCI, OffsetValue);
     } else if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) {
       llvm::Value *AlignmentVal = CallArgs[AA->getParamIndex().getLLVMIndex()]
                                       .getRValue(*this)
@@ -4558,6 +4638,11 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
     }
   }
 
+  // Explicitly call CallLifetimeEnd::Emit just to re-use the code even though
+  // we can't use the full cleanup mechanism.
+  for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall)
+    LifetimeEnd.Emit(*this, /*Flags=*/{});
+
   return Ret;
 }
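Note on the attribute churn above: for readers tracking this import, the sketch below restates, in plain standalone C++, the two string function attributes this revision introduces. The old "no-frame-pointer-elim"/"no-frame-pointer-elim-non-leaf" pair becomes a single "frame-pointer" attribute valued "none", "non-leaf" or "all" (hunk @@ -1713), and -fno-builtin handling now emits a blanket "no-builtins" or per-builtin "no-builtin-<name>" attributes (hunk @@ -1850). This is only an illustration of the spelling; the enum, helper names and main() below are invented for the example and are not Clang or LLVM APIs.

// Standalone illustration (not Clang source): only the attribute strings
// ("frame-pointer", "none"/"non-leaf"/"all", "no-builtins",
// "no-builtin-<name>") come from the patch above; everything else is made up.
#include <iostream>
#include <string>
#include <utility>
#include <vector>

// Mirrors the switch added to ConstructDefaultFnAttrList.
enum class FramePointerKind { None, NonLeaf, All };

std::pair<std::string, std::string> framePointerAttr(FramePointerKind K) {
  switch (K) {
  case FramePointerKind::None:    return {"frame-pointer", "none"};
  case FramePointerKind::NonLeaf: return {"frame-pointer", "non-leaf"};
  case FramePointerKind::All:     return {"frame-pointer", "all"};
  }
  return {};
}

// Mirrors the no-builtin handling added to ConstructAttributeList: a "*"
// wildcard collapses to "no-builtins", otherwise each named builtin becomes
// its own "no-builtin-<name>" attribute.
std::vector<std::string> noBuiltinAttrs(const std::vector<std::string> &Names) {
  std::vector<std::string> Attrs;
  for (const std::string &Name : Names) {
    if (Name == "*")
      return {"no-builtins"};
    Attrs.push_back("no-builtin-" + Name);
  }
  return Attrs;
}

int main() {
  auto FP = framePointerAttr(FramePointerKind::NonLeaf);
  std::cout << FP.first << "=" << FP.second << "\n";   // frame-pointer=non-leaf
  for (const std::string &A : noBuiltinAttrs({"memcpy", "memset"}))
    std::cout << A << "\n";                            // no-builtin-memcpy, no-builtin-memset
}

Compiling this with any C++11 compiler prints strings matching the attribute keys and values visible in the hunks referenced above; it is meant only as a quick reference for the new spellings.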
