Diffstat (limited to 'lib/CodeGen/CodeGenFunction.cpp')
-rw-r--r--  lib/CodeGen/CodeGenFunction.cpp | 228
1 file changed, 169 insertions(+), 59 deletions(-)
diff --git a/lib/CodeGen/CodeGenFunction.cpp b/lib/CodeGen/CodeGenFunction.cpp
index c23b25ea461fe..9dbd7cc3fcbfb 100644
--- a/lib/CodeGen/CodeGenFunction.cpp
+++ b/lib/CodeGen/CodeGenFunction.cpp
@@ -33,9 +33,11 @@
 #include "clang/Frontend/CodeGenOptions.h"
 #include "clang/Sema/SemaDiagnostic.h"
 #include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
 #include "llvm/IR/Intrinsics.h"
 #include "llvm/IR/MDBuilder.h"
 #include "llvm/IR/Operator.h"
+#include "llvm/Transforms/Utils/PromoteMemToReg.h"
 
 using namespace clang;
 using namespace CodeGen;
@@ -87,7 +89,7 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
 
   llvm::FastMathFlags FMF;
   if (CGM.getLangOpts().FastMath)
-    FMF.setUnsafeAlgebra();
+    FMF.setFast();
   if (CGM.getLangOpts().FiniteMathOnly) {
     FMF.setNoNaNs();
     FMF.setNoInfs();
@@ -101,6 +103,9 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
   if (CGM.getCodeGenOpts().ReciprocalMath) {
     FMF.setAllowReciprocal();
   }
+  if (CGM.getCodeGenOpts().Reassociate) {
+    FMF.setAllowReassoc();
+  }
   Builder.setFastMathFlags(FMF);
 }
 
@@ -118,27 +123,32 @@ CodeGenFunction::~CodeGenFunction() {
 }
 
 CharUnits CodeGenFunction::getNaturalPointeeTypeAlignment(QualType T,
-                                                          LValueBaseInfo *BaseInfo) {
-  return getNaturalTypeAlignment(T->getPointeeType(), BaseInfo,
-                                 /*forPointee*/ true);
+                                                          LValueBaseInfo *BaseInfo,
+                                                          TBAAAccessInfo *TBAAInfo) {
+  return getNaturalTypeAlignment(T->getPointeeType(), BaseInfo, TBAAInfo,
+                                 /* forPointeeType= */ true);
 }
 
 CharUnits CodeGenFunction::getNaturalTypeAlignment(QualType T,
                                                    LValueBaseInfo *BaseInfo,
+                                                   TBAAAccessInfo *TBAAInfo,
                                                    bool forPointeeType) {
+  if (TBAAInfo)
+    *TBAAInfo = CGM.getTBAAAccessInfo(T);
+
   // Honor alignment typedef attributes even on incomplete types.
   // We also honor them straight for C++ class types, even as pointees;
   // there's an expressivity gap here.
   if (auto TT = T->getAs<TypedefType>()) {
     if (auto Align = TT->getDecl()->getMaxAlignment()) {
       if (BaseInfo)
-        *BaseInfo = LValueBaseInfo(AlignmentSource::AttributedType, false);
+        *BaseInfo = LValueBaseInfo(AlignmentSource::AttributedType);
       return getContext().toCharUnitsFromBits(Align);
     }
   }
 
   if (BaseInfo)
-    *BaseInfo = LValueBaseInfo(AlignmentSource::Type, false);
+    *BaseInfo = LValueBaseInfo(AlignmentSource::Type);
 
   CharUnits Alignment;
   if (T->isIncompleteType()) {
@@ -169,9 +179,10 @@ CharUnits CodeGenFunction::getNaturalTypeAlignment(QualType T,
 
 LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
   LValueBaseInfo BaseInfo;
-  CharUnits Alignment = getNaturalTypeAlignment(T, &BaseInfo);
+  TBAAAccessInfo TBAAInfo;
+  CharUnits Alignment = getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
   return LValue::MakeAddr(Address(V, Alignment), T, getContext(), BaseInfo,
-                          CGM.getTBAAInfo(T));
+                          TBAAInfo);
 }
 
 /// Given a value of type T* that may not be to a complete object,
@@ -179,8 +190,10 @@ LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
 LValue
 CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
   LValueBaseInfo BaseInfo;
-  CharUnits Align = getNaturalTypeAlignment(T, &BaseInfo, /*pointee*/ true);
-  return MakeAddrLValue(Address(V, Align), T, BaseInfo);
+  TBAAAccessInfo TBAAInfo;
+  CharUnits Align = getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
+                                            /* forPointeeType= */ true);
+  return MakeAddrLValue(Address(V, Align), T, BaseInfo, TBAAInfo);
 }
 
 
@@ -344,8 +357,13 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
   // Emit function epilog (to return).
   llvm::DebugLoc Loc = EmitReturnBlock();
 
-  if (ShouldInstrumentFunction())
-    EmitFunctionInstrumentation("__cyg_profile_func_exit");
+  if (ShouldInstrumentFunction()) {
+    if (CGM.getCodeGenOpts().InstrumentFunctions)
+      CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit");
+    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
+      CurFn->addFnAttr("instrument-function-exit-inlined",
+                       "__cyg_profile_func_exit");
+  }
 
   // Emit debug descriptor for function end.
   if (CGDebugInfo *DI = getDebugInfo())
@@ -411,12 +429,26 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
     I->first->replaceAllUsesWith(I->second);
     I->first->eraseFromParent();
   }
+
+  // Eliminate CleanupDestSlot alloca by replacing it with SSA values and
+  // PHIs if the current function is a coroutine. We don't do it for all
+  // functions as it may result in slight increase in numbers of instructions
+  // if compiled with no optimizations. We do it for coroutine as the lifetime
+  // of CleanupDestSlot alloca make correct coroutine frame building very
+  // difficult.
+  if (NormalCleanupDest && isCoroutine()) {
+    llvm::DominatorTree DT(*CurFn);
+    llvm::PromoteMemToReg(NormalCleanupDest, DT);
+    NormalCleanupDest = nullptr;
+  }
 }
 
 /// ShouldInstrumentFunction - Return true if the current function should be
 /// instrumented with __cyg_profile_func_* calls
 bool CodeGenFunction::ShouldInstrumentFunction() {
-  if (!CGM.getCodeGenOpts().InstrumentFunctions)
+  if (!CGM.getCodeGenOpts().InstrumentFunctions &&
+      !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining &&
+      !CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
     return false;
   if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
     return false;
@@ -429,29 +461,47 @@ bool CodeGenFunction::ShouldXRayInstrumentFunction() const {
   return CGM.getCodeGenOpts().XRayInstrumentFunctions;
 }
 
-/// EmitFunctionInstrumentation - Emit LLVM code to call the specified
-/// instrumentation function with the current function and the call site, if
-/// function instrumentation is enabled.
-void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
-  auto NL = ApplyDebugLocation::CreateArtificial(*this);
-  // void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site);
-  llvm::PointerType *PointerTy = Int8PtrTy;
-  llvm::Type *ProfileFuncArgs[] = { PointerTy, PointerTy };
-  llvm::FunctionType *FunctionTy =
-    llvm::FunctionType::get(VoidTy, ProfileFuncArgs, false);
-
-  llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
-  llvm::CallInst *CallSite = Builder.CreateCall(
-    CGM.getIntrinsic(llvm::Intrinsic::returnaddress),
-    llvm::ConstantInt::get(Int32Ty, 0),
-    "callsite");
-
-  llvm::Value *args[] = {
-    llvm::ConstantExpr::getBitCast(CurFn, PointerTy),
-    CallSite
-  };
+/// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls to
+/// the __xray_customevent(...) builin calls, when doing XRay instrumentation.
+bool CodeGenFunction::AlwaysEmitXRayCustomEvents() const {
+  return CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents;
+}
 
-  EmitNounwindRuntimeCall(F, args);
+llvm::Constant *
+CodeGenFunction::EncodeAddrForUseInPrologue(llvm::Function *F,
+                                            llvm::Constant *Addr) {
+  // Addresses stored in prologue data can't require run-time fixups and must
+  // be PC-relative. Run-time fixups are undesirable because they necessitate
+  // writable text segments, which are unsafe. And absolute addresses are
+  // undesirable because they break PIE mode.
+
+  // Add a layer of indirection through a private global. Taking its address
+  // won't result in a run-time fixup, even if Addr has linkonce_odr linkage.
+  auto *GV = new llvm::GlobalVariable(CGM.getModule(), Addr->getType(),
+                                      /*isConstant=*/true,
+                                      llvm::GlobalValue::PrivateLinkage, Addr);
+
+  // Create a PC-relative address.
+  auto *GOTAsInt = llvm::ConstantExpr::getPtrToInt(GV, IntPtrTy);
+  auto *FuncAsInt = llvm::ConstantExpr::getPtrToInt(F, IntPtrTy);
+  auto *PCRelAsInt = llvm::ConstantExpr::getSub(GOTAsInt, FuncAsInt);
+  return (IntPtrTy == Int32Ty)
+             ? PCRelAsInt
+             : llvm::ConstantExpr::getTrunc(PCRelAsInt, Int32Ty);
+}
+
+llvm::Value *
+CodeGenFunction::DecodeAddrUsedInPrologue(llvm::Value *F,
+                                          llvm::Value *EncodedAddr) {
+  // Reconstruct the address of the global.
+  auto *PCRelAsInt = Builder.CreateSExt(EncodedAddr, IntPtrTy);
+  auto *FuncAsInt = Builder.CreatePtrToInt(F, IntPtrTy, "func_addr.int");
+  auto *GOTAsInt = Builder.CreateAdd(PCRelAsInt, FuncAsInt, "global_addr.int");
+  auto *GOTAddr = Builder.CreateIntToPtr(GOTAsInt, Int8PtrPtrTy, "global_addr");
+
+  // Load the original pointer through the global.
+  return Builder.CreateLoad(Address(GOTAddr, getPointerAlign()),
+                            "decoded_addr");
 }
 
 static void removeImageAccessQualifier(std::string& TyName) {
@@ -480,8 +530,8 @@ static void removeImageAccessQualifier(std::string& TyName) {
 // for example in clGetKernelArgInfo() implementation between the address
 // spaces with targets without unique mapping to the OpenCL address spaces
 // (basically all single AS CPUs).
-static unsigned ArgInfoAddressSpace(unsigned LangAS) {
-  switch (LangAS) {
+static unsigned ArgInfoAddressSpace(LangAS AS) {
+  switch (AS) {
   case LangAS::opencl_global:   return 1;
   case LangAS::opencl_constant: return 2;
   case LangAS::opencl_local:    return 3;
@@ -621,7 +671,10 @@ static void GenOpenCLArgMetadata(const FunctionDecl *FD, llvm::Function *Fn,
 
     // Get image and pipe access qualifier:
     if (ty->isImageType()|| ty->isPipeType()) {
-      const OpenCLAccessAttr *A = parm->getAttr<OpenCLAccessAttr>();
+      const Decl *PDecl = parm;
+      if (auto *TD = dyn_cast<TypedefType>(ty))
+        PDecl = TD->getDecl();
+      const OpenCLAccessAttr *A = PDecl->getAttr<OpenCLAccessAttr>();
       if (A && A->isWriteOnly())
         accessQuals.push_back(llvm::MDString::get(Context, "write_only"));
       else if (A && A->isReadWrite())
@@ -721,6 +774,35 @@ static void markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn) {
   Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
 }
 
+static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
+  auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
+  if (!MD || !MD->getDeclName().getAsIdentifierInfo() ||
+      !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") ||
+      (MD->getNumParams() != 1 && MD->getNumParams() != 2))
+    return false;
+
+  if (MD->parameters()[0]->getType().getCanonicalType() != Ctx.getSizeType())
+    return false;
+
+  if (MD->getNumParams() == 2) {
+    auto *PT = MD->parameters()[1]->getType()->getAs<PointerType>();
+    if (!PT || !PT->isVoidPointerType() ||
+        !PT->getPointeeType().isConstQualified())
+      return false;
+  }
+
+  return true;
+}
+
+/// Return the UBSan prologue signature for \p FD if one is available.
+static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
+                                            const FunctionDecl *FD) {
+  if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
+    if (!MD->isStatic())
+      return nullptr;
+  return CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM);
+}
+
 void CodeGenFunction::StartFunction(GlobalDecl GD,
                                     QualType RetTy,
                                     llvm::Function *Fn,
@@ -744,8 +826,19 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
   CurFnInfo = &FnInfo;
   assert(CurFn->isDeclaration() && "Function already has body?");
 
-  if (CGM.isInSanitizerBlacklist(Fn, Loc))
-    SanOpts.clear();
+  // If this function has been blacklisted for any of the enabled sanitizers,
+  // disable the sanitizer for the function.
+  do {
+#define SANITIZER(NAME, ID)                                                    \
+  if (SanOpts.empty())                                                         \
+    break;                                                                     \
+  if (SanOpts.has(SanitizerKind::ID))                                          \
+    if (CGM.isInSanitizerBlacklist(SanitizerKind::ID, Fn, Loc))                \
+      SanOpts.set(SanitizerKind::ID, false);
+
+#include "clang/Basic/Sanitizers.def"
+#undef SANITIZER
+  } while (0);
 
   if (D) {
     // Apply the no_sanitize* attributes to SanOpts.
@@ -756,6 +849,8 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
   // Apply sanitizer attributes to the function.
   if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
     Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
+  if (SanOpts.hasOneOf(SanitizerKind::HWAddress))
+    Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
   if (SanOpts.has(SanitizerKind::Thread))
     Fn->addFnAttr(llvm::Attribute::SanitizeThread);
   if (SanOpts.has(SanitizerKind::Memory))
@@ -780,6 +875,14 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
     }
   }
 
+  // Ignore unrelated casts in STL allocate() since the allocator must cast
+  // from void* to T* before object initialization completes. Don't match on the
+  // namespace because not all allocators are in std::
+  if (D && SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
+    if (matchesStlAllocatorFn(D, getContext()))
+      SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast;
+  }
+
   // Apply xray attributes to the function (as a string, for now)
   if (D && ShouldXRayInstrumentFunction()) {
     if (const auto *XRayAttr = D->getAttr<XRayInstrumentAttr>()) {
@@ -799,14 +902,14 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
     }
   }
 
-  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
-    if (CGM.getLangOpts().OpenMP && FD->hasAttr<OMPDeclareSimdDeclAttr>())
-      CGM.getOpenMPRuntime().emitDeclareSimdFunction(FD, Fn);
-
   // Add no-jump-tables value.
   Fn->addFnAttr("no-jump-tables",
                 llvm::toStringRef(CGM.getCodeGenOpts().NoUseJumpTables));
 
+  // Add profile-sample-accurate value.
+  if (CGM.getCodeGenOpts().ProfileSampleAccurate)
+    Fn->addFnAttr("profile-sample-accurate");
+
   if (getLangOpts().OpenCL) {
     // Add metadata for a kernel function.
     if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
@@ -817,11 +920,13 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
   // prologue data.
   if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) {
     if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
-      if (llvm::Constant *PrologueSig =
-              CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
+      if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
         llvm::Constant *FTRTTIConst =
             CGM.GetAddrOfRTTIDescriptor(FD->getType(), /*ForEH=*/true);
-        llvm::Constant *PrologueStructElems[] = { PrologueSig, FTRTTIConst };
+        llvm::Constant *FTRTTIConstEncoded =
+            EncodeAddrForUseInPrologue(Fn, FTRTTIConst);
+        llvm::Constant *PrologueStructElems[] = {PrologueSig,
+                                                 FTRTTIConstEncoded};
         llvm::Constant *PrologueStructConst =
             llvm::ConstantStruct::getAnon(PrologueStructElems, /*Packed=*/true);
         Fn->setPrologueData(PrologueStructConst);
@@ -885,8 +990,16 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
     DI->EmitFunctionStart(GD, Loc, StartLoc, FnType, CurFn, Builder);
   }
 
-  if (ShouldInstrumentFunction())
-    EmitFunctionInstrumentation("__cyg_profile_func_enter");
+  if (ShouldInstrumentFunction()) {
+    if (CGM.getCodeGenOpts().InstrumentFunctions)
+      CurFn->addFnAttr("instrument-function-entry", "__cyg_profile_func_enter");
+    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
+      CurFn->addFnAttr("instrument-function-entry-inlined",
+                       "__cyg_profile_func_enter");
+    if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
+      CurFn->addFnAttr("instrument-function-entry-inlined",
+                       "__cyg_profile_func_enter_bare");
+  }
 
   // Since emitting the mcount call here impacts optimizations such as function
   // inlining, we just add an attribute to insert a mcount call in backend.
@@ -896,8 +1009,10 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
     if (CGM.getCodeGenOpts().CallFEntry)
      Fn->addFnAttr("fentry-call", "true");
    else {
-      if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
-        Fn->addFnAttr("counting-function", getTarget().getMCountName());
+      if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
+        Fn->addFnAttr("instrument-function-entry-inlined",
+                      getTarget().getMCountName());
+      }
     }
   }
 
@@ -1185,16 +1300,11 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
            !getLangOpts().CUDAIsDevice &&
           FD->hasAttr<CUDAGlobalAttr>())
     CGM.getCUDARuntime().emitDeviceStub(*this, Args);
-  else if (isa<CXXConversionDecl>(FD) &&
-           cast<CXXConversionDecl>(FD)->isLambdaToBlockPointerConversion()) {
-    // The lambda conversion to block pointer is special; the semantics can't be
-    // expressed in the AST, so IRGen needs to special-case it.
-    EmitLambdaToBlockPointerBody(Args);
-  } else if (isa<CXXMethodDecl>(FD) &&
-             cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
+  else if (isa<CXXMethodDecl>(FD) &&
+           cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
     // The lambda static invoker function is special, because it forwards or
     // clones the body of the function call operator (but is actually static).
-    EmitLambdaStaticInvokeFunction(cast<CXXMethodDecl>(FD));
+    EmitLambdaStaticInvokeBody(cast<CXXMethodDecl>(FD));
   } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
              (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
               cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
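Note on the instrumentation change above: instead of IRGen emitting the __cyg_profile_func_enter/exit calls directly (the deleted EmitFunctionInstrumentation), the frontend now records "instrument-function-entry"/"instrument-function-exit" string attributes (plus "-inlined" and "_bare" variants selected by the new InstrumentFunctionsAfterInlining / InstrumentFunctionEntryBare code-gen options) and leaves call insertion to the backend. The sketch below is not part of the patch; it is a minimal user-side illustration of the hooks those attributes ultimately resolve to, using the signature quoted in the removed comment (void __cyg_profile_func_{enter,exit}(void *this_fn, void *call_site)). The driver flag shown (-finstrument-functions) is the usual spelling and is assumed here rather than taken from this diff.

// example_instrumentation.cpp -- illustrative only, not part of the patch.
// Build (assumed flag): clang++ -finstrument-functions example_instrumentation.cpp
#include <cstdio>

extern "C" {
// Keep the hooks themselves uninstrumented, otherwise they would recurse.
__attribute__((no_instrument_function))
void __cyg_profile_func_enter(void *this_fn, void *call_site) {
  std::fprintf(stderr, "enter %p (call site %p)\n", this_fn, call_site);
}

__attribute__((no_instrument_function))
void __cyg_profile_func_exit(void *this_fn, void *call_site) {
  std::fprintf(stderr, "exit  %p (call site %p)\n", this_fn, call_site);
}
} // extern "C"

static int square(int x) { return x * x; } // receives enter/exit hooks when instrumented

int main() { return square(6) == 36 ? 0 : 1; }

Moving the decision into attributes is what allows the "-after-inlining" flavor: the backend can place the calls after the inliner has run, so fully inlined functions no longer produce spurious enter/exit pairs.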