Diffstat (limited to 'clang/lib/CodeGen/CodeGenFunction.h')
-rw-r--r-- | clang/lib/CodeGen/CodeGenFunction.h | 71
1 file changed, 53 insertions, 18 deletions
diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h
index fe0890f433e8..a535aa7c0410 100644
--- a/clang/lib/CodeGen/CodeGenFunction.h
+++ b/clang/lib/CodeGen/CodeGenFunction.h
@@ -41,6 +41,7 @@
 #include "llvm/IR/ValueHandle.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Transforms/Utils/SanitizerStats.h"
+#include <optional>

 namespace llvm {
 class BasicBlock;
@@ -539,7 +540,7 @@ public:
   /// potentially set the return value.
   bool SawAsmBlock = false;

-  const NamedDecl *CurSEHParent = nullptr;
+  GlobalDecl CurSEHParent;

   /// True if the current function is an outlined SEH helper. This can be a
   /// finally block or filter expression.
@@ -570,7 +571,7 @@ public:
       return false;

     // C++11 and later guarantees that a thread eventually will do one of the
-    // following (6.9.2.3.1 in C++11):
+    // following (C++11 [intro.multithread]p24 and C++17 [intro.progress]p1):
     // - terminate,
     // - make a call to a library I/O function,
     // - perform an access through a volatile glvalue, or
@@ -609,7 +610,7 @@ public:
   const CodeGen::CGBlockInfo *BlockInfo = nullptr;
   llvm::Value *BlockPointer = nullptr;

-  llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
+  llvm::DenseMap<const ValueDecl *, FieldDecl *> LambdaCaptureFields;
   FieldDecl *LambdaThisCaptureField = nullptr;

   /// A mapping from NRVO variables to the flags used to indicate
@@ -723,7 +724,7 @@ public:
     FPOptions OldFPFeatures;
     llvm::fp::ExceptionBehavior OldExcept;
     llvm::RoundingMode OldRounding;
-    Optional<CGBuilderTy::FastMathFlagGuard> FMFGuard;
+    std::optional<CGBuilderTy::FastMathFlagGuard> FMFGuard;
   };
   FPOptions CurFPFeatures;
@@ -1094,7 +1095,7 @@ public:

     void ForceCleanup() {
       RunCleanupsScope::ForceCleanup();
-      MappedVars.restore(CGF);
+      restoreMap();
     }

     /// Exit scope - all the mapped variables are restored.
@@ -1108,6 +1109,11 @@ public:
         VD = VD->getCanonicalDecl();
       return !VD->isLocalVarDeclOrParm() && CGF.LocalDeclMap.count(VD) > 0;
     }
+
+    /// Restore all mapped variables w/o cleanup. This is useful when we want
+    /// to reference the original variables but don't want the cleanup because
+    /// that could emit lifetime end too early, causing backend issue #56913.
+    void restoreMap() { MappedVars.restore(CGF); }
   };

   /// Save/restore original map of previously emitted local vars in case when we
@@ -1522,7 +1528,8 @@ public:
   /// If \p StepV is null, the default increment is 1.
   void incrementProfileCounter(const Stmt *S, llvm::Value *StepV = nullptr) {
     if (CGM.getCodeGenOpts().hasProfileClangInstr() &&
-        !CurFn->hasFnAttribute(llvm::Attribute::NoProfile))
+        !CurFn->hasFnAttribute(llvm::Attribute::NoProfile) &&
+        !CurFn->hasFnAttribute(llvm::Attribute::SkipProfile))
       PGO.emitCounterIncrement(Builder, S, StepV);
     PGO.setCurrentStmt(S);
   }
@@ -2015,7 +2022,7 @@ public:
     return getInvokeDestImpl();
   }

-  bool currentFunctionUsesSEHTry() const { return CurSEHParent != nullptr; }
+  bool currentFunctionUsesSEHTry() const { return !!CurSEHParent; }

   const TargetInfo &getTarget() const { return Target; }
   llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }
@@ -2225,7 +2232,7 @@ public:
   /// Emit the unified return block, trying to avoid its emission when
   /// possible.
   /// \return The debug location of the user written return statement if the
-  /// return block is is avoided.
+  /// return block is avoided.
   llvm::DebugLoc EmitReturnBlock();

   /// FinishFunction - Complete IR generation of the current function. It is
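One thread running through the hunks above is the LLVM-wide migration from llvm::Optional/llvm::None to the standard library: the new #include <optional> and the FMFGuard member now typed std::optional. A minimal standalone sketch of that RAII-guard-in-optional pattern, using a hypothetical stand-in for CGBuilderTy::FastMathFlagGuard rather than the real type:

    #include <iostream>
    #include <optional>

    // Hypothetical stand-in for CGBuilderTy::FastMathFlagGuard: an RAII type
    // that saves state on construction and restores it on destruction.
    struct FastMathFlagGuard {
      FastMathFlagGuard() { std::cout << "flags saved\n"; }
      ~FastMathFlagGuard() { std::cout << "flags restored\n"; }
    };

    int main() {
      // Mirrors the FMFGuard member above: std::optional lets the guard be
      // engaged lazily and released before the enclosing scope ends.
      std::optional<FastMathFlagGuard> FMFGuard;
      FMFGuard.emplace(); // save the flags only when actually needed
      FMFGuard.reset();   // restore them early, as llvm::Optional allowed
      return 0;
    }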
@@ -2878,7 +2885,7 @@ public:
                              AggValueSlot::Overlap_t Overlap,
                              SourceLocation Loc, bool NewPointerIsChecked);

-  /// Emit assumption load for all bases. Requires to be be called only on
+  /// Emit assumption load for all bases. Requires to be called only on
   /// most-derived class and not under construction of the object.
   void EmitVTableAssumptionLoads(const CXXRecordDecl *ClassDecl, Address This);
@@ -3207,7 +3214,7 @@ public:
   /// This function may clear the current insertion point; callers should use
   /// EnsureInsertPoint if they wish to subsequently generate code without first
   /// calling EmitBlock, EmitBranch, or EmitStmt.
-  void EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs = None);
+  void EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs = std::nullopt);

   /// EmitSimpleStmt - Try to emit a "simple" statement which does not
   /// necessarily require an insertion point or debug information; typically
@@ -3235,10 +3242,10 @@ public:
   void EmitIfStmt(const IfStmt &S);

   void EmitWhileStmt(const WhileStmt &S,
-                     ArrayRef<const Attr *> Attrs = None);
-  void EmitDoStmt(const DoStmt &S, ArrayRef<const Attr *> Attrs = None);
+                     ArrayRef<const Attr *> Attrs = std::nullopt);
+  void EmitDoStmt(const DoStmt &S, ArrayRef<const Attr *> Attrs = std::nullopt);
   void EmitForStmt(const ForStmt &S,
-                   ArrayRef<const Attr *> Attrs = None);
+                   ArrayRef<const Attr *> Attrs = std::nullopt);
   void EmitReturnStmt(const ReturnStmt &S);
   void EmitDeclStmt(const DeclStmt &S);
   void EmitBreakStmt(const BreakStmt &S);
@@ -3315,7 +3322,7 @@ public:
                             llvm::Value *ParentFP);

   void EmitCXXForRangeStmt(const CXXForRangeStmt &S,
-                           ArrayRef<const Attr *> Attrs = None);
+                           ArrayRef<const Attr *> Attrs = std::nullopt);

   /// Controls insertion of cancellation exit blocks in worksharing constructs.
   class OMPCancelStackRAII {
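The Attrs = None → Attrs = std::nullopt defaults above work because llvm::ArrayRef is constructible from std::nullopt_t, yielding an empty list. A small sketch of the same default-argument style, assuming LLVM 16-era headers are on the include path; sumOrZero is an illustrative helper, not a clang or LLVM API:

    #include "llvm/ADT/ArrayRef.h"
    #include <optional>

    // Illustrative helper (not a clang API) using the same default-argument
    // style as EmitStmt and friends after the migration.
    static int sumOrZero(llvm::ArrayRef<int> Values = std::nullopt) {
      int Sum = 0;
      for (int V : Values) // an ArrayRef built from std::nullopt is empty
        Sum += V;
      return Sum;
    }

    int main() {
      int Data[] = {1, 2, 3};
      return (sumOrZero(Data) == 6 && sumOrZero() == 0) ? 0 : 1;
    }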
@@ -3514,6 +3521,7 @@ public:
   void EmitOMPParallelMasterDirective(const OMPParallelMasterDirective &S);
   void EmitOMPTaskDirective(const OMPTaskDirective &S);
   void EmitOMPTaskyieldDirective(const OMPTaskyieldDirective &S);
+  void EmitOMPErrorDirective(const OMPErrorDirective &S);
   void EmitOMPBarrierDirective(const OMPBarrierDirective &S);
   void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S);
   void EmitOMPTaskgroupDirective(const OMPTaskgroupDirective &S);
@@ -3967,6 +3975,8 @@ public:
   llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
                               const ObjCIvarDecl *Ivar);
+  llvm::Value *EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface,
+                                           const ObjCIvarDecl *Ivar);
   LValue EmitLValueForField(LValue Base, const FieldDecl* Field);
   LValue EmitLValueForLambdaField(const FieldDecl *Field);
@@ -4194,6 +4204,12 @@ public:
   llvm::Type *getEltType(const SVETypeFlags &TypeFlags);
   llvm::ScalableVectorType *getSVEType(const SVETypeFlags &TypeFlags);
   llvm::ScalableVectorType *getSVEPredType(const SVETypeFlags &TypeFlags);
+  llvm::Value *EmitSVETupleSetOrGet(const SVETypeFlags &TypeFlags,
+                                    llvm::Type *ReturnType,
+                                    ArrayRef<llvm::Value *> Ops);
+  llvm::Value *EmitSVETupleCreate(const SVETypeFlags &TypeFlags,
+                                  llvm::Type *ReturnType,
+                                  ArrayRef<llvm::Value *> Ops);
   llvm::Value *EmitSVEAllTruePred(const SVETypeFlags &TypeFlags);
   llvm::Value *EmitSVEDupX(llvm::Value *Scalar);
   llvm::Value *EmitSVEDupX(llvm::Value *Scalar, llvm::Type *Ty);
@@ -4247,7 +4263,8 @@ public:
   llvm::Value *EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
   llvm::Value *EmitRISCVBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
                                     ReturnValueSlot ReturnValue);
-  bool ProcessOrderScopeAMDGCN(llvm::Value *Order, llvm::Value *Scope,
+  llvm::Value *EmitLoongArchBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+  void ProcessOrderScopeAMDGCN(llvm::Value *Order, llvm::Value *Scope,
                                llvm::AtomicOrdering &AO,
                                llvm::SyncScope::ID &SSID);
@@ -4403,6 +4420,11 @@ public:
   /// EmitLoadOfComplex - Load a complex number from the specified l-value.
   ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc);

+  ComplexPairTy EmitPromotedComplexExpr(const Expr *E, QualType PromotionType);
+  llvm::Value *EmitPromotedScalarExpr(const Expr *E, QualType PromotionType);
+  ComplexPairTy EmitPromotedValue(ComplexPairTy result, QualType PromotionType);
+  ComplexPairTy EmitUnPromotedValue(ComplexPairTy result,
+                                    QualType PromotionType);
   Address emitAddrOfRealComponent(Address complex, QualType complexType);
   Address emitAddrOfImagComponent(Address complex, QualType complexType);
@@ -4600,6 +4622,9 @@ public:
   /// passing to a runtime sanitizer handler.
   llvm::Constant *EmitCheckSourceLocation(SourceLocation Loc);

+  void EmitKCFIOperandBundle(const CGCallee &Callee,
+                             SmallVectorImpl<llvm::OperandBundleDef> &Bundles);
+
   /// Create a basic block that will either trap or call a handler function in
   /// the UBSan runtime with the provided arguments, and create a conditional
   /// branch to it.
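EmitKCFIOperandBundle is new here: with -fsanitize=kcfi, clang attaches the expected callee type hash to indirect call sites as a "kcfi" operand bundle. A hedged sketch of what such a bundle looks like at the LLVM C++ API level; the 0x12345678 hash is a placeholder, not a value clang would compute:

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    int main() {
      LLVMContext Ctx;
      Module M("kcfi-demo", Ctx);
      IRBuilder<> B(Ctx);

      FunctionType *FTy = FunctionType::get(B.getVoidTy(), false);
      Function *Callee =
          Function::Create(FTy, Function::ExternalLinkage, "callee", M);
      Function *Caller =
          Function::Create(FTy, Function::ExternalLinkage, "caller", M);
      B.SetInsertPoint(BasicBlock::Create(Ctx, "entry", Caller));

      // The type hash rides on the call site as a "kcfi" operand bundle.
      // 0x12345678 is a placeholder, not a hash clang would compute.
      Value *TypeHash = B.getInt32(0x12345678);
      SmallVector<OperandBundleDef, 1> Bundles;
      Bundles.emplace_back("kcfi", TypeHash);
      B.CreateCall(FTy, Callee, {}, Bundles);
      B.CreateRetVoid();

      // Prints IR like: call void @callee() [ "kcfi"(i32 305419896) ]
      M.print(outs(), nullptr);
      return 0;
    }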
@@ -4789,6 +4814,12 @@ public:
   // last (if it exists).
   void EmitMultiVersionResolver(llvm::Function *Resolver,
                                 ArrayRef<MultiVersionResolverOption> Options);
+  void
+  EmitX86MultiVersionResolver(llvm::Function *Resolver,
+                              ArrayRef<MultiVersionResolverOption> Options);
+  void
+  EmitAArch64MultiVersionResolver(llvm::Function *Resolver,
+                                  ArrayRef<MultiVersionResolverOption> Options);

 private:
   QualType getVarArgType(const Expr *Arg);
@@ -4807,7 +4838,11 @@ private:
   llvm::Value *EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs);
   llvm::Value *EmitX86CpuSupports(uint64_t Mask);
   llvm::Value *EmitX86CpuInit();
-  llvm::Value *FormResolverCondition(const MultiVersionResolverOption &RO);
+  llvm::Value *FormX86ResolverCondition(const MultiVersionResolverOption &RO);
+  llvm::Value *EmitAArch64CpuInit();
+  llvm::Value *
+  FormAArch64ResolverCondition(const MultiVersionResolverOption &RO);
+  llvm::Value *EmitAArch64CpuSupports(ArrayRef<StringRef> FeatureStrs);
 };
@@ -4817,9 +4852,9 @@ DominatingLLVMValue::save(CodeGenFunction &CGF, llvm::Value *value) {

   // Otherwise, we need an alloca.
   auto align = CharUnits::fromQuantity(
-      CGF.CGM.getDataLayout().getPrefTypeAlignment(value->getType()));
+      CGF.CGM.getDataLayout().getPrefTypeAlign(value->getType()));
   Address alloca =
-      CGF.CreateTempAlloca(value->getType(), align, "cond-cleanup.save");
+      CGF.CreateTempAlloca(value->getType(), align, "cond-cleanup.save");
   CGF.Builder.CreateStore(value, alloca);

   return saved_type(alloca.getPointer(), true);
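The split into EmitX86MultiVersionResolver and EmitAArch64MultiVersionResolver reflects that each target forms its resolver conditions differently (FormX86ResolverCondition vs FormAArch64ResolverCondition), while the shared driver tries candidates in priority order with the default last. A plain C++ sketch of that selection idea; all names and feature bits below are illustrative, not clang's:

    #include <cstdio>

    // Placeholder for a __cpu_supports-style runtime feature check.
    static bool cpuSupports(unsigned FeatureMask) {
      const unsigned Available = 0b011; // pretend features 0 and 1 exist
      return (Available & FeatureMask) == FeatureMask;
    }

    static void fooDefault() { std::puts("default"); }
    static void fooAVX2() { std::puts("avx2"); }

    using FooFn = void (*)();

    // Resolver body: test candidates from highest priority down; fall back
    // to the default implementation last, mirroring EmitMultiVersionResolver.
    static FooFn fooResolver() {
      if (cpuSupports(0b010)) // hypothetical feature bit for the avx2 variant
        return fooAVX2;
      return fooDefault;
    }

    int main() {
      fooResolver()(); // prints "avx2" given the pretend feature set above
      return 0;
    }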