Diffstat (limited to 'clang/lib/CodeGen/CodeGenFunction.h')
-rw-r--r-- | clang/lib/CodeGen/CodeGenFunction.h | 359
1 file changed, 323 insertions, 36 deletions
diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h
index 3d8bc93eb965..d794f4f0fa81 100644
--- a/clang/lib/CodeGen/CodeGenFunction.h
+++ b/clang/lib/CodeGen/CodeGenFunction.h
@@ -26,6 +26,7 @@
 #include "clang/AST/ExprCXX.h"
 #include "clang/AST/ExprObjC.h"
 #include "clang/AST/ExprOpenMP.h"
+#include "clang/AST/StmtOpenMP.h"
 #include "clang/AST/Type.h"
 #include "clang/Basic/ABI.h"
 #include "clang/Basic/CapturedStmt.h"
@@ -36,6 +37,7 @@
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/MapVector.h"
 #include "llvm/ADT/SmallVector.h"
+#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
 #include "llvm/IR/ValueHandle.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Transforms/Utils/SanitizerStats.h"
@@ -75,7 +77,11 @@ class ObjCAtTryStmt;
 class ObjCAtThrowStmt;
 class ObjCAtSynchronizedStmt;
 class ObjCAutoreleasePoolStmt;
+class OMPUseDevicePtrClause;
+class OMPUseDeviceAddrClause;
 class ReturnsNonNullAttr;
+class SVETypeFlags;
+class OMPExecutableDirective;
 
 namespace analyze_os_log {
 class OSLogBufferLayout;
@@ -118,6 +124,7 @@ enum TypeEvaluationKind {
   SANITIZER_CHECK(FunctionTypeMismatch, function_type_mismatch, 1)             \
   SANITIZER_CHECK(ImplicitConversion, implicit_conversion, 0)                  \
   SANITIZER_CHECK(InvalidBuiltin, invalid_builtin, 0)                          \
+  SANITIZER_CHECK(InvalidObjCCast, invalid_objc_cast, 0)                       \
   SANITIZER_CHECK(LoadInvalidValue, load_invalid_value, 0)                     \
   SANITIZER_CHECK(MissingReturn, missing_return, 0)                            \
   SANITIZER_CHECK(MulOverflow, mul_overflow, 0)                                \
@@ -258,6 +265,9 @@ public:
   CodeGenModule &CGM;  // Per-module state.
   const TargetInfo &Target;
 
+  // For EH/SEH outlined funclets, this field points to the parent's CGF.
+  CodeGenFunction *ParentCGF = nullptr;
+
   typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
   LoopInfoStack LoopStack;
   CGBuilderTy Builder;
@@ -332,6 +342,10 @@ public:
   /// This is invalid if sret is not in use.
   Address ReturnValuePointer = Address::invalid();
 
+  /// If a return statement is being visited, this holds the return statement's
+  /// result expression.
+  const Expr *RetExpr = nullptr;
+
   /// Return true if a label was seen in the current scope.
   bool hasLabelBeenSeenInCurrentScope() const {
     if (CurLexicalScope)
@@ -485,6 +499,9 @@ public:
   /// region.
   bool IsInPreservedAIRegion = false;
 
+  /// True if the current statement has the nomerge attribute.
+  bool InNoMergeAttributedStmt = false;
+
   const CodeGen::CGBlockInfo *BlockInfo = nullptr;
   llvm::Value *BlockPointer = nullptr;
 
@@ -533,9 +550,6 @@ public:
   unsigned NextCleanupDestIndex = 1;
 
-  /// FirstBlockInfo - The head of a singly-linked-list of block layouts.
-  CGBlockInfo *FirstBlockInfo = nullptr;
-
   /// EHResumeBlock - Unified block containing a call to llvm.eh.resume.
   llvm::BasicBlock *EHResumeBlock = nullptr;
 
@@ -560,11 +574,49 @@ public:
   llvm::BasicBlock *getInvokeDestImpl();
 
+  /// Parent loop-based directive for scan directive.
+  const OMPExecutableDirective *OMPParentLoopDirectiveForScan = nullptr;
+  llvm::BasicBlock *OMPBeforeScanBlock = nullptr;
+  llvm::BasicBlock *OMPAfterScanBlock = nullptr;
+  llvm::BasicBlock *OMPScanExitBlock = nullptr;
+  llvm::BasicBlock *OMPScanDispatch = nullptr;
+  bool OMPFirstScanLoop = false;
+
+  /// Manages the parent directive for scan directives.
+  class ParentLoopDirectiveForScanRegion {
+    CodeGenFunction &CGF;
+    const OMPExecutableDirective *ParentLoopDirectiveForScan;
+
+  public:
+    ParentLoopDirectiveForScanRegion(
+        CodeGenFunction &CGF,
+        const OMPExecutableDirective &ParentLoopDirectiveForScan)
+        : CGF(CGF),
+          ParentLoopDirectiveForScan(CGF.OMPParentLoopDirectiveForScan) {
+      CGF.OMPParentLoopDirectiveForScan = &ParentLoopDirectiveForScan;
+    }
+    ~ParentLoopDirectiveForScanRegion() {
+      CGF.OMPParentLoopDirectiveForScan = ParentLoopDirectiveForScan;
+    }
+  };
+
   template <class T>
   typename DominatingValue<T>::saved_type saveValueInCond(T value) {
     return DominatingValue<T>::save(*this, value);
   }
 
+  class CGFPOptionsRAII {
+  public:
+    CGFPOptionsRAII(CodeGenFunction &CGF, FPOptions FPFeatures);
+    ~CGFPOptionsRAII();
+
+  private:
+    CodeGenFunction &CGF;
+    FPOptions OldFPFeatures;
+    Optional<CGBuilderTy::FastMathFlagGuard> FMFGuard;
+  };
+  FPOptions CurFPFeatures;
+
 public:
   /// ObjCEHValueStack - Stack of Objective-C exception values, used for
   /// rethrows.
@@ -1541,6 +1593,169 @@ public:
     CallArgList OldCXXInheritedCtorInitExprArgs;
   };
 
+  // Helper class for the OpenMP IR Builder. Allows reusability of code used
+  // for region body and finalization codegen callbacks. This class will also
+  // contain privatization functions used by the privatization callbacks.
+  //
+  // TODO: this is a temporary class for things that are being moved out of
+  // CGOpenMPRuntime, new versions of current CodeGenFunction methods, or
+  // utility functions for use with the OMPBuilder. Once the move to the
+  // OMPBuilder is done, everything here will either become part of
+  // CodeGenFunction directly, or a new helper class that will contain
+  // functions used by both this and the OMPBuilder.
+
+  struct OMPBuilderCBHelpers {
+
+    OMPBuilderCBHelpers() = delete;
+    OMPBuilderCBHelpers(const OMPBuilderCBHelpers &) = delete;
+    OMPBuilderCBHelpers &operator=(const OMPBuilderCBHelpers &) = delete;
+
+    using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
+
+    /// Cleanup action for allocate support.
+    class OMPAllocateCleanupTy final : public EHScopeStack::Cleanup {
+
+    private:
+      llvm::CallInst *RTLFnCI;
+
+    public:
+      OMPAllocateCleanupTy(llvm::CallInst *RLFnCI) : RTLFnCI(RLFnCI) {
+        RLFnCI->removeFromParent();
+      }
+
+      void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
+        if (!CGF.HaveInsertPoint())
+          return;
+        CGF.Builder.Insert(RTLFnCI);
+      }
+    };
+
+    /// Returns the address of the threadprivate variable for the current
+    /// thread. This also creates any necessary OMP runtime calls.
+    ///
+    /// \param VD VarDecl for the threadprivate variable.
+    /// \param VDAddr Address of the VarDecl.
+    /// \param Loc The location where the directive was encountered.
+    static Address getAddrOfThreadPrivate(CodeGenFunction &CGF,
+                                          const VarDecl *VD, Address VDAddr,
+                                          SourceLocation Loc);
+
+    /// Gets the OpenMP-specific address of the local variable \p VD.
+    static Address getAddressOfLocalVariable(CodeGenFunction &CGF,
+                                             const VarDecl *VD);
+    /// Get the platform-specific name separator.
+    /// \param Parts Different parts of the final name that need separation.
+    /// \param FirstSeparator First separator used between the initial two
+    ///        parts of the name.
+    /// \param Separator Separator used between all of the remaining
+    ///        consecutive parts of the name.
+    static std::string getNameWithSeparators(ArrayRef<StringRef> Parts,
+                                             StringRef FirstSeparator = ".",
+                                             StringRef Separator = ".");
+    /// Emit the finalization for an OMP region.
+    /// \param CGF The CodeGen function this belongs to.
+    /// \param IP  Insertion point for generating the finalization code.
+    static void FinalizeOMPRegion(CodeGenFunction &CGF, InsertPointTy IP) {
+      CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
+      assert(IP.getBlock()->end() != IP.getPoint() &&
+             "OpenMP IR Builder should cause terminated block!");
+
+      llvm::BasicBlock *IPBB = IP.getBlock();
+      llvm::BasicBlock *DestBB = IPBB->getUniqueSuccessor();
+      assert(DestBB && "Finalization block should have one successor!");
+
+      // Erase the terminator and replace it with a cleanup branch.
+      IPBB->getTerminator()->eraseFromParent();
+      CGF.Builder.SetInsertPoint(IPBB);
+      CodeGenFunction::JumpDest Dest = CGF.getJumpDestInCurrentScope(DestBB);
+      CGF.EmitBranchThroughCleanup(Dest);
+    }
+
+    /// Emit the body of an OMP region.
+    /// \param CGF            The CodeGen function this belongs to.
+    /// \param RegionBodyStmt The body statement for the OpenMP region being
+    ///                       generated.
+    /// \param CodeGenIP      Insertion point for generating the body code.
+    /// \param FiniBB         The finalization basic block.
+    static void EmitOMPRegionBody(CodeGenFunction &CGF,
+                                  const Stmt *RegionBodyStmt,
+                                  InsertPointTy CodeGenIP,
+                                  llvm::BasicBlock &FiniBB) {
+      llvm::BasicBlock *CodeGenIPBB = CodeGenIP.getBlock();
+      if (llvm::Instruction *CodeGenIPBBTI = CodeGenIPBB->getTerminator())
+        CodeGenIPBBTI->eraseFromParent();
+
+      CGF.Builder.SetInsertPoint(CodeGenIPBB);
+
+      CGF.EmitStmt(RegionBodyStmt);
+
+      if (CGF.Builder.saveIP().isSet())
+        CGF.Builder.CreateBr(&FiniBB);
+    }
+
+    /// RAII for preserving necessary info during outlined region body codegen.
+    class OutlinedRegionBodyRAII {
+
+      llvm::AssertingVH<llvm::Instruction> OldAllocaIP;
+      CodeGenFunction::JumpDest OldReturnBlock;
+      CGBuilderTy::InsertPoint IP;
+      CodeGenFunction &CGF;
+
+    public:
+      OutlinedRegionBodyRAII(CodeGenFunction &cgf, InsertPointTy &AllocaIP,
+                             llvm::BasicBlock &RetBB)
+          : CGF(cgf) {
+        assert(AllocaIP.isSet() &&
+               "Must specify Insertion point for allocas of outlined function");
+        OldAllocaIP = CGF.AllocaInsertPt;
+        CGF.AllocaInsertPt = &*AllocaIP.getPoint();
+        IP = CGF.Builder.saveIP();
+
+        OldReturnBlock = CGF.ReturnBlock;
+        CGF.ReturnBlock = CGF.getJumpDestInCurrentScope(&RetBB);
+      }
+
+      ~OutlinedRegionBodyRAII() {
+        CGF.AllocaInsertPt = OldAllocaIP;
+        CGF.ReturnBlock = OldReturnBlock;
+        CGF.Builder.restoreIP(IP);
+      }
+    };
+
+    /// RAII for preserving necessary info during inlined region body codegen.
+    class InlinedRegionBodyRAII {
+
+      llvm::AssertingVH<llvm::Instruction> OldAllocaIP;
+      CodeGenFunction &CGF;
+
+    public:
+      InlinedRegionBodyRAII(CodeGenFunction &cgf, InsertPointTy &AllocaIP,
+                            llvm::BasicBlock &FiniBB)
+          : CGF(cgf) {
+        // The alloca insertion block should be in the entry block of the
+        // containing function, so this expects either an empty AllocaIP (in
+        // which case the old alloca insertion point is reused) or a new
+        // AllocaIP in the same block as the old one.
+        assert((!AllocaIP.isSet() ||
+                CGF.AllocaInsertPt->getParent() == AllocaIP.getBlock()) &&
+               "Insertion point should be in the entry block of containing "
+               "function!");
+        OldAllocaIP = CGF.AllocaInsertPt;
+        if (AllocaIP.isSet())
+          CGF.AllocaInsertPt = &*AllocaIP.getPoint();
+
+        // TODO: Remove the call, after making sure the counter is not used by
+        //       the EHStack.
+        // Since this is an inlined region, it should not modify the
+        // ReturnBlock, and should reuse the one for the enclosing outlined
+        // region. So the JumpDest returned by the function is discarded.
+        (void)CGF.getJumpDestInCurrentScope(&FiniBB);
+      }
+
+      ~InlinedRegionBodyRAII() { CGF.AllocaInsertPt = OldAllocaIP; }
+    };
+  };
+
 private:
   /// CXXThisDecl - When generating code for a C++ member function,
   /// this will hold the implicit 'this' declaration.
@@ -1772,7 +1987,6 @@ public:
   /// information about the block, including the block invoke function, the
   /// captured variables, etc.
   llvm::Value *EmitBlockLiteral(const BlockExpr *);
-  static void destroyBlockInfos(CGBlockInfo *info);
 
   llvm::Function *GenerateBlockFunction(GlobalDecl GD,
                                         const CGBlockInfo &Info,
@@ -2155,13 +2369,6 @@ public:
   LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T);
   LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T);
 
-  CharUnits getNaturalTypeAlignment(QualType T,
-                                    LValueBaseInfo *BaseInfo = nullptr,
-                                    TBAAAccessInfo *TBAAInfo = nullptr,
-                                    bool forPointeeType = false);
-  CharUnits getNaturalPointeeTypeAlignment(QualType T,
-                                           LValueBaseInfo *BaseInfo = nullptr,
-                                           TBAAAccessInfo *TBAAInfo = nullptr);
 
   Address EmitLoadOfReference(LValue RefLVal,
                               LValueBaseInfo *PointeeBaseInfo = nullptr,
@@ -2264,8 +2471,9 @@ public:
   /// CreateAggTemp - Create a temporary memory object for the given
   /// aggregate type.
-  AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp") {
-    return AggValueSlot::forAddr(CreateMemTemp(T, Name),
+  AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp",
+                             Address *Alloca = nullptr) {
+    return AggValueSlot::forAddr(CreateMemTemp(T, Name, Alloca),
                                  T.getQualifiers(),
                                  AggValueSlot::IsNotDestructed,
                                  AggValueSlot::DoesNotNeedGCBarriers,
@@ -2594,7 +2802,8 @@ public:
   Address EmitCXXUuidofExpr(const CXXUuidofExpr *E);
 
   /// Situations in which we might emit a check for the suitability of a
-  ///        pointer or glvalue.
+  /// pointer or glvalue. Needs to be kept in sync with ubsan_handlers.cpp in
+  /// compiler-rt.
   enum TypeCheckKind {
     /// Checking the operand of a load. Must be suitably sized and aligned.
     TCK_Load,
@@ -2826,7 +3035,7 @@ public:
   PeepholeProtection protectFromPeepholes(RValue rvalue);
   void unprotectFromPeepholes(PeepholeProtection protection);
 
-  void EmitAlignmentAssumptionCheck(llvm::Value *Ptr, QualType Ty,
+  void emitAlignmentAssumptionCheck(llvm::Value *Ptr, QualType Ty,
                                     SourceLocation Loc,
                                     SourceLocation AssumptionLoc,
                                     llvm::Value *Alignment,
@@ -2834,13 +3043,14 @@
                                     llvm::Value *TheCheck,
                                     llvm::Instruction *Assumption);
 
-  void EmitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty,
+  void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty,
                                SourceLocation Loc, SourceLocation AssumptionLoc,
                                llvm::Value *Alignment,
                                llvm::Value *OffsetValue = nullptr);
 
-  void EmitAlignmentAssumption(llvm::Value *PtrValue, const Expr *E,
-                               SourceLocation AssumptionLoc, llvm::Value *Alignment,
+  void emitAlignmentAssumption(llvm::Value *PtrValue, const Expr *E,
+                               SourceLocation AssumptionLoc,
+                               llvm::Value *Alignment,
                                llvm::Value *OffsetValue = nullptr);
 
   //===--------------------------------------------------------------------===//
@@ -2983,7 +3193,8 @@ public:
   llvm::Function *EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K);
   llvm::Function *GenerateCapturedStmtFunction(const CapturedStmt &S);
   Address GenerateCapturedStmtArgument(const CapturedStmt &S);
-  llvm::Function *GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S);
+  llvm::Function *GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S,
+                                                     SourceLocation Loc);
   void GenerateOpenMPCapturedVars(const CapturedStmt &S,
                                   SmallVectorImpl<llvm::Value *> &CapturedVars);
   void emitOMPSimpleStore(LValue LVal, RValue RVal, QualType RValTy,
@@ -3037,7 +3248,10 @@ public:
   void EmitOMPPrivateClause(const OMPExecutableDirective &D,
                             OMPPrivateScope &PrivateScope);
   void EmitOMPUseDevicePtrClause(
-      const OMPClause &C, OMPPrivateScope &PrivateScope,
+      const OMPUseDevicePtrClause &C, OMPPrivateScope &PrivateScope,
+      const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap);
+  void EmitOMPUseDeviceAddrClause(
+      const OMPUseDeviceAddrClause &C, OMPPrivateScope &PrivateScope,
       const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap);
   /// Emit code for copyin clause in \a D directive. The next code is
   /// generated at the start of outlined functions for directives:
@@ -3091,7 +3305,8 @@ public:
   /// proper codegen in internal captured statement.
   ///
   void EmitOMPReductionClauseInit(const OMPExecutableDirective &D,
-                                  OMPPrivateScope &PrivateScope);
+                                  OMPPrivateScope &PrivateScope,
+                                  bool ForInscan = false);
   /// Emit final update of reduction values to original variables at
   /// the end of the directive.
   ///
@@ -3149,6 +3364,8 @@ public:
   void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S);
   void EmitOMPTaskgroupDirective(const OMPTaskgroupDirective &S);
   void EmitOMPFlushDirective(const OMPFlushDirective &S);
+  void EmitOMPDepobjDirective(const OMPDepobjDirective &S);
+  void EmitOMPScanDirective(const OMPScanDirective &S);
   void EmitOMPOrderedDirective(const OMPOrderedDirective &S);
   void EmitOMPAtomicDirective(const OMPAtomicDirective &S);
   void EmitOMPTargetDirective(const OMPTargetDirective &S);
@@ -3250,8 +3467,8 @@ public:
   /// \param PostIncGen Genrator for post-increment code (required for ordered
   /// loop directvies).
   void EmitOMPInnerLoop(
-      const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
-      const Expr *IncExpr,
+      const OMPExecutableDirective &S, bool RequiresCleanup,
+      const Expr *LoopCond, const Expr *IncExpr,
       const llvm::function_ref<void(CodeGenFunction &)> BodyGen,
       const llvm::function_ref<void(CodeGenFunction &)> PostIncGen);
 
@@ -3517,6 +3734,7 @@ public:
   LValue EmitUnaryOpLValue(const UnaryOperator *E);
   LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
                                 bool Accessed = false);
+  LValue EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E);
   LValue EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
                                  bool IsLowerBound = true);
   LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E);
@@ -3722,6 +3940,8 @@ public:
   RValue EmitNVPTXDevicePrintfCallExpr(const CallExpr *E,
                                        ReturnValueSlot ReturnValue);
+  RValue EmitAMDGPUDevicePrintfCallExpr(const CallExpr *E,
+                                        ReturnValueSlot ReturnValue);
 
   RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
                          const CallExpr *E, ReturnValueSlot ReturnValue);
@@ -3757,6 +3977,13 @@ public:
   llvm::Value *EmitARMMVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
                                      ReturnValueSlot ReturnValue,
                                      llvm::Triple::ArchType Arch);
+  llvm::Value *EmitARMCDEBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
+                                     ReturnValueSlot ReturnValue,
+                                     llvm::Triple::ArchType Arch);
+  llvm::Value *EmitCMSEClearRecord(llvm::Value *V, llvm::IntegerType *ITy,
+                                   QualType RTy);
+  llvm::Value *EmitCMSEClearRecord(llvm::Value *V, llvm::ArrayType *ATy,
+                                   QualType RTy);
 
   llvm::Value *EmitCommonNeonBuiltinExpr(unsigned BuiltinID,
                                          unsigned LLVMIntrinsic,
@@ -3775,12 +4002,62 @@ public:
                             SmallVectorImpl<llvm::Value*> &O,
                             const char *name,
                             unsigned shift = 0, bool rightshift = false);
+  llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx,
+                             const llvm::ElementCount &Count);
   llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx);
   llvm::Value *EmitNeonShiftVector(llvm::Value *V, llvm::Type *Ty,
                                    bool negateForRightShift);
   llvm::Value *EmitNeonRShiftImm(llvm::Value *Vec, llvm::Value *Amt,
                                  llvm::Type *Ty, bool usgn, const char *name);
   llvm::Value *vectorWrapScalar16(llvm::Value *Op);
+  /// SVEBuiltinMemEltTy - Returns the memory element type for this memory
+  /// access builtin.  Only required if it can't be inferred from the base
+  /// pointer operand.
+  llvm::Type *SVEBuiltinMemEltTy(SVETypeFlags TypeFlags);
+
+  SmallVector<llvm::Type *, 2> getSVEOverloadTypes(SVETypeFlags TypeFlags,
+                                                   llvm::Type *ReturnType,
+                                                   ArrayRef<llvm::Value *> Ops);
+  llvm::Type *getEltType(SVETypeFlags TypeFlags);
+  llvm::ScalableVectorType *getSVEType(const SVETypeFlags &TypeFlags);
+  llvm::ScalableVectorType *getSVEPredType(SVETypeFlags TypeFlags);
+  llvm::Value *EmitSVEAllTruePred(SVETypeFlags TypeFlags);
+  llvm::Value *EmitSVEDupX(llvm::Value *Scalar);
+  llvm::Value *EmitSVEDupX(llvm::Value *Scalar, llvm::Type *Ty);
+  llvm::Value *EmitSVEReinterpret(llvm::Value *Val, llvm::Type *Ty);
+  llvm::Value *EmitSVEPMull(SVETypeFlags TypeFlags,
+                            llvm::SmallVectorImpl<llvm::Value *> &Ops,
+                            unsigned BuiltinID);
+  llvm::Value *EmitSVEMovl(SVETypeFlags TypeFlags,
+                           llvm::ArrayRef<llvm::Value *> Ops,
+                           unsigned BuiltinID);
+  llvm::Value *EmitSVEPredicateCast(llvm::Value *Pred,
+                                    llvm::ScalableVectorType *VTy);
+  llvm::Value *EmitSVEGatherLoad(SVETypeFlags TypeFlags,
+                                 llvm::SmallVectorImpl<llvm::Value *> &Ops,
+                                 unsigned IntID);
+  llvm::Value *EmitSVEScatterStore(SVETypeFlags TypeFlags,
+                                   llvm::SmallVectorImpl<llvm::Value *> &Ops,
+                                   unsigned IntID);
+  llvm::Value *EmitSVEMaskedLoad(const CallExpr *, llvm::Type *ReturnTy,
+                                 SmallVectorImpl<llvm::Value *> &Ops,
+                                 unsigned BuiltinID, bool IsZExtReturn);
+  llvm::Value *EmitSVEMaskedStore(const CallExpr *,
+                                  SmallVectorImpl<llvm::Value *> &Ops,
+                                  unsigned BuiltinID);
+  llvm::Value *EmitSVEPrefetchLoad(SVETypeFlags TypeFlags,
+                                   SmallVectorImpl<llvm::Value *> &Ops,
+                                   unsigned BuiltinID);
+  llvm::Value *EmitSVEGatherPrefetch(SVETypeFlags TypeFlags,
+                                     SmallVectorImpl<llvm::Value *> &Ops,
+                                     unsigned IntID);
+  llvm::Value *EmitSVEStructLoad(SVETypeFlags TypeFlags,
+                                 SmallVectorImpl<llvm::Value *> &Ops, unsigned IntID);
+  llvm::Value *EmitSVEStructStore(SVETypeFlags TypeFlags,
+                                  SmallVectorImpl<llvm::Value *> &Ops,
+                                  unsigned IntID);
+  llvm::Value *EmitAArch64SVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+
   llvm::Value *EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E,
                                       llvm::Triple::ArchType Arch);
   llvm::Value *EmitBPFBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
@@ -3794,6 +4071,9 @@ public:
   llvm::Value *EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E);
   llvm::Value *EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+  bool ProcessOrderScopeAMDGCN(llvm::Value *Order, llvm::Value *Scope,
+                               llvm::AtomicOrdering &AO,
+                               llvm::SyncScope::ID &SSID);
 
 private:
   enum class MSVCIntrin;
@@ -3924,6 +4204,10 @@ public:
   /// aggregate type into a temporary LValue.
   LValue EmitAggExprToLValue(const Expr *E);
 
+  /// Build all the stores needed to initialize an aggregate at Dest with the
+  /// value Val.
+  void EmitAggregateStore(llvm::Value *Val, Address Dest, bool DestIsVolatile);
+
   /// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
   /// make sure it survives garbage collection until this point.
   void EmitExtendGCLifetime(llvm::Value *object);
@@ -3974,6 +4258,9 @@ public:
   /// Call atexit() with function dtorStub.
   void registerGlobalDtorWithAtExit(llvm::Constant *dtorStub);
 
+  /// Call unatexit() with function dtorStub.
+  llvm::Value *unregisterGlobalDtorWithUnAtExit(llvm::Function *dtorStub);
+
   /// Emit code in this function to perform a guarded variable
   /// initialization.  Guarded initializations are used when it's not
   /// possible to prove that an initialization will be done exactly
@@ -3997,12 +4284,12 @@ public:
                             ArrayRef<llvm::Function *> CXXThreadLocals,
                             ConstantAddress Guard = ConstantAddress::invalid());
 
-  /// GenerateCXXGlobalDtorsFunc - Generates code for destroying global
+  /// GenerateCXXGlobalCleanUpFunc - Generates code for cleaning up global
   /// variables.
-  void GenerateCXXGlobalDtorsFunc(
+  void GenerateCXXGlobalCleanUpFunc(
       llvm::Function *Fn,
       const std::vector<std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH,
-                                   llvm::Constant *>> &DtorsAndObjects);
+                                   llvm::Constant *>> &DtorsOrStermFinalizers);
 
   void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
                                         const VarDecl *D,
@@ -4013,14 +4300,6 @@ public:
   void EmitSynthesizedCXXCopyCtor(Address Dest, Address Src, const Expr *Exp);
 
-  void enterFullExpression(const FullExpr *E) {
-    if (const auto *EWC = dyn_cast<ExprWithCleanups>(E))
-      if (EWC->getNumObjects() == 0)
-        return;
-    enterNonTrivialFullExpression(E);
-  }
-  void enterNonTrivialFullExpression(const FullExpr *E);
-
   void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint = true);
 
   RValue EmitAtomicExpr(AtomicExpr *E);
@@ -4175,6 +4454,9 @@ public:
   /// SetFPModel - Control floating point behavior via fp-model settings.
   void SetFPModel();
 
+  /// Set the codegen fast-math flags.
+  void SetFastMathFlags(FPOptions FPFeatures);
+
 private:
   llvm::MDNode *getRangeForLoadFromType(QualType Ty);
   void EmitReturnOfRValue(RValue RV, QualType Ty);
@@ -4195,7 +4477,7 @@ private:
   ///
   /// \param AI - The first function argument of the expansion.
   void ExpandTypeFromArgs(QualType Ty, LValue Dst,
-                          SmallVectorImpl<llvm::Value *>::iterator &AI);
+                          llvm::Function::arg_iterator &AI);
 
   /// ExpandTypeToArgs - Expand an CallArg \arg Arg, with the LLVM type for \arg
   /// Ty, into individual arguments on the provided vector \arg IRCallArgs,
@@ -4411,10 +4693,15 @@ inline llvm::Value *DominatingLLVMValue::restore(CodeGenFunction &CGF,
 
   // Otherwise, it should be an alloca instruction, as set up in save().
   auto alloca = cast<llvm::AllocaInst>(value.getPointer());
-  return CGF.Builder.CreateAlignedLoad(alloca, alloca->getAlignment());
+  return CGF.Builder.CreateAlignedLoad(alloca, alloca->getAlign());
 }
 }  // end namespace CodeGen
+
+// Map the LangOption for floating point exception behavior into
+// the corresponding enum in the IR.
+llvm::fp::ExceptionBehavior
+ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind);
 
 }  // end namespace clang
 
 #endif
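
Several of the additions in this patch (ParentLoopDirectiveForScanRegion, CGFPOptionsRAII, and the OutlinedRegionBodyRAII/InlinedRegionBodyRAII helpers) follow the same save-and-restore RAII shape: the constructor stashes a piece of CodeGenFunction state and installs a new value, and the destructor restores the saved value when the region ends. The standalone C++ sketch below illustrates that pattern in isolation; CodeGenState, ScopedParentDirective, and the directive strings are hypothetical stand-ins for illustration, not clang types.

#include <iostream>
#include <string>
#include <utility>

// Hypothetical stand-in for the mutable per-region state kept on CodeGenFunction.
struct CodeGenState {
  std::string ParentDirective = "<none>";
};

// Same shape as ParentLoopDirectiveForScanRegion: the constructor saves the
// current value and installs the new one; the destructor restores the saved
// value, so nested regions unwind correctly even on early exits.
class ScopedParentDirective {
  CodeGenState &State;
  std::string Saved;

public:
  ScopedParentDirective(CodeGenState &S, std::string NewParent)
      : State(S), Saved(S.ParentDirective) {
    S.ParentDirective = std::move(NewParent);
  }
  ~ScopedParentDirective() { State.ParentDirective = Saved; }
};

int main() {
  CodeGenState CGS;
  std::cout << "before: " << CGS.ParentDirective << "\n";
  {
    ScopedParentDirective Outer(CGS, "omp for simd");
    std::cout << "outer:  " << CGS.ParentDirective << "\n";
    {
      ScopedParentDirective Inner(CGS, "omp scan");
      std::cout << "inner:  " << CGS.ParentDirective << "\n";
    } // Inner's destructor restores "omp for simd".
    std::cout << "outer:  " << CGS.ParentDirective << "\n";
  } // Outer's destructor restores "<none>".
  std::cout << "after:  " << CGS.ParentDirective << "\n";
}

CGFPOptionsRAII in the diff follows the same shape, pairing the saved OldFPFeatures with an Optional<CGBuilderTy::FastMathFlagGuard> member so the builder-side fast-math state is covered by the same scope.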
